FROM ubuntu:latest
RUN apt-get update
RUN apt-get install -y build-essential python python-pip python-dev git gfortran autoconf gsl-bin libgsl-dev wget unzip
# TODO: check whether python-tk is also needed (e.g. for matplotlib's Tk backend)
RUN apt-get install -y build-essential python python-pip python-dev git gfortran autoconf gsl-bin libgsl-dev wget unzip vim cmake
RUN pip install numpy scipy cython astropy ipython==5.3.0
RUN mkdir /home/Downloads
......@@ -15,18 +15,11 @@ RUN ./configure --prefix=/usr/local/ && make && make install
WORKDIR ..
#FFTW
RUN wget http://www.fftw.org/fftw-3.3.5.tar.gz && tar xzf fftw-3.3.5.tar.gz
WORKDIR fftw-3.3.5
RUN wget http://www.fftw.org/fftw-3.3.8.tar.gz && tar xzf fftw-3.3.8.tar.gz
WORKDIR fftw-3.3.8
RUN ./configure --enable-threads --enable-openmp --enable-shared --prefix=/usr/local/ && make && make install
WORKDIR ..
#GSL
RUN wget http://nl.mirror.babylon.network/gnu/gsl/gsl-2.3.tar.gz && tar xzf gsl-2.3.tar.gz
WORKDIR gsl-2.3
RUN ./configure --enable-shared --prefix=/usr/local/ && make && make install
WORKDIR ..
ENV LD_LIBRARY_PATH=/usr/local/lib
#HEALPIX
RUN wget http://downloads.sourceforge.net/project/healpix/Healpix_3.31/Healpix_3.31_2016Aug26.tar.gz && tar xzf Healpix_3.31_2016Aug26.tar.gz
WORKDIR Healpix_3.31
......@@ -37,9 +30,8 @@ RUN echo '4\n\
y\n\
0\n'\
> hlpx_config
RUN ./configure < healpy_config && make
RUN ./configure -L < hlpx_config && make
WORKDIR ..
#RUN [ -r /root/.healpix/3_31_Linux/config ] && . /root/.healpix/3_31_Linux/config
ENV HEALPIX_TARGET optimized_gcc
ENV HEALPIX /home/Downloads/Healpix_3.31
......@@ -47,15 +39,7 @@ ENV HEALPIX /home/Downloads/Healpix_3.31
RUN pip install healpy
#(Py)MultiNest
RUN apt-get update && apt-get install -y libblas3 libblas-dev \
liblapack3 liblapack-dev \
libatlas3-base libatlas-dev \
cmake \
build-essential \
git \
gfortran\
python-tk
#RUN apt-get install -y libopenmpi-dev openmpi-bin openmpi-doc
RUN apt-get install -y libblas3 libblas-dev liblapack3 liblapack-dev libatlas-base-dev libatlas3-base
RUN pip install numpy scipy matplotlib progressbar ipython==5.3.0
RUN git clone https://github.com/JohannesBuchner/MultiNest.git
......@@ -73,27 +57,22 @@ RUN apt-get install -y libopenmpi-dev openmpi-bin openmpi-doc
RUN pip install mpi4py
#hdf5
RUN apt-get install -y libhdf5-10 libhdf5-dev libhdf5-openmpi-10 libhdf5-openmpi-dev hdf5-tools
RUN apt-get install -y libhdf5-100 libhdf5-dev libhdf5-openmpi-100 libhdf5-openmpi-dev hdf5-tools
#h5py
RUN wget https://api.github.com/repos/h5py/h5py/tags -O - | grep tarball_url | grep -v rc | head -n 1 | cut -d '"' -f 4 | wget -i - -O h5py.tar.gz
RUN mkdir h5py
RUN tar xzf h5py.tar.gz -C h5py --strip-components=1
WORKDIR h5py
ENV CC=mpicc
ENV HDF5_DIR=/usr/lib/x86_64-linux-gnu/hdf5/openmpi
RUN python setup.py configure --mpi
RUN python setup.py build
RUN python setup.py install
ENV CC=mpicc
ENV HDF5_DIR=/usr/lib/x86_64-linux-gnu/hdf5/openmpi
RUN python setup.py configure --mpi
RUN python setup.py build
RUN python setup.py install
WORKDIR ..
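# Optional sanity check -- a sketch, assuming the OpenMPI/HDF5 stack above built
# cleanly: h5py.get_config().mpi is True only when h5py was compiled against an
# MPI-enabled HDF5, which the `setup.py configure --mpi` step requests.
#RUN python -c "import h5py; assert h5py.get_config().mpi, 'h5py built without MPI support'"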
#hampy
RUN pip install jupyter pandas
ARG CACHE_DATE=2017-10-31
#NIFTy
RUN git clone https://gitlab.mpcdf.mpg.de/ift/NIFTy.git -b master
RUN git clone https://gitlab.mpcdf.mpg.de/ift/NIFTy.git
WORKDIR NIFTy
RUN python setup.py install
WORKDIR ..
......@@ -101,12 +80,15 @@ WORKDIR ..
#Hammurabi
RUN git clone https://bitbucket.org/hammurabicode/hamx
WORKDIR hamx
RUN make -f install/Makefile
ENV HAMMURABI=/home/Downloads/hamx/bin/hamx
#RUN mkdir build && cd build && cmake .. && make install
#ENV HAMMURABI=/home/Downloads/hamx/bin/hamx
WORKDIR ..
#for pyhamx
RUN pip install jupyter pandas matplotlib
#IMAGINE
RUN git clone https://gitlab.mpcdf.mpg.de/ift/IMAGINE.git -b master
RUN git clone https://gitlab.mpcdf.mpg.de/ift/IMAGINE.git -b jxw
WORKDIR IMAGINE
RUN python setup.py install
WORKDIR ..
......
## likelihood module
### likelihood
provides essential functions related to calculating the likelihood
### simple likelihood
provides the trivial likelihood calculation
### ensemble likelihood
provides ensemble likelihood calculation
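For orientation, a minimal sketch of what the simple likelihood evaluates -- a Gaussian log-likelihood of the measured data given the ensemble-mean simulation (names are illustrative, not the actual IMAGINE API):

```python
import numpy as np

def simple_log_likelihood(data, sim_mean, data_covariance=None):
    """-0.5 * diff^T C^{-1} diff with diff = data - sim_mean."""
    diff = data - sim_mean
    if data_covariance is None:
        right = diff  # unit covariance
    else:
        # solve the linear system instead of forming an explicit inverse
        right = np.linalg.solve(data_covariance, diff)
    return -0.5 * np.vdot(diff, right)
```

The ensemble likelihood additionally estimates the simulation covariance from the ensemble of field realisations via an OAS shrinkage estimator (see the EnsembleLikelihood class below).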
......@@ -41,11 +41,9 @@ class EnsembleLikelihood(Likelihood):
def _process_simple_field(self, observable, measured_data,
data_covariance):
# https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula#Generalization
# B = A^{-1} + U U^dagger
# A = data_covariance
# B^{-1} c = (A_inv -
# A_inv U (I_k + U^dagger A_inv U)^{-1} U^dagger A_inv) c
# Theo's matrix-inversion scheme, modified by Jiaxin:
# use numpy.linalg.solve(A, b) to obtain inv(A)*b,
# thus safely avoiding the numerical pitfalls of explicitly inverting matrices
data_covariance = data_covariance.copy()
k = observable.shape[0]
n = observable.shape[1]
......@@ -54,82 +52,33 @@ class EnsembleLikelihood(Likelihood):
obs_mean = observable.ensemble_mean().val.get_full_data()
U = obs_val - obs_mean
U *= np.sqrt(n)
# compute quantities for OAS estimator
mu = np.vdot(U, U)/k/n
alpha = (np.einsum(U, [0, 1], U, [2, 1])**2).sum()
alpha /= k**2
numerator = (1 - 2./n)*alpha + (mu*n)**2
denominator = (k + 1 - 2./n) * (alpha - ((mu*n)**2)/n)
# OAS calculation modified by Jiaxin
# empirical ensemble covariance S, shape (n, n)
S = np.dot(U.conjugate().T, U)/k
# trace of S
TrS = np.trace(S)
# trace of S^2
TrS2 = np.trace(np.dot(S,S))
numerator = (1.-2./n)*TrS2 + TrS**2
denominator = (k+1.-2./n)*(TrS2-(TrS**2)/n)
if denominator == 0:
rho = 1
else:
rho = np.min([1, numerator/denominator])
self.logger.debug("rho: %f = %f / %f" % (rho, numerator, denominator))
# rescale U half/half
V = U * np.sqrt(1-rho) / np.sqrt(k)
self.logger.info(('data_cov', np.mean(data_covariance),
'rho*mu', rho*mu,
'rho', rho,
'mu', mu,
'alpha', alpha))
B = data_covariance + rho*mu
V_B = V/B
# build middle-matrix (kxk)
middle = (np.eye(k) +
np.einsum(V.conjugate(), [0, 1],
V_B, [2, 1]))
middle = np.linalg.inv(middle)
c = measured_data - obs_mean
# total covariance
AC = data_covariance + (1-rho)*S + np.eye(n)*rho*TrS/n
# obs - mean(sim)
dc = measured_data - obs_mean
# If the data was incomplete, i.e. contains np.NANs, set those values
# to zero.
c = np.nan_to_num(c)
# assuming that A == A^dagger, this can be shortened
# a_c = A.inverse_times(c)
# u_a_c = a_c.dot(U, spaces=1)
# u_a_c = u_a_c.conjugate()
# and: double conjugate shouldn't make a difference
# u_a_c = c.conjugate().dot(a_u, spaces=1).conjugate()
# Pure NIFTy is
# u_a_c = c.dot(a_u, spaces=1)
# u_a_c_val = u_a_c.val.get_full_data()
V_B_c = np.einsum(c, [1], V_B, [0, 1])
first_summand_val = c/B
second_summand_val = np.einsum(middle, [0, 1], V_B_c, [1])
second_summand_val = np.einsum(V_B, [0, 1],
second_summand_val, [0])
# # second_summand_val *= -1
# second_summand = first_summand.copy_empty()
# second_summand.val = second_summand_val
result_1 = np.vdot(c, first_summand_val)
result_2 = -np.vdot(c, second_summand_val)
# compute regularizing determinant of the covariance
# det(A + UV^T) = det(A) det(I + V^T A^-1 U)
if self.use_determinant:
log_det = np.sum(np.log(data_covariance +
np.sum((obs_val-obs_mean)**2, axis=0)/k))/n
else:
log_det = 0.
result = -0.5*(result_1 + result_2 + log_det)
self.logger.info("Calculated (%s): -1/2(%g + %g + %g) = %g" %
(self.observable_name,
result_1, result_2, log_det, result))
# result_array[i] = result
# total_result = result_array.mean()
dc = np.nan_to_num(dc)
# calculate likelihood
(sign_AC, logdet_AC) = np.linalg.slogdet(AC*2.*np.pi)
result = -0.5*np.vdot(dc, np.linalg.solve(AC, dc)) - 0.5*sign_AC*logdet_AC
return result
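# A self-contained sketch of the scheme above (illustrative only, not part of the
# class; the real method operates on NIFTy observables): build the empirical
# ensemble covariance S, shrink it towards a scaled identity with the OAS factor
# rho, add the measurement-noise covariance, and evaluate the Gaussian
# log-likelihood with solve/slogdet instead of an explicit inverse.
def _oas_gaussian_loglike_sketch(U, data_cov, dc):
    # U: (k, n) ensemble fluctuations (realisations minus ensemble mean)
    # data_cov: (n, n) measurement-noise covariance
    # dc: (n,) measured data minus ensemble mean
    k, n = U.shape
    S = np.dot(U.conjugate().T, U) / k            # empirical covariance, (n, n)
    TrS = np.trace(S)
    TrS2 = np.trace(np.dot(S, S))
    numerator = (1. - 2. / n) * TrS2 + TrS ** 2
    denominator = (k + 1. - 2. / n) * (TrS2 - TrS ** 2 / n)
    rho = 1. if denominator == 0 else min(1., numerator / denominator)
    AC = data_cov + (1. - rho) * S + np.eye(n) * rho * TrS / n
    sign_AC, logdet_AC = np.linalg.slogdet(2. * np.pi * AC)  # sign is +1 for positive-definite AC
    return -0.5 * np.vdot(dc, np.linalg.solve(AC, dc)) - 0.5 * logdet_AC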
......@@ -37,7 +37,9 @@ class SimpleLikelihood(Likelihood):
diff = data - obs_mean
if self.data_covariance is not None:
right = diff/self.data_covariance
# modified by Jiaxin:
# directly inverting the covariance matrix is bad practice,
# so use numpy.linalg.solve instead
right = np.linalg.solve(self.data_covariance, diff)
else:
right = diff
return -0.5 * np.vdot(diff, right)
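# Note (a sketch, not part of the class): np.linalg.solve expects data_covariance
# to be a full (n, n) matrix, whereas the element-wise division it replaces assumed
# a 1-D vector of per-pixel variances.  For a diagonal covariance the two agree
# once the variances are promoted to a matrix, e.g.
#
#   var = np.array([1.0, 4.0, 9.0])     # per-pixel variances
#   diff = np.array([1.0, 2.0, 3.0])
#   np.allclose(diff / var, np.linalg.solve(np.diag(var), diff))   # -> True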
......@@ -18,8 +18,8 @@
from imagine.magnetic_fields.magnetic_field import MagneticField
class ConstantMagneticField(MagneticField):
@property
def parameter_list(self):
parameter_list = ['b_x', 'b_y', 'b_z']
......
......@@ -17,14 +17,11 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
import numpy as np
from imagine.magnetic_fields.magnetic_field.magnetic_field_factory \
import MagneticFieldFactory
from imagine.magnetic_fields.magnetic_field.magnetic_field_factory import MagneticFieldFactory
from constant_magnetic_field import ConstantMagneticField
class ConstantMagneticFieldFactory(MagneticFieldFactory):
@property
def magnetic_field_class(self):
return ConstantMagneticField
......
......@@ -17,9 +17,7 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
from mpi4py import MPI
import simplejson as json
import numpy as np
from nifty import Field, FieldArray, RGSpace
......@@ -29,17 +27,16 @@ comm = MPI.COMM_WORLD
size = comm.size
rank = comm.rank
class MagneticField(Field):
def __init__(self, parameters={}, domain=None, val=None, dtype=None,
distribution_strategy=None, copy=False, random_seed=None):
super(MagneticField, self).__init__(
domain=domain,
val=val,
dtype=dtype,
distribution_strategy=distribution_strategy,
copy=copy)
super(MagneticField, self).__init__(domain=domain,
val=val,
dtype=dtype,
distribution_strategy=distribution_strategy,
copy=copy)
assert(len(self.domain) == 3)
assert(isinstance(self.domain[0], FieldArray))
......@@ -47,19 +44,17 @@ class MagneticField(Field):
assert(isinstance(self.domain[2], FieldArray))
self._parameters = {}
for p in self.descriptor_lookup:
for p in self.descriptor_lookup:#{
self._parameters[p] = np.float(parameters[p])
casted_random_seed = distributed_data_object(
global_shape=(self.shape[0],),
dtype=np.int,
distribution_strategy=distribution_strategy)
if random_seed is None:
#}
casted_random_seed = distributed_data_object(global_shape=(self.shape[0],),
dtype=np.int,
distribution_strategy=distribution_strategy)
if random_seed is None:#{
random_seed = np.random.randint(np.uint32(-1)/3,
size=self.shape[0])
random_seed = comm.bcast(random_seed, root=0)
#}
casted_random_seed[:] = random_seed
self.random_seed = casted_random_seed
......@@ -72,9 +67,9 @@ class MagneticField(Field):
return self._parameters
def set_val(self, new_val=None, copy=False):
if new_val is not None:
raise RuntimeError("Setting the field values explicitly is not "
"supported by MagneticField.")
if new_val is not None:#{
raise RuntimeError("Setting the field values explicitly is not supported by MagneticField.")
#}
self._val = self._create_field()
def _to_hdf5(self, hdf5_group):
......
......@@ -17,16 +17,11 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
import numpy as np
from keepers import Loggable
from imagine.carrier_mapper import unity_mapper
from nifty import FieldArray, RGSpace
from magnetic_field import MagneticField
class MagneticFieldFactory(Loggable, object):
def __init__(self, box_dimensions, resolution):
......@@ -34,9 +29,7 @@ class MagneticFieldFactory(Loggable, object):
self.box_dimensions = box_dimensions
self.resolution = resolution
self._parameter_defaults = self._initial_parameter_defaults
self._variable_to_parameter_mappings = \
self._initial_variable_to_parameter_mappings
self._variable_to_parameter_mappings = self._initial_variable_to_parameter_mappings
distances = np.array(self.box_dimensions) / np.array(self.resolution)
self._grid_space = RGSpace(shape=self.resolution,
distances=distances)
......@@ -44,9 +37,9 @@ class MagneticFieldFactory(Loggable, object):
self._ensemble_cache = {}
def _get_ensemble(self, ensemble_size):
if ensemble_size not in self._ensemble_cache:
self._ensemble_cache[ensemble_size] = \
FieldArray(shape=(ensemble_size,))
if ensemble_size not in self._ensemble_cache:#{
self._ensemble_cache[ensemble_size] = FieldArray(shape=(ensemble_size,))
#}
return self._ensemble_cache[ensemble_size]
@property
......@@ -56,8 +49,9 @@ class MagneticFieldFactory(Loggable, object):
@box_dimensions.setter
def box_dimensions(self, box_dimensions):
dim = tuple(np.array(box_dimensions, dtype=np.float))
if len(dim) != 3:
if len(dim) != 3:#{
raise ValueError("Input of box_dimensions must have length three.")
#}
self._box_dimensions = dim
@property
......@@ -67,8 +61,9 @@ class MagneticFieldFactory(Loggable, object):
@resolution.setter
def resolution(self, resolution):
resolution = tuple(np.array(resolution, dtype=np.int))
if len(resolution) != 3:
if len(resolution) != 3:#{
raise ValueError("Input for resolution must have length three.")
#}
self._resolution = resolution
@property
......@@ -104,10 +99,11 @@ class MagneticFieldFactory(Loggable, object):
@property
def variable_defaults(self):
variable_defaults = {}
for parameter in self.parameter_defaults:
for parameter in self.parameter_defaults:#{
low, high = self.variable_to_parameter_mappings[parameter]
default = self.parameter_defaults[parameter]
variable_defaults[parameter] = (default - low)/(high - low)
#}
return variable_defaults
@property
......@@ -122,17 +118,17 @@ class MagneticFieldFactory(Loggable, object):
value: [min, max]
"""
for k, v in new_mapping.items():
if k in self._variable_to_parameter_mappings:
if k in self._variable_to_parameter_mappings:#{
key = str(k)
value = [np.float(v[0]), np.float(v[1])]
self._variable_to_parameter_mappings.update({key: value})
self.logger.debug("Updated variable_to_parameter_mapping %s "
"to %s" % (key, str(value)))
self.logger.debug("Updated variable_to_parameter_mapping %s to %s" % (key, str(value)))
#}
def _map_variables_to_parameters(self, variables):
parameter_dict = {}
for variable_name in variables:
if variable_name in self.variable_to_parameter_mappings:
if variable_name in self.variable_to_parameter_mappings:#{
mapping = self.variable_to_parameter_mappings[variable_name]
mapped_variable = unity_mapper(variables[variable_name],
a=mapping[0],
......@@ -141,8 +137,10 @@ class MagneticFieldFactory(Loggable, object):
# a=mapping[0],
# m=mapping[1],
# b=mapping[2])
else:
#}
else:#{
mapped_variable = np.float(variables[variable_name])
#}
parameter_dict[variable_name] = mapped_variable
return parameter_dict
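# The exact signature of imagine.carrier_mapper.unity_mapper is not shown in this
# diff; conceptually it maps a unit-interval variable onto the physical parameter
# range [min, max] stored in variable_to_parameter_mappings.  A hypothetical
# linear version, for illustration only:
#
#   def linear_unity_mapper(x, a, b):
#       # x in [0, 1]  ->  parameter in [a, b]
#       return a + (b - a) * float(x)
#
# This is the inverse of the (default - low)/(high - low) normalisation used in
# the variable_defaults property above.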
......@@ -150,16 +148,12 @@ class MagneticFieldFactory(Loggable, object):
mapped_variables = self._map_variables_to_parameters(variables)
work_parameters = self.parameter_defaults.copy()
work_parameters.update(mapped_variables)
domain = (self._get_ensemble(ensemble_size),
self._grid_space,
self._vector)
result_magnetic_field = self.magnetic_field_class(
domain=domain,
parameters=work_parameters,
distribution_strategy='equal',
random_seed=random_seed)
self.logger.debug("Generated magnetic field with work-parameters %s" %
work_parameters)
result_magnetic_field = self.magnetic_field_class(domain=domain,
parameters=work_parameters,
distribution_strategy='equal',
random_seed=random_seed)
self.logger.debug("Generated magnetic field with work-parameters %s" % work_parameters)
return result_magnetic_field
......@@ -18,20 +18,21 @@
from imagine.magnetic_fields.magnetic_field import MagneticField
class WMAP3yrMagneticField(MagneticField):
@property
def descriptor_lookup(self):
lookup = \
{'b0': ['./MagneticField/Regular/WMAP/b0', 'value'],
'psi0': ['./MagneticField/Regular/WMAP/psi0', 'value'],
'psi1': ['./MagneticField/Regular/WMAP/psi1', 'value'],
'chi0': ['./MagneticField/Regular/WMAP/chi0', 'value'],
'random_rms': ['./MagneticField/Random/Global/rms', 'value'],
'random_rho': ['./MagneticField/Random/Global/rho', 'value'],
'random_a0': ['./MagneticField/Random/Global/a0', 'value']
}
lookup = {'b0': ['./MagneticField/Regular/WMAP/b0', 'value'],
'psi0': ['./MagneticField/Regular/WMAP/psi0', 'value'],
'psi1': ['./MagneticField/Regular/WMAP/psi1', 'value'],
'chi0': ['./MagneticField/Regular/WMAP/chi0', 'value'],
'random_rms': ['./MagneticField/Random/Global/ES/rms', 'value'],
'random_rho': ['./MagneticField/Random/Global/ES/rho', 'value'],
'random_a0': ['./MagneticField/Random/Global/ES/a0', 'value'],
'random_k0': ['./MagneticField/Random/Global/ES/k0', 'value'],
'random_r0': ['./MagneticField/Random/Global/ES/r0', 'value'],
'random_z0': ['./MagneticField/Random/Global/ES/z0', 'value']
}
return lookup
def _create_field(self):
......
......@@ -16,13 +16,11 @@
# IMAGINE is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from imagine.magnetic_fields.magnetic_field.magnetic_field_factory \
import MagneticFieldFactory
from imagine.magnetic_fields.magnetic_field.magnetic_field_factory import MagneticFieldFactory
from wmap3yr_magnetic_field import WMAP3yrMagneticField
class WMAP3yrMagneticFieldFactory(MagneticFieldFactory):
@property
def magnetic_field_class(self):
return WMAP3yrMagneticField
......@@ -33,9 +31,13 @@ class WMAP3yrMagneticFieldFactory(MagneticFieldFactory):
'psi0': 27.0,
'psi1': 0.9,
'chi0': 25.0,
# using ES random field generator in hammurabiX
'random_rms': 2.0,
'random_rho': 0.5,
'random_a0': 1.7}
'random_a0': 1.7,
'random_k0': 0.5,
'random_r0': 8.0,
'random_z0': 1.0}
return defaults
@property
......@@ -43,13 +45,15 @@ class WMAP3yrMagneticFieldFactory(MagneticFieldFactory):
return self._generate_variable_to_parameter_mapping_defaults(n=3)
def _generate_variable_to_parameter_mapping_defaults(self, n):
defaults = {
'b0': self._positive_interval(6.0, 1.9, n), # b0 astro-ph/0603450
'psi0': self._positive_interval(27.0, 7.0, n), # psi0 astro-ph/0603450
'psi1': self._positive_interval(0.9, 5.0, n), # psi1 astro-ph/0603450
'chi0': self._positive_interval(25, 8.0, n), # xsi0 astro-ph/0603450
'random_rms': self._positive_interval(2.0, 0.6, n),
'random_rho': self._positive_interval(0.5, 0.166, n),
'random_a0': self._positive_interval(1.7, 0.5, n)
}
defaults = {'b0': self._positive_interval(6.0, 1.9, n), # b0 astro-ph/0603450
'psi0': self._positive_interval(27.0, 7.0, n), # psi0 astro-ph/0603450
'psi1': self._positive_interval(0.9, 5.0, n), # psi1 astro-ph/0603450
'chi0': self._positive_interval(25, 8.0, n), # xsi0 astro-ph/0603450
'random_rms': self._positive_interval(2.0, 0.6, n),
'random_rho': self._positive_interval(0.5, 0.166, n),
'random_a0': self._positive_interval(1.7, 0.5, n),
'random_k0': self._positive_interval(0.5, 0.15, n),
'random_r0': self._positive_interval(8.0, 2.0, n),
'random_z0': self._positive_interval(1.0, 0.3, n)
}
return defaults
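# _positive_interval is defined elsewhere in the factory hierarchy and is not part
# of this diff; judging from its use above (default value, spread, n), a plausible
# -- purely hypothetical -- reading is an n-sigma prior interval around the
# default, clipped at zero:
#
#   def _positive_interval_sketch(default, sigma, n):
#       return [max(0.0, default - n * sigma), default + n * sigma]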
......@@ -18,18 +18,15 @@
from nifty import Field, FieldArray
class Observable(Field):
def __init__(self, domain=None, val=None, dtype=None,
distribution_strategy=None, copy=False):
super(Observable, self).__init__(
domain=domain,
val=val,
dtype=dtype,
distribution_strategy=distribution_strategy,
copy=copy)
super(Observable, self).__init__(domain=domain,
val=val,
dtype=dtype,
distribution_strategy=distribution_strategy,
copy=copy)
assert(len(self.domain) == 2)
assert(isinstance(self.domain[0], FieldArray))
......@@ -42,8 +39,8 @@ class Observable(Field):
return self._ensemble_mean
def _to_hdf5(self, hdf5_group):
if hasattr(self, '_ensemble_mean'):
return_dict = {'ensemble_mean': self._ensemble_mean}
if hasattr(self, "_ensemble_mean"):
return_dict = {"ensemble_mean": self._ensemble_mean}
else:
return_dict = {}
return_dict.update(
......@@ -55,7 +52,7 @@ class Observable(Field):
new_field = super(Observable, cls)._from_hdf5(hdf5_group=hdf5_group,
repository=repository)
try:
observable_mean = repository.get('ensemble_mean', hdf5_group)
observable_mean = repository.get("ensemble_mean", hdf5_group)
new_field._observable_mean = observable_mean
except(KeyError):
pass
......
......@@ -20,11 +20,9 @@ import os
import tempfile
import subprocess
import xml.etree.ElementTree as et
import numpy as np
from d2o import distributed_data_object
from nifty import HPSpace
from imagine.observers.observer import Observer
......@@ -32,35 +30,30 @@ from .observable_mixins import ObservableMixin
from .model_mixins import MagneticFieldModel
from imagine.observables import Observable
class Hammurapy(Observer):
def __init__(self, hammurabi_executable, magnetic_field_model, observables,
input_directory='./input', working_directory_base='.',
nside=64):
input_directory='./input', working_directory_base='.', nside=64):
self.hammurabi_executable = os.path.abspath(hammurabi_executable)
if not isinstance(magnetic_field_model, MagneticFieldModel):
raise TypeError("magnetic_field_model must be an instance of the "
"MagneticField class.")
if not isinstance(magnetic_field_model, MagneticFieldModel):#{
raise TypeError("magnetic_field_model must be an instance of the MagneticField class.")
#}
self.magnetic_field_model = magnetic_field_model
if not isinstance(observables, list):
if not isinstance(observables, list):#{
if isinstance(observables, tuple):
observables = list(observables)
else:
observables = [observables]
for obs in observables:
#}
for obs in observables:#{
if not isinstance(obs, ObservableMixin):
raise TypeError("observables must be an instance of the "
"ObservableMixin class.")
raise TypeError("observables must be an instance of the ObservableMixin class.")
#}
self.observables = observables
self.input_directory = os.path.abspath(input_directory)
self.working_directory_base = os.path.abspath(working_directory_base)
self.nside = int(nside)
self._hpSpace = HPSpace(nside=self.nside)
self.last_call_log = ""
@property
......@@ -75,7 +68,7 @@ class Hammurapy(Observer):
# Try multiple times in order to overcome 'Directory not empty' errors
# Hopefully open file handles get closed in the meantime
n = 0
while n < 10:
while n < 10:#{
temp_process = subprocess.Popen(['rm', '-rf', path],
stderr=subprocess.PIPE)
# wait until process is finished
......@@ -84,21 +77,22 @@ class Hammurapy(Observer):
if errlog == '':
self.logger.debug("Successfully removed temporary folder.")
break
#}
else:
self.logger.warning('Could not delete %s' % path)
def _call_hammurabi(self, path):
temp_process = subprocess.Popen(
[self.hammurabi_executable, 'parameters.xml'],
stdout=subprocess.PIPE,
cwd=path)
temp_process = subprocess.Popen([self.hammurabi_executable, 'parameters.xml'],
stdout=subprocess.PIPE,
cwd=path)
self.last_call_log = temp_process.communicate()[0]
def _initialize_observable_dict(self, magnetic_field):
observable_dict = {}
for observable in self.observables:
observable_dict = self._initialize_observable_dict_helper(
observable_dict, magnetic_field, observable)
observable_dict = self._initialize_observable_dict_helper(observable_dict,
magnetic_field,
observable)
return observable_dict
def _initialize_observable_dict_helper(self, observable_dict,
......@@ -108,39 +102,31 @@ class Hammurapy(Observer):
# It is important to initialize the Observables with an explicit
# value. Otherwise the d2o will not instantaneously be created
# (c.f. lazy object creation).
observable_dict[component] = Observable(
val=0,
domain=(ensemble_space, self._hpSpace),
distribution_strategy='equal')
observable_dict[component] = Observable(val=0,
domain=(ensemble_space, self._hpSpace),
distribution_strategy='equal')
return observable_dict
def _write_parameter_xml(self, magnetic_field, local_ensemble_index,
working_directory):
# load the default xml
try:
try:#{
default_parameters_xml = os.path.join(self.input_directory,
'default_parameters.xml')
tree = et.parse(default_parameters_xml)
except IOError:
#}
except IOError:#{
import imagine
module_path = os.path.split(
imagine.observers.hammurapy.__file__)[0]
default_parameters_xml = os.path.join(
module_path, 'input/default_parameters.xml')
module_path = os.path.split(imagine.observers.hammurapy.__file__)[0]
default_parameters_xml = os.path.join(module_path, 'input/default_parameters.xml')
tree = et.parse(default_parameters_xml)
#}
root = tree.getroot()
# modify the default_parameters.xml
custom_parameters = [
['./Grid/Integration/shell/auto/nside_min', 'value',
self.nside],
['./Grid/Integration/nside_sim', 'value', self.nside],
# ['./Interface/fe_grid', 'read', '1'],
# ['./Interface/fe_grid', 'filename',
# os.path.join(self.input_directory, 'fe_grid.bin')]
custom_parameters = [['./Grid/Shell/layer/auto/nside_min', 'value', self.nside],
['./Grid/Shell/nside_sim', 'value', self.nside],
]
# access the magnetic-field's random-seed d2o directly, since we
# know that the distribution strategy is the same for the
# random samples and the magnetic field itself
......@@ -156,27 +142,24 @@ class Hammurapy(Observer):
# set up grid parameters
grid_space = magnetic_field.domain[1]
lx, ly, lz = \
np.array(grid_space.shape)*np.array(grid_space.distances)/2.
lx, ly, lz = np.array(grid_space.shape)*np.array(grid_space.distances)/2.
nx, ny, nz = grid_space.shape
custom_parameters += [['./Grid/Box/x_min', 'value', -lx],
['./Grid/Box/x_max', 'value', lx],
['./Grid/Box/y_min', 'value', -ly],
['./Grid/Box/y_max', 'value', ly],
['./Grid/Box/z_min', 'value', -lz],
['./Grid/Box/z_max', 'value', lz],
['./Grid/Box/nx', 'value', nx],
['./Grid/Box/ny', 'value', ny],
['./Grid/Box/nz', 'value', nz]]
for parameter in custom_parameters:
custom_parameters += [['./Grid/Box_GMF/x_min', 'value', -lx],
['./Grid/Box_GMF/x_max', 'value', lx],
['./Grid/Box_GMF/y_min', 'value', -ly],
['./Grid/Box_GMF/y_max', 'value', ly],
['./Grid/Box_GMF/z_min', 'value', -lz],
['./Grid/Box_GMF/z_max', 'value', lz],
['./Grid/Box_GMF/nx', 'value', nx],
['./Grid/Box_GMF/ny', 'value', ny],
['./Grid/Box_GMF/nz', 'value', nz]]
for parameter in custom_parameters:#{
root.find(parameter[0]).set(parameter[1], str(parameter[2]))
#}
self.magnetic_field_model.update_parameter_xml(root)
for observable in self.observables:
for observable in self.observables:#{
observable.update_parameter_xml(root)
#}
parameters_file_path = os.path.join(working_directory,
'parameters.xml')
tree.write(parameters_file_path)
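# The pattern above -- locate an element by its path and overwrite one attribute --
# can be exercised in isolation; a minimal sketch on a toy document (paths and
# values are illustrative, not the full hammurabiX schema):
def _patch_xml_sketch():
    root = et.fromstring(
        "<root><Grid><Box_GMF><nx value='800'/></Box_GMF></Grid></root>")
    # same find/set idiom as used in _write_parameter_xml above
    root.find('./Grid/Box_GMF/nx').set('value', str(160))
    return et.tostring(root)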
......@@ -184,54 +167,41 @@ class Hammurapy(Observer):
def _fill_observable_dict(self, observable_dict, working_directory,
local_ensemble_index):
for observable in self.observables:
observable.fill_observable_dict(
observable_dict=observable_dict,
working_directory=working_directory,
local_ensemble_index=local_ensemble_index,
nside=self.nside)
observable.fill_observable_dict(observable_dict=observable_dict,
working_directory=working_directory,
local_ensemble_index=local_ensemble_index,
nside=self.nside)
return observable_dict
def __call__(self, magnetic_field):
if not isinstance(magnetic_field, self.magnetic_field_class):
raise ValueError("Given magnetic field is not a subclass of" +
" %s" % str(self.magnetic_field_class))
observable_dict = self._initialize_observable_dict(
magnetic_field=magnetic_field)
raise ValueError("Given magnetic field is not a subclass of %s" % str(self.magnetic_field_class))
observable_dict = self._initialize_observable_dict(magnetic_field=magnetic_field)
# iterate over ensemble and put result into result_observable
# get the local shape by creating a dummy d2o
ensemble_number = magnetic_field.shape[0]
dummy = distributed_data_object(global_shape=(ensemble_number,),
distribution_strategy='equal',
dtype=np.float)
local_length = dummy.distributor.local_length
for local_ensemble_index in xrange(local_length):
self.logger.debug("Processing local_ensemble_index %i." %
local_ensemble_index)
self.logger.debug("Processing local_ensemble_index %i." % local_ensemble_index)
# create a temporary folder
working_directory = self._make_temp_folder()
self._write_parameter_xml(
magnetic_field=magnetic_field,
local_ensemble_index=local_ensemble_index,
working_directory=working_directory)
self._write_parameter_xml(magnetic_field=magnetic_field,
local_ensemble_index=local_ensemble_index,
working_directory=working_directory)
# call hammurabi
self._call_hammurabi(working_directory)
# if hammurabi failed, _fill_observable_dict will fail
try:
self._fill_observable_dict(observable_dict,
working_directory,
local_ensemble_index)
except:
self.logger.critical("Hammurabi failed! Last call log:\n" +
self.last_call_log)
self.logger.critical("Hammurabi failed! Last call log:\n" + self.last_call_log)
raise
finally:
self._remove_folder(working_directory)
return observable_dict
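# The d2o 'equal' distribution strategy above assigns each MPI rank its own share
# of ensemble indices; a rough, d2o-free sketch of the same idea with plain
# numpy/mpi4py (illustrative only -- the real code relies on d2o for this):
#
#   from mpi4py import MPI
#   comm = MPI.COMM_WORLD
#   ensemble_number = 8
#   local_indices = np.array_split(np.arange(ensemble_number), comm.size)[comm.rank]
#   for local_ensemble_index in local_indices:
#       pass  # write parameters.xml, run hammurabi, read the output maps back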
......@@ -30,8 +30,8 @@
<y value="0"/> <!-- kpc -->
<z value="6"/> <!-- pc -->
</SunPosition>
<!-- field grid -->
<Box>
<!-- GMF field grid -->
<Box_GMF>
<!-- grid vertex number -->
<nx value="800"/>
<ny value="800"/>
......@@ -43,20 +43,52 @@
<y_max value="20.0"/> <!-- kpc -->
<z_min value="-4.0"/> <!-- kpc -->
<z_max value="4.0"/> <!-- kpc -->
</Box>
</Box_GMF>
<!-- FE field grid -->
<Box_FE>
<!-- grid vertex number -->
<nx value="800"/>
<ny value="800"/>
<nz value="160"/>
<!-- box limit, in Galactic-centric frame -->
<x_min value="-20.0"/> <!-- kpc -->
<x_max value="20.0"/> <!-- kpc -->
<y_min value="-20.0"/> <!-- kpc -->
<y_max value="20.0"/> <!-- kpc -->
<z_min value="-4.0"/> <!-- kpc -->
<z_max value="4.0"/> <!-- kpc -->
</Box_FE>
<!-- CRE field grid -->
<Box_CRE>
<nz value="80"/>
<nx value="80"/>
<ny value="80"/>
<nE value="80"/>
<x_min value="0.0"/> <!-- kpc -->
<x_max value="0.0"/> <!-- kpc -->
<y_min value="0.0"/> <!-- kpc -->
<y_max value="0.0"/> <!-- kpc -->
<z_min value="-4.0"/> <!-- kpc -->
<z_max value="4.0"/> <!-- kpc -->
<!-- E_max = E_min*exp(nE*E_fact) -->
<E_min value="0.1"/> <!-- GeV -->
<E_max value="100.0"/> <!-- GeV -->
</Box_CRE>
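<!-- with the defaults above (E_min = 0.1 GeV, E_max = 100 GeV, nE = 80) the implied
     logarithmic energy step is E_fact = ln(E_max/E_min)/nE = ln(1000)/80 ≈ 0.0863 -->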
<!-- LOS integration setting -->
<Integration>
<shell type="auto">
<Shell>
<layer type="auto">
<auto>
<shell_num value="1"/> <!-- total shell number -->
<nside_min value="32"/> <!-- inner most shell resolution -->
</auto>
<!-- set shell resolution from inside out -->
<!-- set shell cutting point from inside out -->
<manual> <!-- total shell number calculated automatically -->
<cut value="0.5"/> <!-- cut at half of total shell radius -->
<nside value="32"/> <!-- inner most shell Nside -->
<nside value="16"/>
</manual>
</shell>
</layer>
<nside_sim value="32"/> <!-- output map resolution -->
<!-- maximum radius in earth/Galactic centric frame -->
<ec_r_max value="30.0"/> <!-- kpc -->
......@@ -64,16 +96,16 @@
<!-- maximum height in galactic centric frame -->
<gc_z_max value="10.0"/> <!-- kpc -->
<!-- radial resolution -->
<ec_r_res value="0.5"/> <!-- kpc -->
<ec_r_res value="0.05"/> <!-- kpc -->
<!-- galactic latitude lower limit -->
<lat_lim value="0."/> <!-- [0,90] -->
</Integration>
</Shell>
</Grid>
<!-- magnetic fields -->
<MagneticField>
<!-- regular fields -->
<Regular type="Verify">
<Regular cue="1" type="Test">
<!-- WMAP LSA -->
<WMAP>
<b0 value="1.2"/> <!-- microG -->
......@@ -123,46 +155,54 @@
<comp_p value="3"/> <!-- cutoff power -->
</Jaffe>
<!-- verification -->
<Verify>
<Test>
<b0 value="2.0"/> <!-- microG -->
<l0 value="70"/> <!-- deg -->
<r value="0.3"/>
</Verify>
</Test>
</Regular>
<!-- turbulent fields -->
<!-- random/turbulent fields -->
<!-- seed should be non-negative int -->
<!-- seed=0, seed according to thread id and time -->
<Random cue="0" type="Local" seed="0">
<!-- global anisotropy -->
<Global>
<rms value="0.8"/> <!-- RMS at k0 -->
<k0 value="0.5"/> <!-- cutoff, 1/(Lambda in kpc) -->
<a0 value="1.7"/> <!-- 5/3, Kolmogorov -->
<rho value="0.5"/> <!-- [0,1] -->
<!-- energy density rescaling -->
<r0 value="8.0"/> <!-- in kpc -->
<z0 value="1.0"/> <!-- in kpc -->
<!-- global generators -->
<Global type="ES">
<!-- Ensslin-Steininger method -->
<ES>
<rms value="0.8"/> <!-- RMS -->
<k0 value="0.5"/> <!-- cutoff, 1/(Lambda in kpc) -->
<a0 value="1.7"/> <!-- 5/3, Kolmogorov -->
<rho value="0.5"/> <!-- [0,1] -->
<!-- energy density rescaling -->
<r0 value="8.0"/> <!-- in kpc -->
<z0 value="1.0"/> <!-- in kpc -->
</ES>
<!-- Jaffe method -->
<Jaffe>
<!-- to be implemented -->
</Jaffe>
</Global>
<!-- local turbulent -->
<Local>
<pa0 value="1"/> <!-- Alfven power norm at k0 -->
<pf0 value="1"/> <!-- fast power norm at k0 -->
<ps0 value="1"/> <!-- slow power norm at k0 -->
<aa0 value="1.7"/> <!-- Kolmogorov -->
<af0 value="1.5"/>
<as0 value="1.7"/>
<k0 value="0.5"/> <!-- cutoff, 1/(Lambda in kpc) -->
<beta value="0.1"/> <!-- plasma beta -->
<ma value="0.5"/> <!-- Alfven Mach number -->
<!-- local generators -->
<Local type="MHD">
<!-- compressive MHD approximation -->
<MHD>
<pa0 value="1"/> <!-- Alfven power norm at k0 -->
<pf0 value="1"/> <!-- fast power norm at k0 -->
<ps0 value="1"/> <!-- slow power norm at k0 -->
<aa0 value="1.7"/> <!-- Kolmogorov -->
<af0 value="1.5"/>
<as0 value="1.7"/>
<k0 value="0.5"/> <!-- cutoff, 1/(Lambda in kpc) -->
<beta value="0.1"/> <!-- plasma beta -->
<ma value="0.5"/> <!-- Alfven Mach number -->
</MHD>
</Local>
<!-- Jaffe turbulent -->
<Jaffe>
</Jaffe>
</Random>
</MagneticField>
<!-- free electron field -->
<FreeElectron>
<Regular type="YMW16">
<Regular cue="1" type="Test">
<!-- YMW16 -->
<YMW16>
<Warp>
......@@ -185,16 +225,31 @@
<!-- using HH14 SpiralArms -->
<SpiralArm>
<B2s value="4000"/>
<Ele_arm_0 value="0.135000"/>
<Ele_arm_1 value="0.129000"/>
<Ele_arm_2 value="0.103000"/>
<Ele_arm_3 value="0.116000"/>
<Ele_arm_4 value="0.005700"/>
<Wid_arm_0 value="300"/>
<Wid_arm_1 value="500"/>
<Wid_arm_2 value="300"/>
<Wid_arm_3 value="500"/>
<Wid_arm_4 value="300"/>
<Ele_arm_0 value="0.135000"/> <!-- electron density norm, cm^-3 -->
<Ele_arm_1 value="0.129000"/> <!-- electron density norm, cm^-3 -->
<Ele_arm_2 value="0.103000"/> <!-- electron density norm, cm^-3 -->
<Ele_arm_3 value="0.116000"/> <!-- electron density norm, cm^-3 -->
<Ele_arm_4 value="0.005700"/> <!-- electron density norm, cm^-3 -->
<Wid_arm_0 value="300"/> <!-- typical arm width, pc -->
<Wid_arm_1 value="500"/> <!-- typical arm width, pc -->
<Wid_arm_2 value="300"/> <!-- typical arm width, pc -->
<Wid_arm_3 value="500"/> <!-- typical arm width, pc -->
<Wid_arm_4 value="300"/> <!-- typical arm width, pc -->
<Rref_arm_0 value="3.35"/> <!-- arm reference radius, kpc -->
<Rref_arm_1 value="3.707"/> <!-- arm reference radius, kpc -->
<Rref_arm_2 value="3.56"/> <!-- arm reference radius, kpc -->
<Rref_arm_3 value="3.670"/> <!-- arm reference radius, kpc -->
<Rref_arm_4 value="8.21"/> <!-- arm reference radius, kpc -->
<Phiref_arm_0 value="44.4"/> <!-- arm reference azimuthal angle, deg -->
<Phiref_arm_1 value="120.0"/> <!-- arm reference azimuthal angle, deg -->
<Phiref_arm_2 value="218.6"/> <!-- arm reference azimuthal angle, deg -->
<Phiref_arm_3 value="330.3"/> <!-- arm reference azimuthal angle, deg -->
<Phiref_arm_4 value="55.1"/> <!-- arm reference azimuthal angle, deg -->
<pitch_arm_0 value="11.43"/> <!-- arm pitch angle, deg -->
<pitch_arm_1 value="9.84"/> <!-- arm pitch angle, deg -->
<pitch_arm_2 value="10.38"/> <!-- arm pitch angle, deg -->
<pitch_arm_3 value="10.54"/> <!-- arm pitch angle, deg -->
<pitch_arm_4 value="2.77"/> <!-- arm pitch angle, deg -->
<Aa value="11680"/>
<Ka value="5.015"/>
<ncn value="2.4"/>
......@@ -237,29 +292,35 @@
</LoopI>
</YMW16>
<!-- verification -->
<Verify>
<Test>
<n0 value="0.01"/> <!-- pccm -->
<r0 value="3.0"/> <!-- kpc -->
</Verify>
</Test>
</Regular>
<!-- turbulent free electron -->
<!-- random/turbulent free electron -->
<Random cue="0" type="Global" seed="0">
<Global>
<rms value="1.0"/>
<k0 value="2.0"/> <!-- cutoff 1/Lambda in kpc -->
<a0 value="-1.7"/>
<!-- field strength rescaling -->
<r0 value="8.0"/> <!-- in kpc -->
<z0 value="1.0"/> <!-- in kpc -->
<!-- global generator -->
<Global type="DFT">
<!-- default global generator -->
<DFT>
<rms value="1.0"/>
<k0 value="2.0"/> <!-- cutoff 1/Lambda in kpc -->
<a0 value="-1.7"/>
<!-- field strength rescaling -->
<r0 value="8.0"/> <!-- in kpc -->
<z0 value="1.0"/> <!-- in kpc -->
</DFT>
</Global>
<!-- local generator -->
<Local>
<!-- to be implemented -->
</Local>
</Random>
</FreeElectron>
<!-- analytic/numeric CRE -->
<CRE type="Analytic">
<!-- flux norm comes with unit [GeV m^2 s Sr]^-1 -->
<CRE type="Test">
<Analytic>
<!-- CRE spectral index: -alpha+beta*r+theta*z, Galactic cylindric frame -->
<alpha value="3.0"/>
......@@ -272,35 +333,17 @@
<j0 value="0.0217"/> <!-- local CRE flux norm factor @ E0 -->
</Analytic>
<Numeric type="2D">
<!-- 2D Galactic-centric cylindric frame -->
<nr value="80"/> <!-- silent if type="3D" -->
<nz value="80"/>
<nx value="80"/> <!-- silent if type="2D" -->
<ny value="80"/> <!-- silent if type="2D" -->
<!-- grid setting -->
<r_max value="20.0"/> <!-- kpc -->
<x_min value="0.0"/> <!-- kpc -->
<x_max value="0.0"/> <!-- kpc -->
<y_min value="0.0"/> <!-- kpc -->
<y_max value="0.0"/> <!-- kpc -->
<z_min value="-4.0"/> <!-- kpc -->
<z_max value="4.0"/> <!-- kpc -->
<!-- E_max = E_min*exp(nE*E_fact) -->
<E_min value="0.1"/> <!-- GeV -->
<E_max value="100.0"/> <!-- GeV -->
<E_fact value="0.1"/> <!-- dimensionless -->
<Numeric>
<!-- info of numeric CRE in root->Grid->Box_CRE -->
</Numeric>
<!-- verification -->
<Verify>
<Test>
<alpha value="3.0"/>
<r0 value="3.0"/> <!-- cutoff radius, kpc -->
<!-- by default, we choose AMS02 20.6GeV -->
<E0 value="20.6"/> <!-- CRE kinetic energy reference, GeV -->
<j0 value="0.0217"/> <!-- local CRE flux norm factor @ E0 -->
</Verify>
</Test>
</CRE>
</root>
......@@ -19,13 +19,14 @@
from .magnetic_field_model import MagneticFieldModel
from imagine.magnetic_fields.wmap3yr_magnetic_field import WMAP3yrMagneticField
class WMAP3yrMixin(MagneticFieldModel):
def update_parameter_xml(self, root):
custom_parameters = [
['./MagneticField/Regular', 'type', 'WMAP'],
['./MagneticField/Random', 'cue', '1'],
['./MagneticField/Random', 'type', 'Global']]
custom_parameters = [['./MagneticField/Regular', 'type', 'WMAP'],
['./MagneticField/Regular', 'cue', '1'],
['./MagneticField/Random', 'cue', '1'],
['./MagneticField/Random', 'type', 'Global'],
['./MagneticField/Random/Global','type','ES']]
for parameter in custom_parameters:
root.find(parameter[0]).set(parameter[1], str(parameter[2]))
......
......@@ -17,11 +17,10 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
import xml.etree.ElementTree as et
from .observable_mixin import ObservableMixin
class DMMixin(ObservableMixin):
@property
def obs_name(self):
return 'dm'
......@@ -31,7 +30,6 @@ class DMMixin(ObservableMixin):
return ['dm']
def update_parameter_xml(self, root):
element = et.Element('DM', {'cue': '1',
'filename': self.obs_name+'.fits'})
element = et.Element('DM', {'cue': '1', 'filename': self.obs_name+'.fits'})
output = root.find('Obsout')
output.append(element)
......@@ -17,11 +17,10 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
import xml.etree.ElementTree as et
from .observable_mixin import ObservableMixin
class FDMixin(ObservableMixin):
@property
def obs_name(self):
return 'fd'
......@@ -31,7 +30,6 @@ class FDMixin(ObservableMixin):
return ['fd']
def update_parameter_xml(self, root):
element = et.Element('Faraday', {'cue': '1',
'filename': self.obs_name+'.fits'})
element = et.Element('Faraday', {'cue': '1', 'filename': self.obs_name+'.fits'})
output = root.find('Obsout')
output.append(element)
......@@ -18,13 +18,12 @@
import abc
import os
import healpy
from keepers import Loggable
class ObservableMixin(Loggable, object):
@abc.abstractproperty
def obs_name(self):
raise NotImplementedError
......@@ -42,10 +41,10 @@ class ObservableMixin(Loggable, object):
map_list = self._read_fits_file(path=working_directory,
name=self.obs_name + '.fits',
nside=nside)
for i, map_component in enumerate(map_list):
for i, map_component in enumerate(map_list):#{
temp_obs = observable_dict[self.component_names[i]]
temp_obs.val.data[local_ensemble_index] = map_component
#}
def _read_fits_file(self, path, name, nside):
map_path = os.path.join(path, name)
......@@ -53,8 +52,7 @@ class ObservableMixin(Loggable, object):
i = 0
while True:
try:
loaded_map = healpy.read_map(map_path, verbose=False,
field=i)
loaded_map = healpy.read_map(map_path, verbose=False, field=i)
# loaded_map = healpy.ud_grade(loaded_map, nside_out=nside)
result_list += [loaded_map]
i += 1
......
......@@ -17,11 +17,10 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
import xml.etree.ElementTree as et
from .observable_mixin import ObservableMixin
class SyncMixin(ObservableMixin):
def __init__(self, frequency='23'):
self.frequency = str(frequency)
......
......@@ -18,11 +18,8 @@
import os
import numpy as np
from scipy import optimize
from mpi4py import MPI
from keepers import Loggable
from imagine.likelihoods import Likelihood
......@@ -39,7 +36,6 @@ rank = comm.rank
WORK_TAG = 0
DIE_TAG = 1
class Pipeline(Loggable, object):
"""
The pipeline
......@@ -62,19 +58,19 @@ class Pipeline(Loggable, object):
self.prior = prior
self.active_variables = active_variables
self.ensemble_size = ensemble_size
# rescaling total likelihood in _core_likelihood
self.likelihood_rescaler = 1.
# setting defaults for pymultinest
self.pymultinest_parameters = {'verbose': True,
'n_iter_before_update': 1,
'n_live_points': 100}
self.pymultinest_parameters = {"verbose": True,
"n_iter_before_update": 1,
"n_live_points": 100}
self.pymultinest_parameters.update(pymultinest_parameters)
self.sample_callback = sample_callback
# seed for generating random field
self.fixed_random_seed = None
self.likelihood_threshold = 0.
# checking likelihood threshold in _multinest_likelihood
self.check_threshold = False
self.likelihood_threshold = 0.
@property
def observer(self):
......@@ -82,8 +78,9 @@ class Pipeline(Loggable, object):
@observer.setter
def observer(self, observer):
if not isinstance(observer, Observer):
if not isinstance(observer, Observer):#{
raise TypeError("observer must be an instance of Observer-class.")
#}
self.logger.debug("Setting observer.")
self._observer = observer
......@@ -95,14 +92,15 @@ class Pipeline(Loggable, object):
def likelihood(self, likelihood):
self.logger.debug("Setting likelihood.")
self._likelihood = ()
if not (isinstance(likelihood, list) or
isinstance(likelihood, tuple)):
if not (isinstance(likelihood, list) or isinstance(likelihood, tuple)):#{
likelihood = [likelihood]
for l in likelihood:
if not isinstance(l, Likelihood):
raise TypeError(
"likelihood must be an instance of Likelihood-class.")
#}
for l in likelihood:#{
if not isinstance(l, Likelihood):#{
raise TypeError("likelihood must be an instance of Likelihood-class.")
#}
self._likelihood += (l,)
#}
@property
def prior(self):
......@@ -111,9 +109,9 @@ class Pipeline(Loggable, object):
@prior.setter
def prior(self, prior):
self.logger.debug("Setting prior.")
if not isinstance(prior, Prior):
raise TypeError(
"prior must be an instance of Prior-class.")
if not isinstance(prior, Prior):#{
raise TypeError("prior must be an instance of Prior-class.")
#}
self._prior = prior
@property
......@@ -122,10 +120,9 @@ class Pipeline(Loggable, object):
@magnetic_field_factory.setter
def magnetic_field_factory(self, magnetic_field_factory):
if not isinstance(magnetic_field_factory, MagneticFieldFactory):
raise TypeError(
"magnetic_field_factory must be an instance of the "
"MagneticFieldFactory-class.")
if not isinstance(magnetic_field_factory, MagneticFieldFactory):#{
raise TypeError("magnetic_field_factory must be an instance of the MagneticFieldFactory-class.")
#}
self.logger.debug("Setting magnetic_field_factory.")
self._magnetic_field_factory = magnetic_field_factory
......@@ -135,14 +132,14 @@ class Pipeline(Loggable, object):
@active_variables.setter
def active_variables(self, active_variables):
if not isinstance(active_variables, list):
raise TypeError(
"active_variables must be a list.")
self.logger.debug("Resetting active_variables to %s" %
str(active_variables))
if not isinstance(active_variables, list):#{
raise TypeError("active_variables must be a list.")
#}
self.logger.debug("Resetting active_variables to %s" % str(active_variables))
new_active = []
for av in active_variables:
for av in active_variables:#{
new_active += [str(av)]
#}
self._active_variables = new_active
@property
......@@ -151,10 +148,10 @@ class Pipeline(Loggable, object):
@ensemble_size.setter
def ensemble_size(self, ensemble_size):
ensemble_size = int(ensemble_size)
if ensemble_size <= 0:
if ensemble_size <= 0:#{
raise ValueError("ensemble_size must be positive!")
#}
self.logger.debug("Setting ensemble size to %i." % ensemble_size)
self._ensemble_size = ensemble_size
......@@ -162,74 +159,71 @@ class Pipeline(Loggable, object):
cube_content = np.empty(ndim)
for i in xrange(ndim):
cube_content[i] = cube[i]
# heuristic for minimizers:
# if a parameter value from outside of the cube is requested, return
# the worst possible likelihood value
if np.any(cube_content > 1.) or np.any(cube_content < 0.):
self.logger.info('Cube %s requested. Returned most negative '
'possible number.' % cube_content)
if np.any(cube_content > 1.) or np.any(cube_content < 0.):#{
self.logger.info("Cube %s requested. Returned most negative possible number." % cube_content)
return np.nan_to_num(-np.inf)
if rank != 0:
raise RuntimeError("_multinest_likelihood must only be called on "
"rank==0.")
#}
if rank != 0:#{
raise RuntimeError("_multinest_likelihood must only be called on rank==0.")
#}
error_count = 0
while error_count < 5:
for i in xrange(1, size):
for i in xrange(1, size):#{
comm.send(cube_content, dest=i, tag=WORK_TAG)
#}
self.logger.debug("Sent multinest-cube to nodes with rank > 0.")
likelihood = self._core_likelihood(cube_content)
if likelihood < self.likelihood_threshold or \
not self.check_threshold:
if likelihood < self.likelihood_threshold or not self.check_threshold:
break
else:
self.logger.error("Positive log-likelihood value encountered!"
"Redoing calculation.")
self.logger.error("Positive log-likelihood value encountered! Redoing calculation.")
#}
return likelihood
# on slave nodes
def _listen_for_likelihood_calls(self):
status = MPI.Status()
while True:
while True:#{
cube = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
if status.Get_tag() == DIE_TAG:#{
if status == DIE_TAG:#{
self.logger.debug("Received DIE_TAG from rank 0.")
break
#}
self.logger.debug("Received cube from rank 0.")
self._core_likelihood(cube)
#}
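# The master/worker handshake used above, in isolation (a sketch with plain mpi4py,
# following the WORK_TAG/DIE_TAG convention defined at module level; cube_content
# stands for a parameter vector in the unit cube):
#
#   status = MPI.Status()
#   if rank == 0:                              # master: distribute work, then stop workers
#       for i in xrange(1, size):
#           comm.send(cube_content, dest=i, tag=WORK_TAG)
#       for i in xrange(1, size):
#           comm.send(None, dest=i, tag=DIE_TAG)
#   else:                                      # worker: evaluate until DIE_TAG arrives
#       while True:
#           cube = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
#           if status.Get_tag() == DIE_TAG:
#               break
#           # ...evaluate the likelihood for `cube`...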
def _core_likelihood(self, cube):
self.logger.debug("Beginning Likelihood-calculation for %s." %
str(cube))
self.logger.debug("Beginning Likelihood-calculation for %s." % str(cube))
# translate cube to variables
variables = {}
for i, av in enumerate(self.active_variables):
for i, av in enumerate(self.active_variables):#{
variables[av] = cube[i]
#}
# create magnetic field
self.logger.debug("Creating magnetic field.")
b_field = self.magnetic_field_factory.generate(
variables=variables,
ensemble_size=self.ensemble_size,
random_seed=self.fixed_random_seed)
# create observables
self.logger.debug("Creating observables.")
observables = self.observer(b_field)
# add up individual log-likelihood terms
self.logger.debug("Evaluating likelihood(s).")
likelihood = ()
total_likelihood = 0
for like in self.likelihood:
for like in self.likelihood:#{
current_likelihood = like(observables)
likelihood += (current_likelihood, )
total_likelihood += current_likelihood
#}
self.logger.info("Evaluated likelihood: %f for %s" % (total_likelihood, str(cube)))
self.logger.info("Evaluated likelihood: %f for %s" %
(total_likelihood, str(cube)))
if self.sample_callback is not None:
if self.sample_callback is not None:#{
self.logger.debug("Creating sample-object.")
sample = Sample(variables=variables,
magnetic_field=b_field,
......@@ -237,34 +231,36 @@ class Pipeline(Loggable, object):
likelihood=likelihood,
total_likelihood=total_likelihood)
self.sample_callback(sample)
#}
return total_likelihood * self.likelihood_rescaler