...

Commits (4)
 ## likelihood module ### likelihood provides essential functions related to calculating likelihood ### simple likelihood provides the trivial likelihood calculation ### ensemble likelihood provides ensemble likelihood calculation
 ... ... @@ -41,11 +41,9 @@ class EnsembleLikelihood(Likelihood): def _process_simple_field(self, observable, measured_data, data_covariance): # https://en.wikipedia.org/wiki/Sherman%E2%80%93Morrison_formula#Generalization # B = A^{-1} + U U^dagger # A = data_covariance # B^{-1} c = (A_inv - # A_inv U (I_k + U^dagger A_inv U)^{-1} U^dagger A_inv) c # Theo's scheme of inverting matrices now modified by Jiaxin # use numpy.linalg.solve(A,b) to get inv(A)*b # thus safely avoid any possible difficulties brought by inverting matrices data_covariance = data_covariance.copy() k = observable.shape[0] n = observable.shape[1] ... ... @@ -54,82 +52,33 @@ class EnsembleLikelihood(Likelihood): obs_mean = observable.ensemble_mean().val.get_full_data() U = obs_val - obs_mean U *= np.sqrt(n) # compute quantities for OAS estimator mu = np.vdot(U, U)/k/n alpha = (np.einsum(U, [0, 1], U, [2, 1])**2).sum() alpha /= k**2 numerator = (1 - 2./n)*alpha + (mu*n)**2 denominator = (k + 1 - 2./n) * (alpha - ((mu*n)**2)/n) # OAS calculation modified by Jiaxin # empirical covariance S = U^dagger U / k (n x n); np.vdot would collapse to a scalar S = np.dot(U.conjugate().T, U)/k # trace of S TrS = np.trace(S) # trace of S^2 TrS2 = np.trace(np.dot(S,S)) numerator = (1.-2./n)*TrS2 + TrS**2 denominator = (k+1.-2./n)*(TrS2-(TrS**2)/n) if denominator == 0: rho = 1 else: rho = np.min([1, numerator/denominator]) self.logger.debug("rho: %f = %f / %f" % (rho, numerator, denominator)) # rescale U half/half V = U * np.sqrt(1-rho) / np.sqrt(k) self.logger.info(('data_cov', np.mean(data_covariance), 'rho*mu', rho*mu, 'rho', rho, 'mu', mu, 'alpha', alpha)) B = data_covariance + rho*mu V_B = V/B # build middle-matrix (kxk) middle = (np.eye(k) + np.einsum(V.conjugate(), [0, 1], V_B, [2, 1])) middle = np.linalg.inv(middle) c = measured_data - obs_mean # total covariance AC = data_covariance + (1-rho)*S + np.eye(n)*rho*TrS/n # obs - mean(sim) dc = measured_data - obs_mean # If the data was incomplete, i.e. contains np.NANs, set those values # to zero. 
c = np.nan_to_num(c) # assuming that A == A^dagger, this can be shortened # a_c = A.inverse_times(c) # u_a_c = a_c.dot(U, spaces=1) # u_a_c = u_a_c.conjugate() # and: double conjugate shouldn't make a difference # u_a_c = c.conjugate().dot(a_u, spaces=1).conjugate() # Pure NIFTy is # u_a_c = c.dot(a_u, spaces=1) # u_a_c_val = u_a_c.val.get_full_data() V_B_c = np.einsum(c, [1], V_B, [0, 1]) first_summand_val = c/B second_summand_val = np.einsum(middle, [0, 1], V_B_c, [1]) second_summand_val = np.einsum(V_B, [0, 1], second_summand_val, [0]) # # second_summand_val *= -1 # second_summand = first_summand.copy_empty() # second_summand.val = second_summand_val result_1 = np.vdot(c, first_summand_val) result_2 = -np.vdot(c, second_summand_val) # compute regularizing determinant of the covariance # det(A + UV^T) = det(A) det(I + V^T A^-1 U) if self.use_determinant: log_det = np.sum(np.log(data_covariance + np.sum((obs_val-obs_mean)**2, axis=0)/k))/n else: log_det = 0. result = -0.5*(result_1 + result_2 + log_det) self.logger.info("Calculated (%s): -1/2(%g + %g + %g) = %g" % (self.observable_name, result_1, result_2, log_det, result)) # result_array[i] = result # total_result = result_array.mean() dc = np.nan_to_num(dc) # calculate likelihood; quadratic form must be a scalar, so use vdot rather than elementwise product (sign_AC, logdet_AC) = np.linalg.slogdet(AC*2.*np.pi) result = -0.5*np.vdot(dc, np.linalg.solve(AC, dc)) - 0.5*sign_AC*logdet_AC return result
 ... ... @@ -37,7 +37,9 @@ class SimpleLikelihood(Likelihood): diff = data - obs_mean if self.data_covariance is not None: right = diff/self.data_covariance # modified by Jiaxin # it is not good practice to directly invert matrices right = np.linalg.solve(self.data_covariance, diff) else: right = diff return -0.5 * np.vdot(diff, right)
 ... ... @@ -18,8 +18,8 @@ from imagine.magnetic_fields.magnetic_field import MagneticField class ConstantMagneticField(MagneticField): @property def parameter_list(self): parameter_list = ['b_x', 'b_y', 'b_z'] ... ...
 ... ... @@ -17,14 +17,11 @@ # and financially supported by the Studienstiftung des deutschen Volkes. import numpy as np from imagine.magnetic_fields.magnetic_field.magnetic_field_factory \ import MagneticFieldFactory from imagine.magnetic_fields.magnetic_field.magnetic_field_factory import MagneticFieldFactory from constant_magnetic_field import ConstantMagneticField class ConstantMagneticFieldFactory(MagneticFieldFactory): @property def magnetic_field_class(self): return ConstantMagneticField ... ...
 ... ... @@ -17,9 +17,7 @@ # and financially supported by the Studienstiftung des deutschen Volkes. from mpi4py import MPI import simplejson as json import numpy as np from nifty import Field, FieldArray, RGSpace ... ... @@ -29,17 +27,16 @@ comm = MPI.COMM_WORLD size = comm.size rank = comm.rank class MagneticField(Field): def __init__(self, parameters={}, domain=None, val=None, dtype=None, distribution_strategy=None, copy=False, random_seed=None): super(MagneticField, self).__init__( domain=domain, val=val, dtype=dtype, distribution_strategy=distribution_strategy, copy=copy) super(MagneticField, self).__init__(domain=domain, val=val, dtype=dtype, distribution_strategy=distribution_strategy, copy=copy) assert(len(self.domain) == 3) assert(isinstance(self.domain[0], FieldArray)) ... ... @@ -47,19 +44,17 @@ class MagneticField(Field): assert(isinstance(self.domain[2], FieldArray)) self._parameters = {} for p in self.descriptor_lookup: for p in self.descriptor_lookup:#{ self._parameters[p] = np.float(parameters[p]) casted_random_seed = distributed_data_object( global_shape=(self.shape[0],), dtype=np.int, distribution_strategy=distribution_strategy) if random_seed is None: #} casted_random_seed = distributed_data_object(global_shape=(self.shape[0],), dtype=np.int, distribution_strategy=distribution_strategy) if random_seed is None:#{ random_seed = np.random.randint(np.uint32(-1)/3, size=self.shape[0]) random_seed = comm.bcast(random_seed, root=0) #} casted_random_seed[:] = random_seed self.random_seed = casted_random_seed ... ... @@ -72,9 +67,9 @@ class MagneticField(Field): return self._parameters def set_val(self, new_val=None, copy=False): if new_val is not None: raise RuntimeError("Setting the field values explicitly is not " "supported by MagneticField.") if new_val is not None:#{ raise RuntimeError("Setting the field values explicitly is not supported by MagneticField.") #} self._val = self._create_field() def _to_hdf5(self, hdf5_group): ... ...
 ... ... @@ -17,16 +17,11 @@ # and financially supported by the Studienstiftung des deutschen Volkes. import numpy as np from keepers import Loggable from imagine.carrier_mapper import unity_mapper from nifty import FieldArray, RGSpace from magnetic_field import MagneticField class MagneticFieldFactory(Loggable, object): def __init__(self, box_dimensions, resolution): ... ... @@ -34,9 +29,7 @@ class MagneticFieldFactory(Loggable, object): self.box_dimensions = box_dimensions self.resolution = resolution self._parameter_defaults = self._initial_parameter_defaults self._variable_to_parameter_mappings = \ self._initial_variable_to_parameter_mappings self._variable_to_parameter_mappings = self._initial_variable_to_parameter_mappings distances = np.array(self.box_dimensions) / np.array(self.resolution) self._grid_space = RGSpace(shape=self.resolution, distances=distances) ... ... @@ -44,9 +37,9 @@ class MagneticFieldFactory(Loggable, object): self._ensemble_cache = {} def _get_ensemble(self, ensemble_size): if ensemble_size not in self._ensemble_cache: self._ensemble_cache[ensemble_size] = \ FieldArray(shape=(ensemble_size,)) if ensemble_size not in self._ensemble_cache:#{ self._ensemble_cache[ensemble_size] = FieldArray(shape=(ensemble_size,)) #} return self._ensemble_cache[ensemble_size] @property ... ... @@ -56,8 +49,9 @@ class MagneticFieldFactory(Loggable, object): @box_dimensions.setter def box_dimensions(self, box_dimensions): dim = tuple(np.array(box_dimensions, dtype=np.float)) if len(dim) != 3: if len(dim) != 3:#{ raise ValueError("Input of box_dimensions must have length three.") #} self._box_dimensions = dim @property ... ... @@ -67,8 +61,9 @@ class MagneticFieldFactory(Loggable, object): @resolution.setter def resolution(self, resolution): resolution = tuple(np.array(resolution, dtype=np.int)) if len(resolution) != 3: if len(resolution) != 3:#{ raise ValueError("Input for resolution must have length three.") #} self._resolution = resolution @property ... 
... @@ -104,10 +99,11 @@ class MagneticFieldFactory(Loggable, object): @property def variable_defaults(self): variable_defaults = {} for parameter in self.parameter_defaults: for parameter in self.parameter_defaults:#{ low, high = self.variable_to_parameter_mappings[parameter] default = self.parameter_defaults[parameter] variable_defaults[parameter] = (default - low)/(high - low) #} return variable_defaults @property ... ... @@ -122,17 +118,17 @@ class MagneticFieldFactory(Loggable, object): value: [min, max] """ for k, v in new_mapping.items(): if k in self._variable_to_parameter_mappings: if k in self._variable_to_parameter_mappings:#{ key = str(k) value = [np.float(v[0]), np.float(v[1])] self._variable_to_parameter_mappings.update({key: value}) self.logger.debug("Updated variable_to_parameter_mapping %s " "to %s" % (key, str(value))) self.logger.debug("Updated variable_to_parameter_mapping %s to %s" % (key, str(value))) #} def _map_variables_to_parameters(self, variables): parameter_dict = {} for variable_name in variables: if variable_name in self.variable_to_parameter_mappings: if variable_name in self.variable_to_parameter_mappings:#{ mapping = self.variable_to_parameter_mappings[variable_name] mapped_variable = unity_mapper(variables[variable_name], a=mapping[0], ... ... @@ -141,8 +137,10 @@ class MagneticFieldFactory(Loggable, object): # a=mapping[0], # m=mapping[1], # b=mapping[2]) else: #} else:#{ mapped_variable = np.float(variables[variable_name]) #} parameter_dict[variable_name] = mapped_variable return parameter_dict ... ... 
@@ -150,16 +148,12 @@ class MagneticFieldFactory(Loggable, object): mapped_variables = self._map_variables_to_parameters(variables) work_parameters = self.parameter_defaults.copy() work_parameters.update(mapped_variables) domain = (self._get_ensemble(ensemble_size), self._grid_space, self._vector) result_magnetic_field = self.magnetic_field_class( domain=domain, parameters=work_parameters, distribution_strategy='equal', random_seed=random_seed) self.logger.debug("Generated magnetic field with work-parameters %s" % work_parameters) result_magnetic_field = self.magnetic_field_class(domain=domain, parameters=work_parameters, distribution_strategy='equal', random_seed=random_seed) self.logger.debug("Generated magnetic field with work-parameters %s" % work_parameters) return result_magnetic_field
 ... ... @@ -18,20 +18,21 @@ from imagine.magnetic_fields.magnetic_field import MagneticField class WMAP3yrMagneticField(MagneticField): @property def descriptor_lookup(self): lookup = \ {'b0': ['./MagneticField/Regular/WMAP/b0', 'value'], 'psi0': ['./MagneticField/Regular/WMAP/psi0', 'value'], 'psi1': ['./MagneticField/Regular/WMAP/psi1', 'value'], 'chi0': ['./MagneticField/Regular/WMAP/chi0', 'value'], 'random_rms': ['./MagneticField/Random/Global/rms', 'value'], 'random_rho': ['./MagneticField/Random/Global/rho', 'value'], 'random_a0': ['./MagneticField/Random/Global/a0', 'value'] } lookup = {'b0': ['./MagneticField/Regular/WMAP/b0', 'value'], 'psi0': ['./MagneticField/Regular/WMAP/psi0', 'value'], 'psi1': ['./MagneticField/Regular/WMAP/psi1', 'value'], 'chi0': ['./MagneticField/Regular/WMAP/chi0', 'value'], 'random_rms': ['./MagneticField/Random/Global/ES/rms', 'value'], 'random_rho': ['./MagneticField/Random/Global/ES/rho', 'value'], 'random_a0': ['./MagneticField/Random/Global/ES/a0', 'value'], 'random_k0': ['./MagneticField/Random/Global/ES/k0', 'value'], 'random_r0': ['./MagneticField/Random/Global/ES/r0', 'value'], 'random_z0': ['./MagneticField/Random/Global/ES/z0', 'value'] } return lookup def _create_field(self): ... ...
 ... ... @@ -16,13 +16,11 @@ # IMAGINE is being developed at the Max-Planck-Institut fuer Astrophysik # and financially supported by the Studienstiftung des deutschen Volkes. from imagine.magnetic_fields.magnetic_field.magnetic_field_factory \ import MagneticFieldFactory from imagine.magnetic_fields.magnetic_field.magnetic_field_factory import MagneticFieldFactory from wmap3yr_magnetic_field import WMAP3yrMagneticField class WMAP3yrMagneticFieldFactory(MagneticFieldFactory): @property def magnetic_field_class(self): return WMAP3yrMagneticField ... ... @@ -33,9 +31,13 @@ class WMAP3yrMagneticFieldFactory(MagneticFieldFactory): 'psi0': 27.0, 'psi1': 0.9, 'chi0': 25.0, # using ES random field generator in hammurabiX 'random_rms': 2.0, 'random_rho': 0.5, 'random_a0': 1.7} 'random_a0': 1.7, 'random_k0': 0.5, 'random_r0': 8.0, 'random_z0': 1.0} return defaults @property ... ... @@ -43,13 +45,15 @@ class WMAP3yrMagneticFieldFactory(MagneticFieldFactory): return self._generate_variable_to_parameter_mapping_defaults(n=3) def _generate_variable_to_parameter_mapping_defaults(self, n): defaults = { 'b0': self._positive_interval(6.0, 1.9, n), # b0 astro-ph/0603450 'psi0': self._positive_interval(27.0, 7.0, n), # psi0 astro-ph/0603450 'psi1': self._positive_interval(0.9, 5.0, n), # psi1 astro-ph/0603450 'chi0': self._positive_interval(25, 8.0, n), # xsi0 astro-ph/0603450 'random_rms': self._positive_interval(2.0, 0.6, n), 'random_rho': self._positive_interval(0.5, 0.166, n), 'random_a0': self._positive_interval(1.7, 0.5, n) } defaults = {'b0': self._positive_interval(6.0, 1.9, n), # b0 astro-ph/0603450 'psi0': self._positive_interval(27.0, 7.0, n), # psi0 astro-ph/0603450 'psi1': self._positive_interval(0.9, 5.0, n), # psi1 astro-ph/0603450 'chi0': self._positive_interval(25, 8.0, n), # xsi0 astro-ph/0603450 'random_rms': self._positive_interval(2.0, 0.6, n), 'random_rho': self._positive_interval(0.5, 0.166, n), 'random_a0': self._positive_interval(1.7, 0.5, n), 'random_k0': 
self._positive_interval(0.5, 0.15, n), 'random_r0': self._positive_interval(8.0, 2.0, n), 'random_z0': self._positive_interval(1.0, 0.3, n) } return defaults
 ... ... @@ -18,18 +18,15 @@ from nifty import Field, FieldArray class Observable(Field): def __init__(self, domain=None, val=None, dtype=None, distribution_strategy=None, copy=False): super(Observable, self).__init__( domain=domain, val=val, dtype=dtype, distribution_strategy=distribution_strategy, copy=copy) super(Observable, self).__init__(domain=domain, val=val, dtype=dtype, distribution_strategy=distribution_strategy, copy=copy) assert(len(self.domain) == 2) assert(isinstance(self.domain[0], FieldArray)) ... ... @@ -42,8 +39,8 @@ class Observable(Field): return self._ensemble_mean def _to_hdf5(self, hdf5_group): if hasattr(self, '_ensemble_mean'): return_dict = {'ensemble_mean': self._ensemble_mean} if hasattr(self, "_ensemble_mean"): return_dict = {"ensemble_mean": self._ensemble_mean} else: return_dict = {} return_dict.update( ... ... @@ -55,7 +52,7 @@ class Observable(Field): new_field = super(Observable, cls)._from_hdf5(hdf5_group=hdf5_group, repository=repository) try: observable_mean = repository.get('ensemble_mean', hdf5_group) observable_mean = repository.get("ensemble_mean", hdf5_group) new_field._observable_mean = observable_mean except(KeyError): pass ... ...
 ... ... @@ -20,11 +20,9 @@ import os import tempfile import subprocess import xml.etree.ElementTree as et import numpy as np from d2o import distributed_data_object from nifty import HPSpace from imagine.observers.observer import Observer ... ... @@ -32,35 +30,30 @@ from .observable_mixins import ObservableMixin from .model_mixins import MagneticFieldModel from imagine.observables import Observable class Hammurapy(Observer): def __init__(self, hammurabi_executable, magnetic_field_model, observables, input_directory='./input', working_directory_base='.', nside=64): input_directory='./input', working_directory_base='.',nside=64): self.hammurabi_executable = os.path.abspath(hammurabi_executable) if not isinstance(magnetic_field_model, MagneticFieldModel): raise TypeError("magnetic_field_model must be an instance of the " "MagneticField class.") if not isinstance(magnetic_field_model, MagneticFieldModel):#{ raise TypeError("magnetic_field_model must be an instance of the MagneticField class.") #} self.magnetic_field_model = magnetic_field_model if not isinstance(observables, list): if not isinstance(observables, list):#{ if isinstance(observables, tuple): observables = list(observables) else: observables = [observables] for obs in observables: #} for obs in observables:#{ if not isinstance(obs, ObservableMixin): raise TypeError("observables must be an instance of the " "ObservableMixin class.") raise TypeError("observables must be an instance of the ObservableMixin class.") #} self.observables = observables self.input_directory = os.path.abspath(input_directory) self.working_directory_base = os.path.abspath(working_directory_base) self.nside = int(nside) self._hpSpace = HPSpace(nside=self.nside) self.last_call_log = "" @property ... ... 
@@ -75,7 +68,7 @@ class Hammurapy(Observer): # Try multiple times in order to overcome 'Directory not empty' errors # Hopefully open file handles get closed in the meantime n = 0 while n < 10: while n < 10:#{ temp_process = subprocess.Popen(['rm', '-rf', path], stderr=subprocess.PIPE) # wait until process is finished ... ... @@ -84,21 +77,22 @@ class Hammurapy(Observer): if errlog == '': self.logger.debug("Successfully removed temporary folder.") break #} else: self.logger.warning('Could not delete %s' % path) def _call_hammurabi(self, path): temp_process = subprocess.Popen( [self.hammurabi_executable, 'parameters.xml'], stdout=subprocess.PIPE, cwd=path) temp_process = subprocess.Popen([self.hammurabi_executable, 'parameters.xml'], stdout=subprocess.PIPE, cwd=path) self.last_call_log = temp_process.communicate()[0] def _initialize_observable_dict(self, magnetic_field): observable_dict = {} for observable in self.observables: observable_dict = self._initialize_observable_dict_helper( observable_dict, magnetic_field, observable) observable_dict = self._initialize_observable_dict_helper(observable_dict, magnetic_field, observable) return observable_dict def _initialize_observable_dict_helper(self, observable_dict, ... ... @@ -108,39 +102,31 @@ class Hammurapy(Observer): # It is important to initialize the Observables with an explicit # value. Otherwise the d2o will not instantaneuosly be created # (c.f. lazy object creation). 
observable_dict[component] = Observable( val=0, domain=(ensemble_space, self._hpSpace), distribution_strategy='equal') observable_dict[component] = Observable(val=0, domain=(ensemble_space, self._hpSpace), distribution_strategy='equal') return observable_dict def _write_parameter_xml(self, magnetic_field, local_ensemble_index, working_directory): # load the default xml try: try:#{ default_parameters_xml = os.path.join(self.input_directory, 'default_parameters.xml') tree = et.parse(default_parameters_xml) except IOError: #} except IOError:#{ import imagine module_path = os.path.split( imagine.observers.hammurapy.__file__)[0] default_parameters_xml = os.path.join( module_path, 'input/default_parameters.xml') module_path = os.path.split(imagine.observers.hammurapy.__file__)[0] default_parameters_xml = os.path.join(module_path, 'input/default_parameters.xml') tree = et.parse(default_parameters_xml) #} root = tree.getroot() # modify the default_parameters.xml custom_parameters = [ ['./Grid/Integration/shell/auto/nside_min', 'value', self.nside], ['./Grid/Integration/nside_sim', 'value', self.nside], # ['./Interface/fe_grid', 'read', '1'], # ['./Interface/fe_grid', 'filename', # os.path.join(self.input_directory, 'fe_grid.bin')] custom_parameters = [['./Grid/Shell/layer/auto/nside_min', 'value', self.nside], ['./Grid/Shell/nside_sim', 'value', self.nside], ] # access the magnetic-field's random-eed d2o directly, since we # know that the distribution strategy is the same for the # randam samples and the magnetic field itself ... ... @@ -156,27 +142,24 @@ class Hammurapy(Observer): # set up grid parameters grid_space = magnetic_field.domain[1] lx, ly, lz = \ np.array(grid_space.shape)*np.array(grid_space.distances)/2. lx, ly, lz = np.array(grid_space.shape)*np.array(grid_space.distances)/2. 
nx, ny, nz = grid_space.shape custom_parameters += [['./Grid/Box/x_min', 'value', -lx], ['./Grid/Box/x_max', 'value', lx], ['./Grid/Box/y_min', 'value', -ly], ['./Grid/Box/y_max', 'value', ly], ['./Grid/Box/z_min', 'value', -lz], ['./Grid/Box/z_max', 'value', lz], ['./Grid/Box/nx', 'value', nx], ['./Grid/Box/ny', 'value', ny], ['./Grid/Box/nz', 'value', nz]] for parameter in custom_parameters: custom_parameters += [['./Grid/Box_GMF/x_min', 'value', -lx], ['./Grid/Box_GMF/x_max', 'value', lx], ['./Grid/Box_GMF/y_min', 'value', -ly], ['./Grid/Box_GMF/y_max', 'value', ly], ['./Grid/Box_GMF/z_min', 'value', -lz], ['./Grid/Box_GMF/z_max', 'value', lz], ['./Grid/Box_GMF/nx', 'value', nx], ['./Grid/Box_GMF/ny', 'value', ny], ['./Grid/Box_GMF/nz', 'value', nz]] for parameter in custom_parameters:#{ root.find(parameter[0]).set(parameter[1], str(parameter[2])) #} self.magnetic_field_model.update_parameter_xml(root) for observable in self.observables: for observable in self.observables:#{ observable.update_parameter_xml(root) #} parameters_file_path = os.path.join(working_directory, 'parameters.xml') tree.write(parameters_file_path) ... ... 
@@ -184,54 +167,41 @@ class Hammurapy(Observer): def _fill_observable_dict(self, observable_dict, working_directory, local_ensemble_index): for observable in self.observables: observable.fill_observable_dict( observable_dict=observable_dict, working_directory=working_directory, local_ensemble_index=local_ensemble_index, nside=self.nside) observable.fill_observable_dict(observable_dict=observable_dict, working_directory=working_directory, local_ensemble_index=local_ensemble_index, nside=self.nside) return observable_dict def __call__(self, magnetic_field): if not isinstance(magnetic_field, self.magnetic_field_class): raise ValueError("Given magnetic field is not a subclass of" + " %s" % str(self.magnetic_field_class)) observable_dict = self._initialize_observable_dict( magnetic_field=magnetic_field) raise ValueError("Given magnetic field is not a subclass of %s" % str(self.magnetic_field_class)) observable_dict = self._initialize_observable_dict(magnetic_field=magnetic_field) # iterate over ensemble and put result into result_observable # get the local shape by creating a dummy d2o ensemble_number = magnetic_field.shape[0] dummy = distributed_data_object(global_shape=(ensemble_number,), distribution_strategy='equal', dtype=np.float) local_length = dummy.distributor.local_length for local_ensemble_index in xrange(local_length): self.logger.debug("Processing local_ensemble_index %i." % local_ensemble_index) self.logger.debug("Processing local_ensemble_index %i." 
% local_ensemble_index) # create a temporary folder working_directory = self._make_temp_folder() self._write_parameter_xml( magnetic_field=magnetic_field, local_ensemble_index=local_ensemble_index, working_directory=working_directory) self._write_parameter_xml(magnetic_field=magnetic_field, local_ensemble_index=local_ensemble_index, working_directory=working_directory) # call hammurabi self._call_hammurabi(working_directory) # if hammurabi failed, _fill_observable_dict will fail try: self._fill_observable_dict(observable_dict, working_directory, local_ensemble_index) except: self.logger.critical("Hammurabi failed! Last call log:\n" + self.last_call_log) self.logger.critical("Hammurabi failed! Last call log:\n" + self.last_call_log) raise finally: self._remove_folder(working_directory) return observable_dict
 ... ... @@ -30,8 +30,8 @@ ... ... @@ -43,20 +43,52 @@ ... ... @@ -64,16 +96,16 @@ ... ... @@ -123,46 +155,54 @@ ... ... @@ -185,16 +225,31 @@ ... ... @@ -237,29 +292,35 @@ ... ... @@ -272,35 +333,17 @@
 ... ... @@ -19,13 +19,14 @@ from .magnetic_field_model import MagneticFieldModel from imagine.magnetic_fields.wmap3yr_magnetic_field import WMAP3yrMagneticField class WMAP3yrMixin(MagneticFieldModel): def update_parameter_xml(self, root): custom_parameters = [ ['./MagneticField/Regular', 'type', 'WMAP'], ['./MagneticField/Random', 'cue', '1'], ['./MagneticField/Random', 'type', 'Global']] custom_parameters = [['./MagneticField/Regular', 'type', 'WMAP'], ['./MagneticField/Regular', 'cue', '1'], ['./MagneticField/Random', 'cue', '1'], ['./MagneticField/Random', 'type', 'Global'], ['./MagneticField/Random/Global','type','ES']] for parameter in custom_parameters: root.find(parameter[0]).set(parameter[1], str(parameter[2])) ... ...
 ... ... @@ -17,11 +17,10 @@ # and financially supported by the Studienstiftung des deutschen Volkes. import xml.etree.ElementTree as et from .observable_mixin import ObservableMixin class DMMixin(ObservableMixin): @property def obs_name(self): return 'dm' ... ... @@ -31,7 +30,6 @@ class DMMixin(ObservableMixin): return ['dm'] def update_parameter_xml(self, root): element = et.Element('DM', {'cue': '1', 'filename': self.obs_name+'.fits'}) element = et.Element('DM', {'cue': '1', 'filename': self.obs_name+'.fits'}) output = root.find('Obsout') output.append(element)
 ... ... @@ -17,11 +17,10 @@ # and financially supported by the Studienstiftung des deutschen Volkes. import xml.etree.ElementTree as et from .observable_mixin import ObservableMixin class FDMixin(ObservableMixin): @property def obs_name(self): return 'fd' ... ... @@ -31,7 +30,6 @@ class FDMixin(ObservableMixin): return ['fd'] def update_parameter_xml(self, root): element = et.Element('Faraday', {'cue': '1', 'filename': self.obs_name+'.fits'}) element = et.Element('Faraday', {'cue': '1', 'filename': self.obs_name+'.fits'}) output = root.find('Obsout') output.append(element)
 ... ... @@ -18,13 +18,12 @@ import abc import os import healpy from keepers import Loggable class ObservableMixin(Loggable, object): @abc.abstractproperty def obs_name(self): raise NotImplementedError ... ... @@ -42,10 +41,10 @@ class ObservableMixin(Loggable, object): map_list = self._read_fits_file(path=working_directory, name=self.obs_name + '.fits', nside=nside) for i, map_component in enumerate(map_list): for i, map_component in enumerate(map_list):#{ temp_obs = observable_dict[self.component_names[i]] temp_obs.val.data[local_ensemble_index] = map_component #} def _read_fits_file(self, path, name, nside): map_path = os.path.join(path, name) ... ... @@ -53,8 +52,7 @@ class ObservableMixin(Loggable, object): i = 0 while True: try: loaded_map = healpy.read_map(map_path, verbose=False, field=i) loaded_map = healpy.read_map(map_path, verbose=False, field=i) # loaded_map = healpy.ud_grade(loaded_map, nside_out=nside) result_list += [loaded_map] i += 1 ... ...
 ... ... @@ -17,11 +17,10 @@ # and financially supported by the Studienstiftung des deutschen Volkes. import xml.etree.ElementTree as et from .observable_mixin import ObservableMixin class SyncMixin(ObservableMixin): def __init__(self, frequency='23'): self.frequency = str(frequency) ... ...
 ... ... @@ -18,11 +18,8 @@ import os import numpy as np from scipy import optimize from mpi4py import MPI from keepers import Loggable from imagine.likelihoods import Likelihood ... ... @@ -39,7 +36,6 @@ rank = comm.rank WORK_TAG = 0 DIE_TAG = 1 class Pipeline(Loggable, object): """ The pipeline ... ... @@ -62,19 +58,19 @@ class Pipeline(Loggable, object): self.prior = prior self.active_variables = active_variables self.ensemble_size = ensemble_size # rescaling total likelihood in _core_likelihood self.likelihood_rescaler = 1. # setting defaults for pymultinest self.pymultinest_parameters = {'verbose': True, 'n_iter_before_update': 1, 'n_live_points': 100} self.pymultinest_parameters = {"verbose": True, "n_iter_before_update": 1, "n_live_points": 100} self.pymultinest_parameters.update(pymultinest_parameters) self.sample_callback = sample_callback # seed for generating random field self.fixed_random_seed = None self.likelihood_threshold = 0. # checking likelihood threshold in _multinest_likelihood self.check_threshold = False self.likelihood_threshold = 0. @property def observer(self): ... ... @@ -82,8 +78,9 @@ class Pipeline(Loggable, object): @observer.setter def observer(self, observer): if not isinstance(observer, Observer): if not isinstance(observer, Observer):#{ raise TypeError("observer must be an instance of Observer-class.") #} self.logger.debug("Setting observer.") self._observer = observer ... ... 
@@ -95,14 +92,15 @@ class Pipeline(Loggable, object): def likelihood(self, likelihood): self.logger.debug("Setting likelihood.") self._likelihood = () if not (isinstance(likelihood, list) or isinstance(likelihood, tuple)): if not (isinstance(likelihood, list) or isinstance(likelihood, tuple)):#{ likelihood = [likelihood] for l in likelihood: if not isinstance(l, Likelihood): raise TypeError( "likelihood must be an instance of Likelihood-class.") #} for l in likelihood:#{ if not isinstance(l, Likelihood):#{ raise TypeError("likelihood must be an instance of Likelihood-class.") #} self._likelihood += (l,) #} @property def prior(self): ... ... @@ -111,9 +109,9 @@ class Pipeline(Loggable, object): @prior.setter def prior(self, prior): self.logger.debug("Setting prior.") if not isinstance(prior, Prior): raise TypeError( "prior must be an instance of Prior-class.") if not isinstance(prior, Prior):#{ raise TypeError("prior must be an instance of Prior-class.") #} self._prior = prior @property ... ... @@ -122,10 +120,9 @@ class Pipeline(Loggable, object): @magnetic_field_factory.setter def magnetic_field_factory(self, magnetic_field_factory): if not isinstance(magnetic_field_factory, MagneticFieldFactory): raise TypeError( "magnetic_field_factory must be an instance of the " "MagneticFieldFactory-class.") if not isinstance(magnetic_field_factory, MagneticFieldFactory):#{ raise TypeError("magnetic_field_factory must be an instance of the MagneticFieldFactory-class.") #} self.logger.debug("Setting magnetic_field_factory.") self._magnetic_field_factory = magnetic_field_factory ... ... 
@@ -135,14 +132,14 @@ class Pipeline(Loggable, object): @active_variables.setter def active_variables(self, active_variables): if not isinstance(active_variables, list): raise TypeError( "active_variables must be a list.") self.logger.debug("Resetting active_variables to %s" % str(active_variables)) if not isinstance(active_variables, list):#{ raise TypeError("active_variables must be a list.") #} self.logger.debug("Resetting active_variables to %s" % str(active_variables)) new_active = [] for av in active_variables: for av in active_variables:#{ new_active += [str(av)] #} self._active_variables = new_active @property ... ... @@ -151,10 +148,10 @@ class Pipeline(Loggable, object): @ensemble_size.setter def ensemble_size(self, ensemble_size): ensemble_size = int(ensemble_size) if ensemble_size <= 0: if ensemble_size <= 0:#{ raise ValueError("ensemble_size must be positive!") #} self.logger.debug("Setting ensemble size to %i." % ensemble_size) self._ensemble_size = ensemble_size ... ... @@ -162,74 +159,71 @@ class Pipeline(Loggable, object): cube_content = np.empty(ndim) for i in xrange(ndim): cube_content[i] = cube[i] # heuristic for minimizers: # if a parameter value from outside of the cube is requested, return # the worst possible likelihood value if np.any(cube_content > 1.) or np.any(cube_content < 0.): self.logger.info('Cube %s requested. Returned most negative ' 'possible number.' % cube_content) if np.any(cube_content > 1.) or np.any(cube_content < 0.):#{ self.logger.info("Cube %s requested. Returned most negative possible number." 
% cube_content) return np.nan_to_num(-np.inf) if rank != 0: raise RuntimeError("_multinest_likelihood must only be called on " "rank==0.") #} if rank != 0:#{ raise RuntimeError("_multinest_likelihood must only be called on rank==0.") #} error_count = 0 while error_count < 5: for i in xrange(1, size): for i in xrange(1, size):#{ comm.send(cube_content, dest=i, tag=WORK_TAG) #} self.logger.debug("Sent multinest-cube to nodes with rank > 0.") likelihood = self._core_likelihood(cube_content) if likelihood < self.likelihood_threshold or \ not self.check_threshold: if likelihood < self.likelihood_threshold or not self.check_threshold: break else: self.logger.error("Positive log-likelihood value encountered!" "Redoing calculation.") self.logger.error("Positive log-likelihood value encountered! Redoing calculation.") #} return likelihood # on slave nodes def _listen_for_likelihood_calls(self): status = MPI.Status() while True: while True:#{ cube = comm.recv(source=0, tag=MPI.ANY_TAG, status=status) if status == DIE_TAG: if status == DIE_TAG:#{ self.logger.debug("Received DIE_TAG from rank 0.") break #} self.logger.debug("Received cube from rank 0.") self._core_likelihood(cube) #} def _core_likelihood(self, cube): self.logger.debug("Beginning Likelihood-calculation for %s." % str(cube)) self.logger.debug("Beginning Likelihood-calculation for %s." 
% str(cube)) # translate cube to variables variables = {} for i, av in enumerate(self.active_variables): for i, av in enumerate(self.active_variables):#{ variables[av] = cube[i] #} # create magnetic field self.logger.debug("Creating magnetic field.") b_field = self.magnetic_field_factory.generate( variables=variables, ensemble_size=self.ensemble_size, random_seed=self.fixed_random_seed) # create observables self.logger.debug("Creating observables.") observables = self.observer(b_field) # add up individual log-likelihood terms self.logger.debug("Evaluating likelihood(s).") likelihood = () total_likelihood = 0 for like in self.likelihood: for like in self.likelihood:#{ current_likelihood = like(observables) likelihood += (current_likelihood, ) total_likelihood += current_likelihood #} self.logger.info("Evaluated likelihood: %f for %s" % (total_likelihood, str(cube))) self.logger.info("Evaluated likelihood: %f for %s" % (total_likelihood, str(cube))) if self.sample_callback is not None: if self.sample_callback is not None:#{ self.logger.debug("Creating sample-object.") sample = Sample(variables=variables, magnetic_field=b_field, ... ... @@ -237,34 +231,36 @@ class Pipeline(Loggable, object): likelihood=likelihood, total_likelihood=total_likelihood) self.sample_callback(sample) #}