Commit 1974cd70 authored by Martin Reinecke's avatar Martin Reinecke
Browse files

very first run

parent a39d4d69
......@@ -29,7 +29,7 @@ def get_random_LOS(n_los):
if __name__ == '__main__':
# FIXME description of the tutorial
np.random.seed(42)
position_space = ift.RGSpace([128, 128])
position_space = ift.RGSpace([512, 512])
# Setting up an amplitude model
A = ift.AmplitudeModel(position_space, 16, 1, 10, -4., 1, 0., 1.)
......
......@@ -20,11 +20,16 @@ from __future__ import absolute_import, division, print_function
from .compat import *
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
dobj_mpi = False
if dobj_mpi:
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
from .data_objects.numpy_do import *
else:
from .data_objects.distributed_do import *
except ImportError:
from .data_objects.numpy_do import *
else:
from .data_objects.distributed_do import *
except ImportError:
else:
from .data_objects.numpy_do import *
......@@ -6,6 +6,31 @@ from ..linearization import Linearization
from ..operators.scaling_operator import ScalingOperator
from ..operators.block_diagonal_operator import BlockDiagonalOperator
from .. import utilities
from ..field import Field
from ..multi_field import MultiField
from mpi4py import MPI
import numpy as np
_comm = MPI.COMM_WORLD
ntask = _comm.Get_size()
rank = _comm.Get_rank()
master = (rank == 0)
def _shareRange(nwork, nshares, myshare):
nbase = nwork//nshares
additional = nwork % nshares
lo = myshare*nbase + min(myshare, additional)
hi = lo + nbase + int(myshare < additional)
return lo, hi
def np_allreduce_sum(arr):
    """Element-wise sum of `arr` over all MPI tasks.

    Every task receives the full reduced array; the input array is left
    untouched.
    """
    out = np.empty_like(arr)
    _comm.Allreduce(arr, out, MPI.SUM)
    return out
def allreduce_sum_field(fld):
    """MPI-allreduce (sum) a Field or MultiField over all tasks.

    Returns a new object of the same type whose local data is the
    element-wise sum of `fld`'s local data across every MPI task.
    Raises NotImplementedError for any other input type.
    """
    if isinstance(fld, Field):
        return Field.from_local_data(fld.domain, np_allreduce_sum(fld.local_data))
    # Guard the MultiField branch explicitly; in the original the trailing
    # `raise` was unreachable dead code after an unconditional return, so
    # unsupported types crashed with AttributeError instead.
    if isinstance(fld, MultiField):
        res = tuple(Field.from_local_data(f.domain, np_allreduce_sum(f.local_data))
                    for f in fld.values())
        return MultiField(fld.domain, res)
    raise NotImplementedError
class KL_Energy(Energy):
......@@ -13,13 +38,17 @@ class KL_Energy(Energy):
want_metric=False):
super(KL_Energy, self).__init__(position)
self._h = h
self._nsamp = nsamp
self._constants = constants
self._want_metric = want_metric
if _samples is None:
lo, hi = _shareRange(nsamp, ntask, rank)
met = h(Linearization.make_var(position, True)).metric
_samples = tuple(met.draw_sample(from_inverse=True)
for _ in range(nsamp))
self._samples = _samples
_samples = []
for i in range(lo, hi):
np.random.seed(i)
_samples.append(met.draw_sample(from_inverse=True))
self._samples = tuple(_samples)
if len(constants) == 0:
tmp = Linearization.make_var(position, want_metric)
else:
......@@ -28,14 +57,14 @@ class KL_Energy(Energy):
bdop = BlockDiagonalOperator(position.domain, tuple(ops))
tmp = Linearization(position, bdop, want_metric=want_metric)
mymap = map(lambda v: self._h(tmp+v), self._samples)
tmp = utilities.my_sum(mymap) * (1./len(self._samples))
self._val = tmp.val.local_data[()]
self._grad = tmp.gradient
tmp = utilities.my_sum(mymap) * (1./self._nsamp)
self._val = np_allreduce_sum(tmp.val.local_data)[()]
self._grad = allreduce_sum_field(tmp.gradient)
self._metric = tmp.metric
def at(self, position):
return KL_Energy(position, self._h, 0, self._constants, self._samples,
self._want_metric)
return KL_Energy(position, self._h, self._nsamp, self._constants,
self._samples, self._want_metric)
@property
def value(self):
......@@ -46,7 +75,7 @@ class KL_Energy(Energy):
return self._grad
def apply_metric(self, x):
return self._metric(x)
return allreduce_sum_field(self._metric(x))
@property
def samples(self):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment