Commit 4ed58632 authored by Martin Reinecke

Merge branch 'immutable_fields' into 'NIFTy_5'

Immutable fields

See merge request ift/nifty-dev!39
parents 8cf302cb 83400bbe
......@@ -430,7 +430,7 @@
"mask = ift.Field.from_global_data(s_space, mask)\n",
"\n",
"R = ift.DiagonalOperator(mask)*HT\n",
"n = n.to_global_data()\n",
"n = n.to_global_data().copy()\n",
"n[l:h] = 0\n",
"n = ift.Field.from_global_data(s_space, n)\n",
"\n",
......@@ -501,7 +501,7 @@
"m_data = HT(m).to_global_data()\n",
"m_var_data = m_var.to_global_data()\n",
"uncertainty = np.sqrt(m_var_data)\n",
"d_data = d.to_global_data()\n",
"d_data = d.to_global_data().copy()\n",
"\n",
"# Set lost data to NaN for proper plotting\n",
"d_data[d_data == 0] = np.nan"
......@@ -586,7 +586,7 @@
"mask = ift.Field.from_global_data(s_space, mask)\n",
"\n",
"R = ift.DiagonalOperator(mask)*HT\n",
"n = n.to_global_data()\n",
"n = n.to_global_data().copy()\n",
"n[l:h, l:h] = 0\n",
"n = ift.Field.from_global_data(s_space, n)\n",
"curv = Curvature(R=R, N=N, Sh=Sh)\n",
......@@ -731,7 +731,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
"version": "3.6.6"
}
},
"nbformat": 4,
......
......@@ -5,7 +5,6 @@ import numpy as np
def get_random_LOS(n_los):
starts = list(np.random.uniform(0, 1, (n_los, 2)).T)
ends = list(np.random.uniform(0, 1, (n_los, 2)).T)
return starts, ends
......@@ -87,20 +86,12 @@ if __name__ == '__main__':
ift.plot([A.at(position).value, A.at(MOCK_POSITION).value],
name='power.pdf')
avrg = 0.
va = 0.
powers = []
sc = ift.StatCalculator()
for sample in samples:
sam = signal.at(sample + position).value
powers.append(A.at(sample+position).value)
avrg += sam
va += sam**2
avrg /= len(samples)
va /= len(samples)
va -= avrg**2
std = ift.sqrt(va)
ift.plot(avrg, name='avrg.pdf')
ift.plot(std, name='std.pdf')
sc.add(signal.at(sample+position).value)
ift.plot(sc.mean, name='avrg.pdf')
ift.plot(ift.sqrt(sc.var), name='std.pdf')
powers = [A.at(s+position).value for s in samples]
ift.plot([A.at(position).value, A.at(MOCK_POSITION).value]+powers,
name='power.pdf')
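
The refactor above replaces the hand-written avrg/va accumulation with ift.StatCalculator. A minimal sketch of the same running-statistics idea in plain numpy follows; it mirrors the old bookkeeping and is not necessarily how StatCalculator is implemented internally.

import numpy as np

class RunningStats(object):
    """Accumulates mean and (population) variance of equally-shaped samples."""
    def __init__(self):
        self._n = 0
        self._sum = 0.
        self._sumsq = 0.

    def add(self, sample):
        self._n += 1
        self._sum = self._sum + sample
        self._sumsq = self._sumsq + sample*sample

    @property
    def mean(self):
        return self._sum/self._n

    @property
    def var(self):
        # biased variance, exactly like the removed avrg/va code
        return self._sumsq/self._n - self.mean**2

stats = RunningStats()
for _ in range(100):
    stats.add(np.random.normal(size=10))
print(stats.mean, np.sqrt(stats.var))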
......@@ -142,12 +142,11 @@ There is also a set of convenience functions to generate fields with constant
values or fields filled with random numbers according to a user-specified
distribution.
Fields are the only fundamental NIFTy objects which can change state after they
have been constructed: while their data type, domain, and array shape cannot
be modified, the actual data content of the array may be manipulated during the
lifetime of the object. This is a slight deviation from the philosophy that all
NIFTy objects should be immutable, but this choice offers considerable
performance benefits.
Like almost all NIFTy objects, fields are immutable: their value or any other
attribute cannot be modified after construction. To manipulate a field in ways
that are not covered by the provided standard operations, its data content must
be extracted first, then changed, and a new field has to be created from the
result.
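
The paragraph above, together with the notebook changes earlier in this diff, suggests a standard recipe for "modifying" an immutable field: extract the data, copy it, edit the copy, and wrap the result in a new field. A sketch using only calls that appear in this diff (the nifty5 import name is an assumption):

import numpy as np
import nifty5 as ift   # assumed package name for the NIFTy_5 branch

s_space = ift.RGSpace(128)
n = ift.Field.from_global_data(s_space, np.random.normal(size=s_space.shape))

data = n.to_global_data().copy()               # extract; copy, since the array may be read-only
data[10:20] = 0                                # edit the private copy
n = ift.Field.from_global_data(s_space, data)  # build a fresh Field from the result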
Linear Operators
......
......@@ -54,7 +54,8 @@ extensions = [
'sphinx.ext.napoleon',
# 'sphinx.ext.coverage',
# 'sphinx.ext.todo',
'sphinx.ext.mathjax',
# 'sphinx.ext.mathjax',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode'
]
......@@ -82,9 +83,9 @@ author = u'Theo Steininger / Martin Reinecke'
# built documents.
#
# The short X.Y version.
version = u'4.0'
version = u'5.0'
# The full version, including alpha/beta/rc tags.
release = u'4.0.0'
release = u'5.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......
......@@ -89,7 +89,7 @@ from .library.bernoulli_energy import BernoulliEnergy
from . import extra
from .utilities import memo
from .utilities import memo, frozendict
from .logger import logger
......
......@@ -16,11 +16,13 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import (absolute_import, division, print_function)
from builtins import *
from functools import reduce
import numpy as np
from .random import Random
from mpi4py import MPI
import sys
from functools import reduce
_comm = MPI.COMM_WORLD
ntask = _comm.Get_size()
......@@ -62,6 +64,9 @@ class data_object(object):
if local_shape(self._shape, self._distaxis) != self._data.shape:
raise ValueError("shape mismatch")
def copy(self):
return data_object(self._shape, self._data.copy(), self._distaxis)
# def _sanity_checks(self):
# # check whether the distaxis is consistent
# if self._distaxis < -1 or self._distaxis >= len(self._shape):
......
......@@ -16,6 +16,8 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import (absolute_import, division, print_function)
from builtins import *
from functools import reduce
from .domains.domain import Domain
......@@ -104,6 +106,16 @@ class DomainTuple(object):
"""
return self._shape
@property
def local_shape(self):
"""tuple of int: number of pixels along each axis on the local task
The shape of the array-like object required to store information
living on part of the domain which is stored on the local MPI task.
"""
from .dobj import local_shape
return local_shape(self._shape)
@property
def size(self):
"""int : total number of pixels.
......
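
The new local_shape property reports the shape of the chunk stored on the calling MPI task. The real dobj.local_shape is not shown in this hunk; a rough sketch of what such a helper could compute, assuming an even split along one distribution axis:

def local_shape(shape, ntask, rank, distaxis=0):
    """Shape of the chunk a given MPI task holds, splitting axis `distaxis`
    as evenly as possible among `ntask` tasks."""
    if len(shape) == 0 or distaxis == -1:   # scalar or not distributed
        return tuple(shape)
    n = shape[distaxis]
    lo = (n*rank)//ntask
    hi = (n*(rank+1))//ntask
    res = list(shape)
    res[distaxis] = hi - lo
    return tuple(res)

print(local_shape((100, 50), ntask=3, rank=2))   # (34, 50); ranks 0 and 1 get (33, 50)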
......@@ -88,6 +88,16 @@ class Domain(NiftyMetaBase()):
"""
raise NotImplementedError
@property
def local_shape(self):
"""tuple of int: number of pixels along each axis on the local task
The shape of the array-like object required to store information
living on part of the domain which is stored on the local MPI task.
"""
from ..dobj import local_shape
return local_shape(self.shape)
@abc.abstractproperty
def size(self):
"""int: total number of pixels.
......
......@@ -101,12 +101,7 @@ class LMSpace(StructuredDomain):
# by Challinor et al.
# http://arxiv.org/abs/astro-ph/0008228
from ..sugar import exp
res = x+1.
res *= x
res *= -0.5*sigma*sigma
exp(res, out=res)
return res
return exp((x+1.) * x * (-0.5*sigma*sigma))
def get_fft_smoothing_kernel_function(self, sigma):
return lambda x: self._kernel(x, sigma)
......
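
The rewritten LMSpace kernel evaluates the usual harmonic-space Gaussian smoothing weight exp(-0.5*l*(l+1)*sigma**2) referenced via Challinor et al. above, now as a single out-of-place expression. The same formula in plain numpy:

import numpy as np

def lm_gaussian_kernel(ell, sigma):
    """Gaussian smoothing weight for multipole ell and kernel width sigma,
    i.e. exp(-0.5*ell*(ell+1)*sigma**2), matching the rewritten _kernel."""
    ell = np.asarray(ell, dtype=np.float64)
    return np.exp((ell + 1.)*ell*(-0.5*sigma*sigma))

print(lm_gaussian_kernel(np.arange(5), sigma=0.1))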
......@@ -3,7 +3,7 @@ from ..sugar import exp
import numpy as np
from ..dobj import ibegin
from .. import dobj
from ..field import Field
from .structured_domain import StructuredDomain
......@@ -62,26 +62,22 @@ class LogRGSpace(StructuredDomain):
np.zeros(len(self.shape)), True)
def get_k_length_array(self):
out = Field(self, dtype=np.float64)
oloc = out.local_data
ib = ibegin(out.val)
res = np.arange(oloc.shape[0], dtype=np.float64) + ib[0]
ib = dobj.ibegin_from_shape(self._shape)
res = np.arange(self.local_shape[0], dtype=np.float64) + ib[0]
res = np.minimum(res, self.shape[0]-res)*self.bindistances[0]
if len(self.shape) == 1:
oloc[()] = res
return out
return Field.from_local_data(self, res)
res *= res
for i in range(1, len(self.shape)):
tmp = np.arange(oloc.shape[i], dtype=np.float64) + ib[i]
tmp = np.arange(self.local_shape[i], dtype=np.float64) + ib[i]
tmp = np.minimum(tmp, self.shape[i]-tmp)*self.bindistances[i]
tmp *= tmp
res = np.add.outer(res, tmp)
oloc[()] = np.sqrt(res)
return out
return Field.from_local_data(self, np.sqrt(res))
def get_expk_length_array(self):
# FIXME This is a hack! Only for plotting. Seems not to be the final version.
out = exp(self.get_k_length_array())
out.val[1:] = out.val[:-1]
out.val[0] = 0
return out
out = exp(self.get_k_length_array()).to_global_data().copy()
out[1:] = out[:-1]
out[0] = 0
return Field.from_global_data(self, out)
......@@ -95,22 +95,18 @@ class RGSpace(StructuredDomain):
def get_k_length_array(self):
if (not self.harmonic):
raise NotImplementedError
out = Field(self, dtype=np.float64)
oloc = out.local_data
ibegin = dobj.ibegin(out.val)
res = np.arange(oloc.shape[0], dtype=np.float64) + ibegin[0]
ibegin = dobj.ibegin_from_shape(self._shape)
res = np.arange(self.local_shape[0], dtype=np.float64) + ibegin[0]
res = np.minimum(res, self.shape[0]-res)*self.distances[0]
if len(self.shape) == 1:
oloc[()] = res
return out
return Field.from_local_data(self, res)
res *= res
for i in range(1, len(self.shape)):
tmp = np.arange(oloc.shape[i], dtype=np.float64) + ibegin[i]
tmp = np.arange(self.local_shape[i], dtype=np.float64) + ibegin[i]
tmp = np.minimum(tmp, self.shape[i]-tmp)*self.distances[i]
tmp *= tmp
res = np.add.outer(res, tmp)
oloc[()] = np.sqrt(res)
return out
return Field.from_local_data(self, np.sqrt(res))
def get_unique_k_lengths(self):
if (not self.harmonic):
......@@ -145,10 +141,7 @@ class RGSpace(StructuredDomain):
@staticmethod
def _kernel(x, sigma):
from ..sugar import exp
tmp = x*x
tmp *= -2.*np.pi*np.pi*sigma*sigma
exp(tmp, out=tmp)
return tmp
return exp(x*x * (-2.*np.pi*np.pi*sigma*sigma))
def get_fft_smoothing_kernel_function(self, sigma):
if (not self.harmonic):
......
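
Both get_k_length_array rewrites above build the |k| grid out of place and hand it to Field.from_local_data. Stripped of the MPI bookkeeping, the underlying recipe is the usual "distance to the nearest alias" construction; a single-task numpy sketch with hypothetical shape and distances:

import numpy as np

def k_length_array(shape, distances):
    """|k| on a regular harmonic grid: per axis min(i, N-i)*dk, combined
    via an outer sum of squares and a final square root."""
    res = np.arange(shape[0], dtype=np.float64)
    res = np.minimum(res, shape[0]-res)*distances[0]
    if len(shape) == 1:
        return res
    res *= res
    for i in range(1, len(shape)):
        tmp = np.arange(shape[i], dtype=np.float64)
        tmp = np.minimum(tmp, shape[i]-tmp)*distances[i]
        tmp *= tmp
        res = np.add.outer(res, tmp)
    return np.sqrt(res)

print(k_length_array((4, 4), (1., 1.)))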
......@@ -6,6 +6,7 @@ from ..utilities import memo, my_sum
class SampledKullbachLeiblerDivergence(Energy):
def __init__(self, h, res_samples):
"""
# MR FIXME: does h have to be a Hamiltonian? Couldn't it be any energy?
h: Hamiltonian
N: Number of samples to be used
"""
......
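
The docstring above (FIXME included) describes an energy that averages a Hamiltonian h over a set of residual samples. The core idea, written as a throwaway sketch with plain callables and numpy arrays rather than NIFTy Energy objects:

import numpy as np

def sampled_average(h, position, res_samples):
    """Monte-Carlo estimate of <h> around `position`: evaluate h at
    position+sample for every residual sample and average the results."""
    vals = [h(position + s) for s in res_samples]
    return sum(vals)/len(res_samples)

h = lambda x: 0.5*np.dot(x, x)                    # toy 'Hamiltonian'
pos = np.zeros(3)
samples = [np.random.normal(size=3) for _ in range(10)]
print(sampled_average(h, pos, samples))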
......@@ -16,6 +16,8 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import (absolute_import, division, print_function)
from builtins import *
import numpy as np
from ..sugar import from_random
from ..minimization.energy import Energy
......@@ -31,7 +33,7 @@ def _get_acceptable_model(M):
raise ValueError('Initial Model value must be finite')
dir = from_random("normal", M.position.domain)
dirder = M.jacobian(dir)
dir *= val/(dirder).norm()*1e-5
dir = dir * val * (1e-5/dirder.norm())
# Find a step length that leads to a "reasonable" Model
for i in range(50):
try:
......@@ -40,7 +42,7 @@ def _get_acceptable_model(M):
break
except FloatingPointError:
pass
dir *= 0.5
dir = dir*0.5
else:
raise ValueError("could not find a reasonable initial step")
return M2
......@@ -52,7 +54,7 @@ def _get_acceptable_energy(E):
raise ValueError('Initial Energy must be finite')
dir = from_random("normal", E.position.domain)
dirder = E.gradient.vdot(dir)
dir *= np.abs(val)/np.abs(dirder)*1e-5
dir = dir * (np.abs(val)/np.abs(dirder)*1e-5)
# Find a step length that leads to a "reasonable" energy
for i in range(50):
try:
......@@ -61,7 +63,7 @@ def _get_acceptable_energy(E):
break
except FloatingPointError:
pass
dir *= 0.5
dir = dir*0.5
else:
raise ValueError("could not find a reasonable initial step")
return E2
......@@ -92,7 +94,7 @@ def check_value_gradient_consistency(E, tol=1e-8, ntries=100):
xtol = tol*Emid.gradient_norm
if abs(numgrad-dirder) < xtol:
break
dir *= 0.5
dir = dir*0.5
dirnorm *= 0.5
E2 = Emid
else:
......@@ -117,7 +119,7 @@ def check_value_gradient_metric_consistency(E, tol=1e-8, ntries=100):
if abs((E2.value-val)/dirnorm - dirder) < xtol and \
(abs((E2.gradient-E.gradient)/dirnorm-dgrad) < xtol).all():
break
dir *= 0.5
dir = dir*0.5
dirnorm *= 0.5
E2 = Emid
else:
......
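
The edits above only swap in-place field arithmetic for out-of-place expressions; the test itself remains a finite-difference consistency check between value and gradient. A generic numpy version of that check, using a midpoint gradient as the reference (a sketch, not the NIFTy helper):

import numpy as np

def check_value_gradient_consistency(f, grad_f, x, tol=1e-8, ntries=100):
    """Shrink a random direction until (f(x+d)-f(x))/|d| agrees with the
    midpoint directional derivative to within a scale-aware tolerance."""
    d = np.random.normal(size=x.shape)
    for _ in range(ntries):
        dirnorm = np.linalg.norm(d)
        gmid = grad_f(x + 0.5*d)                  # gradient at the midpoint
        xtol = tol*np.linalg.norm(gmid)
        numgrad = (f(x + d) - f(x))/dirnorm
        dirder = np.dot(gmid, d)/dirnorm
        if abs(numgrad - dirder) < xtol:
            return
        d = d*0.5                                 # out-of-place, like the diff
    raise ValueError("gradient and value seem inconsistent")

f = lambda x: np.sum(x**3)
grad_f = lambda x: 3*x**2
check_value_gradient_consistency(f, grad_f, np.random.normal(size=5))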
......@@ -35,9 +35,9 @@ def adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol):
needed_cap = op.TIMES | op.ADJOINT_TIMES
if (op.capability & needed_cap) != needed_cap:
return
f1 = from_random("normal", op.domain, dtype=domain_dtype).lock()
f2 = from_random("normal", op.target, dtype=target_dtype).lock()
res1 = f1.vdot(op.adjoint_times(f2).lock())
f1 = from_random("normal", op.domain, dtype=domain_dtype)
f2 = from_random("normal", op.target, dtype=target_dtype)
res1 = f1.vdot(op.adjoint_times(f2))
res2 = op.times(f1).vdot(f2)
np.testing.assert_allclose(res1, res2, atol=atol, rtol=rtol)
......@@ -46,12 +46,12 @@ def inverse_implementation(op, domain_dtype, target_dtype, atol, rtol):
needed_cap = op.TIMES | op.INVERSE_TIMES
if (op.capability & needed_cap) != needed_cap:
return
foo = from_random("normal", op.target, dtype=target_dtype).lock()
res = op(op.inverse_times(foo).lock())
foo = from_random("normal", op.target, dtype=target_dtype)
res = op(op.inverse_times(foo))
_assert_allclose(res, foo, atol=atol, rtol=rtol)
foo = from_random("normal", op.domain, dtype=domain_dtype).lock()
res = op.inverse_times(op(foo).lock())
foo = from_random("normal", op.domain, dtype=domain_dtype)
res = op.inverse_times(op(foo))
_assert_allclose(res, foo, atol=atol, rtol=rtol)
......
......@@ -16,6 +16,9 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import (absolute_import, division, print_function)
from builtins import *
def _logger_init():
import logging
......
......@@ -66,7 +66,7 @@ class ConjugateGradient(Minimizer):
return energy, status
r = energy.gradient
d = r.copy() if preconditioner is None else preconditioner(r)
d = r if preconditioner is None else preconditioner(r)
previous_gamma = r.vdot(d).real
if previous_gamma == 0:
......@@ -84,8 +84,7 @@ class ConjugateGradient(Minimizer):
logger.error("Error: ConjugateGradient: alpha<0.")
return energy, controller.ERROR
q *= -alpha
r = r + q
r = r - q*alpha
energy = energy.at_with_grad(energy.position - alpha*d, r)
......@@ -103,7 +102,6 @@ class ConjugateGradient(Minimizer):
if status != controller.CONTINUE:
return energy, status
d *= max(0, gamma/previous_gamma)
d += s
d = d * max(0, gamma/previous_gamma) + s
previous_gamma = gamma
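
The ConjugateGradient changes replace in-place updates of the residual and search direction with r = r - q*alpha and d = d*(gamma/previous_gamma) + s. Stripped of the Energy and preconditioner machinery, the corresponding plain CG iteration for A x = b looks roughly like this (a sketch, not the minimizer itself):

import numpy as np

def cg(A, b, x0, tol=1e-10, maxiter=1000):
    """Minimal conjugate gradient for a symmetric positive definite A."""
    x = x0
    r = b - A @ x                 # residual (negative energy gradient)
    d = r                         # no preconditioner: d = r, as in the diff
    previous_gamma = np.vdot(r, d).real
    for _ in range(maxiter):
        q = A @ d
        alpha = previous_gamma/np.vdot(d, q).real
        x = x + alpha*d
        r = r - q*alpha           # out-of-place, like the rewritten minimizer
        gamma = np.vdot(r, r).real
        if np.sqrt(gamma) < tol:
            break
        d = d*(gamma/previous_gamma) + r
        previous_gamma = gamma
    return x

A = np.array([[4., 1.], [1., 3.]])
b = np.array([1., 2.])
print(cg(A, b, np.zeros(2)))      # approx. [0.0909, 0.6364]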
......@@ -52,7 +52,7 @@ class Energy(NiftyMetaBase()):
def __init__(self, position):
super(Energy, self).__init__()
self._position = position.lock()
self._position = position
def at(self, position):
""" Returns a new Energy object, initialized at `position`.
......
......@@ -63,7 +63,7 @@ class EnergySum(Energy):
@memo
def gradient(self):
return my_lincomb(map(lambda v: v.gradient, self._energies),
self._factors).lock()
self._factors)
@property
@memo
......
......@@ -16,10 +16,8 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import division
from builtins import range
from __future__ import (absolute_import, division, print_function)
from builtins import *
from ..logger import logger
from .descent_minimizer import DescentMinimizer
from .line_search_strong_wolfe import LineSearchStrongWolfe
......@@ -61,16 +59,16 @@ class L_BFGS(DescentMinimizer):
for i in range(k-1, k-nhist-1, -1):
idx = i % maxhist
alpha[idx] = s[idx].vdot(p)/s[idx].vdot(y[idx])
p -= alpha[idx]*y[idx]
p = p - alpha[idx]*y[idx]
idx = (k-1) % maxhist
fact = s[idx].vdot(y[idx]) / y[idx].vdot(y[idx])
if fact <= 0.:
logger.error("L-BFGS curvature not positive definite!")
p *= fact
p = p*fact
for i in range(k-nhist, k):
idx = i % maxhist
beta = y[idx].vdot(p) / s[idx].vdot(y[idx])
p += (alpha[idx]-beta)*s[idx]
p = p + (alpha[idx]-beta)*s[idx]
self._lastx = x
self._lastgrad = gradient
self._k += 1
......
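
The L_BFGS loop above is the standard two-loop recursion, now written without in-place field updates. The same recursion in plain numpy, keeping the full history instead of the class's ring buffer (a sketch under that simplification):

import numpy as np

def lbfgs_direction(gradient, s_list, y_list):
    """Two-loop recursion: approximate H^{-1} * gradient from stored
    position differences s_k and gradient differences y_k."""
    p = gradient
    alpha = []
    for s, y in reversed(list(zip(s_list, y_list))):   # newest pair first
        a = np.vdot(s, p)/np.vdot(s, y)
        alpha.append(a)
        p = p - a*y
    if s_list:                                         # initial Hessian scaling
        s, y = s_list[-1], y_list[-1]
        p = p*(np.vdot(s, y)/np.vdot(y, y))
    for (s, y), a in zip(zip(s_list, y_list), reversed(alpha)):
        beta = np.vdot(y, p)/np.vdot(s, y)
        p = p + (a - beta)*s
    return p      # the descent step is typically -p, possibly line-searched

# toy check on a quadratic with Hessian diag(1, 10)
g = lambda x: np.array([1., 10.])*x
xs = [np.array([4., 1.]), np.array([3., 0.5]), np.array([2.5, 0.1])]
s_list = [xs[i+1] - xs[i] for i in range(2)]
y_list = [g(xs[i+1]) - g(xs[i]) for i in range(2)]
print(lbfgs_direction(g(xs[-1]), s_list, y_list))   # approximates H^{-1} grad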
......@@ -48,7 +48,7 @@ class LineEnergy(object):
def __init__(self, line_position, energy, line_direction, offset=0.):
super(LineEnergy, self).__init__()
self._line_position = float(line_position)
self._line_direction = line_direction.lock()
self._line_direction = line_direction
if self._line_position == float(offset):
self._energy = energy
......
......@@ -35,7 +35,6 @@ class QuadraticEnergy(Energy):
else:
Ax = self._A(self.position)
self._grad = Ax if b is None else Ax - b
self._grad.lock()
self._value = 0.5*self.position.vdot(Ax)
if b is not None:
self._value -= b.vdot(self.position)
......
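
QuadraticEnergy no longer locks its gradient, but the quantities themselves are unchanged: for E(x) = 0.5*x.A.x - b.x the gradient is Ax - b and the value is accumulated exactly as in the diff. A matrix/vector sketch:

import numpy as np

def quadratic_energy(A, b, x):
    """Value and gradient of E(x) = 0.5*x.A.x - b.x, mirroring the
    expressions in QuadraticEnergy.__init__."""
    Ax = A @ x
    grad = Ax if b is None else Ax - b
    value = 0.5*np.vdot(x, Ax)
    if b is not None:
        value -= np.vdot(b, x)
    return value, grad

A = np.array([[4., 1.], [1., 3.]])
b = np.array([1., 2.])
print(quadratic_energy(A, b, np.array([0.0909, 0.6364])))   # gradient near zero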
......@@ -33,7 +33,7 @@ def _toFlatNdarray(fld):
def _toField(arr, dom):
return Field.from_global_data(dom, arr.reshape(dom.shape))
return Field.from_global_data(dom, arr.reshape(dom.shape).copy())
class _MinHelper(object):
......@@ -44,7 +44,7 @@ class _MinHelper(object):
def _update(self, x):
pos = _toField(x, self._domain)
if (pos != self._energy.position).any():
self._energy = self._energy.at(pos.locked_copy())
self._energy = self._energy.at(pos)
def fun(self, x):
self._update(x)
......
......@@ -16,9 +16,8 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import division
from builtins import range
from builtins import object
from __future__ import (absolute_import, division, print_function)
from builtins import *
import numpy as np
from .descent_minimizer import DescentMinimizer
from .line_search_strong_wolfe import LineSearchStrongWolfe
......@@ -67,7 +66,7 @@ class VL_BFGS(DescentMinimizer):
descent_direction = delta[0] * b[0]
for i in range(1, len(delta)):
descent_direction += delta[i] * b[i]
descent_direction = descent_direction + delta[i]*b[i]
return descent_direction
......@@ -109,8 +108,8 @@ class _InformationStore(object):
self.max_history_length = max_history_length
self.s = [None]*max_history_length
self.y = [None]*max_history_length
self.last_x = x0.copy()
self.last_gradient = gradient.copy()
self.last_x = x0
self.last_gradient = gradient
self.k = 0
mmax = max_history_length
......@@ -233,7 +232,7 @@ class _InformationStore(object):
self.s[self.k % mmax] = x - self.last_x
self.y[self.k % mmax] = gradient - self.last_gradient
self.last_x = x.copy()
self.last_gradient = gradient.copy()
self.last_x = x
self.last_gradient = gradient
self.k += 1
......@@ -25,7 +25,7 @@ def _joint_position(model1, model2):
a = model1.position._val
b = model2.position._val
# Note: In python >3.5 one could do {**a, **b}
ab = a.copy()
ab = dict(a)
ab.update(b)
return MultiField(ab)
......
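
The _joint_position change swaps .copy() on the (possibly immutable) position mapping for an explicit dict(), which works for any Mapping. For reference, the equivalent merges:

a = {'x': 1}
b = {'y': 2}

ab = dict(a)        # works for plain dicts and Mapping-like objects
ab.update(b)

ab2 = {**a, **b}    # Python >= 3.5 only
assert ab == ab2 == {'x': 1, 'y': 2}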
......@@ -26,19 +26,19 @@ from ..utilities import NiftyMetaBase
class Model(NiftyMetaBase()):
"""
The Model object is an implementation of a * which knows:
- position in parameterspace. (Field, MulitField)
- value according to its modelfunction A. A(position)
- position in parameter space. (Field, MultiField)
- value according to its model function A. A(position)
- Jacobian of the model function at the current position.
Parameters
----------
position : Field, MulitField
position : Field, MultiField
The input parameter of the model
Notes
-----
An instance of the model class knows its position, value and Jacobian.
One can 'jump' to a new position, with the help of the 'at' method, whereby
One can 'jump' to a new position with the help of the 'at' method, whereby
one automatically gets the value and Jacobian of the model. The 'at' method
creates a new instance of the class.
"""
......
import collections
from ..domain_tuple import DomainTuple
__all = ["MultiDomain"]
class frozendict(collections.Mapping):
"""
An immutable wrapper around dictionaries that implements the complete
:py:class:`collections.Mapping` interface. It can be used as a drop-in
replacement for dictionaries where immutability is desired.
"""
dict_cls = dict
def __init__(self, *args, **kwargs):
self._dict = self.dict_cls(*args, **kwargs)
self._hash = None
def __getitem__(self, key):
return self._dict[key]
def __contains__(self, key):
return key in self._dict
def copy(self, **add_or_replace):
return self.__class__(self, **add_or_replace)
def __iter__(self):
return iter(self._dict)
def __len__(self):
    return len(self._dict)
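
A short usage sketch of the wrapper, relying only on the Mapping interface it implements:

fd = frozendict({'a': 1, 'b': 2})
print(fd['a'], len(fd), 'b' in fd, list(fd))   # 1 2 True ['a', 'b']

fd2 = fd.copy(b=3, c=4)        # copy() builds a new frozendict with overrides
print(dict(fd2))               # {'a': 1, 'b': 3, 'c': 4}

# item assignment is deliberately unsupported: frozendict defines no __setitem__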