Commit 7cb08b8f authored by Lukas Platz's avatar Lukas Platz
Browse files

Merge branch 'NIFTy_6' into concise_offset_parametrization

parents 62e1120f 86a5890b
Pipeline #71961 passed with stages
in 19 minutes and 33 seconds
......@@ -178,10 +178,9 @@ class MetricGaussianKL(Energy):
_local_samples = []
sseq = random.spawn_sseq(self._n_samples)
for i in range(self._lo, self._hi):
random.push_sseq(sseq[i])
_local_samples.append(met.draw_sample(from_inverse=True,
dtype=lh_sampling_dtype))
random.pop_sseq()
with random.Context(sseq[i]):
_local_samples.append(met.draw_sample(
from_inverse=True, dtype=lh_sampling_dtype))
_local_samples = tuple(_local_samples)
else:
if len(_local_samples) != self._hi-self._lo:
......@@ -272,9 +271,8 @@ class MetricGaussianKL(Energy):
samp = full(self._hamiltonian.domain, 0.)
sseq = random.spawn_sseq(self._n_samples)
for i, v in enumerate(self._local_samples):
random.push_sseq(sseq[self._lo+i])
samp = samp + self._hamiltonian(lin+v).metric.draw_sample(from_inverse=False, dtype=dtype)
if self._mirror_samples:
samp = samp + self._hamiltonian(lin-v).metric.draw_sample(from_inverse=False, dtype=dtype)
random.pop_sseq()
with random.Context(sseq[self._lo+i]):
samp = samp + self._hamiltonian(lin+v).metric.draw_sample(from_inverse=False, dtype=dtype)
if self._mirror_samples:
samp = samp + self._hamiltonian(lin-v).metric.draw_sample(from_inverse=False, dtype=dtype)
return _allreduce_sum_field(self._comm, samp)/self._n_eff_samples
......@@ -16,12 +16,14 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
from ..domain_tuple import DomainTuple
from ..multi_domain import MultiDomain
from ..domains.unstructured_domain import UnstructuredDomain
from ..field import Field
from ..multi_domain import MultiDomain
from ..multi_field import MultiField
from .endomorphic_operator import EndomorphicOperator
from .linear_operator import LinearOperator
from .endomorphic_operator import EndomorphicOperator
from .. import utilities
import numpy as np
class VdotOperator(LinearOperator):
......@@ -352,29 +354,113 @@ class PartialExtractor(LinearOperator):
class MatrixProductOperator(EndomorphicOperator):
    """Endomorphic matrix multiplication with input field.

    This operator supports scipy.sparse matrices and numpy arrays
    as the matrix to be applied.

    For numpy array matrices, can apply the matrix over a subspace
    of the input.

    If the input arrays have more than one dimension, for
    scipy.sparse matrices the `flatten` keyword argument must be
    set to true. This means that the input field will be flattened
    before applying the matrix and reshaped to its original shape
    afterwards.

    Matrices are tested regarding their compatibility with the
    called for application method.

    Flattening and subspace application are mutually exclusive.

    Parameters
    ----------
    domain: :class:`Domain` or :class:`DomainTuple`
        Domain of the operator.
    matrix: scipy.sparse matrix or numpy array
        Quadratic matrix of shape `(domain.shape, domain.shape)`
        (if `not flatten`) that supports `matrix.transpose()`.
        If it is not a numpy array, needs to be applicable to the val
        array of input fields by `matrix.dot()`.
    spaces: int or tuple of int, optional
        The subdomain(s) of "domain" which the operator acts on.
        If None, it acts on all elements.
        Only possible for numpy array matrices.
        If `len(domain) > 1` and `flatten=False`, this parameter is
        mandatory.
    flatten: boolean, optional
        Whether the input value array should be flattened before
        applying the matrix and reshaped to its original shape
        afterwards.
        Needed for scipy.sparse matrices if `len(domain) > 1`.
    """
    def __init__(self, domain, matrix, spaces=None, flatten=False):
        self._capability = self.TIMES | self.ADJOINT_TIMES
        self._domain = DomainTuple.make(domain)

        mat_dim = len(matrix.shape)
        # The matrix must map inputs of some shape S to outputs of the same
        # shape S, i.e. its shape must be S + S.
        if mat_dim % 2 != 0 or \
           matrix.shape != (matrix.shape[:mat_dim//2] + matrix.shape[:mat_dim//2]):
            raise ValueError("Matrix must be quadratic.")
        appl_dim = mat_dim // 2  # matrix application space dimension

        # take shortcut for trivial case
        if spaces is not None:
            if len(self._domain.shape) == 1 and spaces == (0, ):
                spaces = None

        if spaces is None:
            # Apply over the full domain (optionally flattened).
            self._spaces = None
            self._active_axes = utilities.my_sum(self._domain.axes)
            appl_space_shape = self._domain.shape
            if flatten:
                appl_space_shape = (utilities.my_product(appl_space_shape), )
        else:
            if flatten:
                raise ValueError(
                    "Cannot flatten input AND apply to a subspace")
            if not isinstance(matrix, np.ndarray):
                raise ValueError(
                    "Application to subspaces only supported for numpy array matrices."
                )
            self._spaces = utilities.parse_spaces(spaces, len(self._domain))
            appl_space_shape = []
            active_axes = []
            for space_idx in spaces:
                appl_space_shape += self._domain[space_idx].shape
                active_axes += self._domain.axes[space_idx]
            appl_space_shape = tuple(appl_space_shape)
            self._active_axes = tuple(active_axes)

            # Axis bookkeeping for tensordot/moveaxis in `apply`: the last
            # `appl_dim` matrix axes are contracted, the first `appl_dim`
            # result axes are moved back to the active input axes.
            self._mat_last_n = tuple([-appl_dim + i for i in range(appl_dim)])
            self._mat_first_n = np.arange(appl_dim)

        # Test if the matrix and the array it will be applied to fit
        if matrix.shape[:appl_dim] != appl_space_shape:
            raise ValueError(
                "Matrix and domain shapes are incompatible under the requested "
                + "application scheme.\n" +
                f"Matrix appl shape: {matrix.shape[:appl_dim]}, " +
                f"appl_space_shape: {appl_space_shape}.")

        self._mat = matrix
        # Adjoint application uses the conjugate transpose.
        self._mat_tr = matrix.transpose().conjugate()
        self._flatten = flatten

    def apply(self, x, mode):
        self._check_input(x, mode)
        times = (mode == self.TIMES)
        m = self._mat if times else self._mat_tr

        if self._spaces is None:
            if not self._flatten:
                res = m.dot(x.val)
            else:
                res = m.dot(x.val.flatten()).reshape(self._domain.shape)
            return Field(self._domain, res)

        # Subspace application: contract the matrix with the active axes,
        # then move the resulting axes back into their original positions.
        mat_axes = self._mat_last_n if times else np.flip(self._mat_last_n)
        move_axes = self._mat_first_n if times else np.flip(self._mat_first_n)
        res = np.tensordot(m, x.val, axes=(mat_axes, self._active_axes))
        res = np.moveaxis(res, move_axes, self._active_axes)
        return Field(self._domain, res)
......@@ -79,6 +79,31 @@ _sseq = [np.random.SeedSequence(42)]
_rng = [np.random.default_rng(_sseq[-1])]
def getState():
    """Snapshot the module's complete internal RNG state.

    Intended for pickling, e.g. to checkpoint a computation.

    Returns
    -------
    state : bytes
        Pickled copy of the module's SeedSequence and Generator stacks;
        pass it to `setState` to restore.
    """
    import pickle
    snapshot = (_sseq, _rng)
    return pickle.dumps(snapshot)
def setState(state):
    """Restore the module's complete internal RNG state.

    Intended for unpickling, e.g. to resume from a checkpoint.

    Parameters
    ----------
    state : unspecified
        Result of an earlier call to `getState`.
    """
    import pickle
    global _sseq, _rng
    restored = pickle.loads(state)
    _sseq, _rng = restored
def spawn_sseq(n, parent=None):
"""Returns a list of `n` SeedSequence objects which are children of `parent`
......@@ -121,6 +146,11 @@ def push_sseq(sseq):
----------
sseq: SeedSequence
the SeedSequence object to be used from this point
Notes
-----
Make sure that every call to `push_sseq` has a matching call to
`pop_sseq`, otherwise the module's internal stack will grow indefinitely!
"""
_sseq.append(sseq)
_rng.append(np.random.default_rng(_sseq[-1]))
......@@ -136,6 +166,11 @@ def push_sseq_from_seed(seed):
----------
seed: int
the seed from which the new SeedSequence will be built
Notes
-----
Make sure that every call to `push_sseq_from_seed` has a matching call to
`pop_sseq`, otherwise the module's internal stack will grow indefinitely!
"""
_sseq.append(np.random.SeedSequence(seed))
_rng.append(np.random.default_rng(_sseq[-1]))
......@@ -196,3 +231,16 @@ class Random(object):
else:
x = _rng[-1].uniform(low, high, shape)
return x.astype(dtype, copy=False)
class Context(object):
    """Context manager that temporarily installs a dedicated random generator.

    On entry, the given seed or SeedSequence is pushed onto the module's
    RNG stack (so `current_rng()` draws from it); on exit, the previous
    generator is restored.

    Parameters
    ----------
    inp : int or np.random.SeedSequence
        Seed (or SeedSequence) used to build the temporary generator.
    """
    def __init__(self, inp):
        # Accept plain integer seeds for convenience; normalize to a
        # SeedSequence so push_sseq always receives one.
        if not isinstance(inp, np.random.SeedSequence):
            inp = np.random.SeedSequence(inp)
        self._sseq = inp

    def __enter__(self):
        push_sseq(self._sseq)

    def __exit__(self, exc_type, exc_value, tb):
        # BUGFIX: restore the previous generator. Without this pop, every
        # use of the context leaked one entry on the module's RNG stack and
        # the seeded generator stayed active after the `with` block ended.
        pop_sseq()
        return exc_type is None
......@@ -38,11 +38,9 @@ pmp = pytest.mark.parametrize
@pytest.fixture(params=PARAMS)
def field(request):
    # Draw one reproducible sample per parameter set. The Context manager
    # scopes the seed so the pushed generator is popped even if
    # draw_sample raises (the old push/pop body leaked it and its
    # `return` made the Context version unreachable dead code).
    with ift.random.Context(request.param[0]):
        S = ift.ScalingOperator(request.param[1], 1.)
        return S.draw_sample()
def test_gaussian(field):
......
......@@ -38,38 +38,37 @@ pmp = pytest.mark.parametrize
@pmp('noise', [1, 1e-2, 1e2])
@pmp('seed', [4, 78, 23])
def test_gaussian_energy(space, nonlinearity, noise, seed):
    # Check Jacobian consistency of a (possibly nonlinear) Gaussian energy.
    # All random draws happen inside a seeded Context for reproducibility.
    # (The pre-merge body had been left interleaved with this one.)
    with ift.random.Context(seed):
        dim = len(space.shape)
        hspace = space.get_default_codomain()
        ht = ift.HarmonicTransformOperator(hspace, target=space)
        binbounds = ift.PowerSpace.useful_binbounds(hspace, logarithmic=False)
        pspace = ift.PowerSpace(hspace, binbounds=binbounds)
        Dist = ift.PowerDistributor(target=hspace, power_space=pspace)
        xi0 = ift.Field.from_random(domain=hspace, random_type='normal')

        def pspec(k):
            return 1/(1 + k**2)**dim

        pspec = ift.PS_field(pspace, pspec)
        A = Dist(ift.sqrt(pspec))
        N = ift.ScalingOperator(space, noise)
        n = N.draw_sample()
        R = ift.ScalingOperator(space, 10.)

        def d_model():
            if nonlinearity == "":
                return R @ ht @ ift.makeOp(A)
            else:
                tmp = ht @ ift.makeOp(A)
                nonlin = getattr(tmp, nonlinearity)()
                return R @ nonlin

        d = d_model()(xi0) + n

        # noise == 1 exercises the default-covariance code path.
        if noise == 1:
            N = None

        energy = ift.GaussianEnergy(d, N) @ d_model()
        ift.extra.check_jacobian_consistency(
            energy, xi0, ntries=10, tol=1e-6)
......@@ -248,16 +248,15 @@ def testOuter(fdomain, domain):
@pmp('sp', _h_spaces + _p_spaces + _pow_spaces)
@pmp('seed', [12, 3])
def testValueInserter(sp, seed):
    # Pick a random (but seeded) insertion index per axis and run the
    # generic operator consistency check. (The pre-merge push/pop body
    # had been left interleaved with this Context-based one.)
    with ift.random.Context(seed):
        ind = []
        for ss in sp.shape:
            if ss == 1:
                ind.append(0)
            else:
                ind.append(int(ift.random.current_rng().integers(0, ss-1)))
        op = ift.ValueInserter(sp, ind)
        ift.extra.consistency_check(op)
@pmp('sp', _pow_spaces)
......@@ -277,32 +276,50 @@ def testSpecialSum(sp):
op = ift.library.correlated_fields._SpecialSum(sp)
ift.extra.consistency_check(op)
def metatestMatrixProductOperator(sp, mat_shape, seed, **kwargs):
    """Consistency-check MatrixProductOperator with a real and a complex matrix.

    Draws a seeded random matrix of `mat_shape`, checks the operator built
    from it, then repeats with a complex-valued variant of the same matrix.
    """
    with ift.random.Context(seed):
        rng = ift.random.current_rng()
        real_mat = rng.standard_normal(mat_shape)
        for mat in (real_mat, real_mat + 1j*rng.standard_normal(mat_shape)):
            op = ift.MatrixProductOperator(sp, mat, **kwargs)
            ift.extra.consistency_check(op)
@pmp('sp', [ift.RGSpace(10)])
@pmp('spaces', [None, (0,)])
@pmp('seed', [12, 3])
def testMatrixProductOperator_1d(sp, spaces, seed):
    # 1d domain: matrix shape is (n, n) for a length-n space. The old
    # inline test body (superseded by metatestMatrixProductOperator) had
    # been left interleaved between the decorators and this def.
    mat_shape = sp.shape * 2
    metatestMatrixProductOperator(sp, mat_shape, seed, spaces=spaces)
@pmp('sp', [ift.DomainTuple.make((ift.RGSpace((2)), ift.RGSpace((10))))])
@pmp('spaces', [(0,), (1,), (0, 1)])
@pmp('seed', [12, 3])
def testMatrixProductOperator_2d_spaces(sp, spaces, seed):
    """Check matrix application restricted to subspaces of a 2-space domain."""
    # Application-space shape is the concatenation of the selected
    # subspaces' shapes; a quadratic matrix doubles that shape.
    appl_shape = tuple(ax for idx in spaces for ax in sp[idx].shape)
    metatestMatrixProductOperator(sp, appl_shape * 2, seed, spaces=spaces)
@pmp('sp', [ift.RGSpace((2, 10))])
@pmp('seed', [12, 3])
def testMatrixProductOperator_2d_flatten(sp, seed):
    """Check matrix application to the flattened multi-dimensional domain."""
    # With flatten=True the matrix acts on the domain collapsed to one axis.
    n_flat = ift.utilities.my_product(sp.shape)
    metatestMatrixProductOperator(sp, (n_flat, n_flat), seed, flatten=True)
@pmp('seed', [12, 3])
def testPartialExtractor(seed):
    # Extract a sub-MultiDomain ('a', 'b') from a larger one ('a', 'b', 'c')
    # and run the generic consistency check. (The pre-merge push/pop body
    # had been left interleaved with this Context-based one.)
    with ift.random.Context(seed):
        tgt = {'a': ift.RGSpace(1), 'b': ift.RGSpace(2)}
        dom = tgt.copy()
        dom['c'] = ift.RGSpace(3)
        dom = ift.MultiDomain.make(dom)
        tgt = ift.MultiDomain.make(tgt)
        op = ift.PartialExtractor(dom, tgt)
        ift.extra.consistency_check(op)
@pmp('seed', [12, 3])
def testSlowFieldAdapter(seed):
......
......@@ -38,56 +38,57 @@ def testAmplitudesConsistency(rseed, sspace, Astds, offset_std_mean, N, zm_mean)
sc.add(op(s.extract(op.domain)))
return sc.mean.val, sc.var.sqrt().val
ift.random.push_sseq_from_seed(rseed)
nsam = 100
with ift.random.Context(rseed):
nsam = 100
fsspace = ift.RGSpace((12,), (0.4,))
if N == 2:
dofdex1 = [0, 0]
dofdex2 = [1, 0]
dofdex3 = [1, 1]
else:
dofdex1, dofdex2, dofdex3 = None, None, None
fsspace = ift.RGSpace((12,), (0.4,))
if N==2:
dofdex1 = [0,0]
dofdex2 = [1,0]
dofdex3 = [1,1]
else:
dofdex1, dofdex2, dofdex3 = None, None, None
fa = ift.CorrelatedFieldMaker.make(zm_mean, offset_std_mean, 1E-8, '', N, dofdex1)
fa.add_fluctuations(sspace, Astds[0], 1E-8, 1.1, 2., 2.1, .5, -2, 1., 'spatial', dofdex=dofdex2)
fa.add_fluctuations(fsspace, Astds[1], 1E-8, 3.1, 1., .5, .1, -4, 1., 'freq', dofdex=dofdex3)
op = fa.finalize()
fa = ift.CorrelatedFieldMaker.make(zm_mean, offset_std_mean, 1E-8, '', N, dofdex1)
fa.add_fluctuations(sspace, Astds[0], 1E-8, 1.1, 2., 2.1, .5, -2, 1.,
'spatial', dofdex = dofdex2)
fa.add_fluctuations(fsspace, Astds[1], 1E-8, 3.1, 1., .5, .1, -4, 1.,
'freq', dofdex = dofdex3)
op = fa.finalize()
samples = [ift.from_random('normal', op.domain) for _ in range(nsam)]
tot_flm, _ = stats(fa.total_fluctuation, samples)
offset_amp_std, _ = stats(fa.amplitude_total_offset, samples)
intergated_fluct_std0, _ = stats(fa.average_fluctuation(0), samples)
intergated_fluct_std1, _ = stats(fa.average_fluctuation(1), samples)
samples = [ift.from_random('normal', op.domain) for _ in range(nsam)]
tot_flm, _ = stats(fa.total_fluctuation, samples)
offset_amp_std, _ = stats(fa.amplitude_total_offset, samples)
intergated_fluct_std0, _ = stats(fa.average_fluctuation(0), samples)
intergated_fluct_std1, _ = stats(fa.average_fluctuation(1), samples)
slice_fluct_std0, _ = stats(fa.slice_fluctuation(0), samples)
slice_fluct_std1, _ = stats(fa.slice_fluctuation(1), samples)
slice_fluct_std0, _ = stats(fa.slice_fluctuation(0), samples)
slice_fluct_std1, _ = stats(fa.slice_fluctuation(1), samples)
sams = [op(s) for s in samples]
fluct_total = fa.total_fluctuation_realized(sams)
fluct_space = fa.average_fluctuation_realized(sams, 0)
fluct_freq = fa.average_fluctuation_realized(sams, 1)
zm_std_mean = fa.offset_amplitude_realized(sams)
sl_fluct_space = fa.slice_fluctuation_realized(sams, 0)
sl_fluct_freq = fa.slice_fluctuation_realized(sams, 1)
sams = [op(s) for s in samples]
fluct_total = fa.total_fluctuation_realized(sams)
fluct_space = fa.average_fluctuation_realized(sams, 0)
fluct_freq = fa.average_fluctuation_realized(sams, 1)
zm_std_mean = fa.offset_amplitude_realized(sams)
sl_fluct_space = fa.slice_fluctuation_realized(sams, 0)
sl_fluct_freq = fa.slice_fluctuation_realized(sams, 1)
assert_allclose(offset_amp_std, zm_std_mean, rtol=0.5)
assert_allclose(intergated_fluct_std0, fluct_space, rtol=0.5)
assert_allclose(intergated_fluct_std1, fluct_freq, rtol=0.5)
assert_allclose(tot_flm, fluct_total, rtol=0.5)
assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)
assert_allclose(offset_amp_std, zm_std_mean, rtol=0.5)
assert_allclose(intergated_fluct_std0, fluct_space, rtol=0.5)
assert_allclose(intergated_fluct_std1, fluct_freq, rtol=0.5)
assert_allclose(tot_flm, fluct_total, rtol=0.5)
assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)
fa = ift.CorrelatedFieldMaker.make(0., offset_std_mean, .1, '', N, dofdex1)
fa.add_fluctuations(fsspace, Astds[1], 1., 3.1, 1., .5, .1, -4, 1., 'freq', dofdex=dofdex3)
m = 3.
x = fa.moment_slice_to_average(m)
fa.add_fluctuations(sspace, x, 1.5, 1.1, 2., 2.1, .5, -2, 1., 'spatial', 0, dofdex=dofdex2)
op = fa.finalize()
em, estd = stats(fa.slice_fluctuation(0), samples)
fa = ift.CorrelatedFieldMaker.make(0., offset_std_mean, .1, '', N, dofdex1)
fa.add_fluctuations(fsspace, Astds[1], 1., 3.1, 1., .5, .1, -4, 1., 'freq', dofdex = dofdex3)
m = 3.
x = fa.moment_slice_to_average(m)
fa.add_fluctuations(sspace, x, 1.5, 1.1, 2., 2.1, .5, -2, 1., 'spatial', 0, dofdex = dofdex2)
op = fa.finalize()
em, estd = stats(fa.slice_fluctuation(0), samples)
assert_allclose(m, em, rtol=0.5)
ift.random.pop_sseq()
assert_allclose(m, em, rtol=0.5)
assert op.target[-2] == sspace
assert op.target[-1] == fsspace
......@@ -38,83 +38,79 @@ seed = list2fixture([4, 78, 23])
def testBasics(space, seed):
    # Jacobian consistency of a plain scaling model on a seeded sample.
    # (The pre-merge push/pop body had been left interleaved with this
    # Context-based one.)
    with ift.random.Context(seed):
        S = ift.ScalingOperator(space, 1.)
        s = S.draw_sample()
        var = ift.Linearization.make_var(s)
        model = ift.ScalingOperator(var.target, 6.)
        ift.extra.check_jacobian_consistency(model, var.val)
@pmp('type1', ['Variable', 'Constant'])
@pmp('type2', ['Variable'])
def testBinary(type1, type2, space, seed):
ift.random.push_sseq_from_seed(seed)
dom1 = ift.MultiDomain.make({'s1': space})
dom2 = ift.MultiDomain.make({'s2': space})
dom = ift.MultiDomain.union((dom1, dom2))
select_s1 = ift.ducktape(None, dom1, "s1")
select_s2 = ift.ducktape(None, dom2, "s2")
model = select_s1*select_s2
pos = ift.from_random("normal", dom)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
model = select_s1 + select_s2
pos = ift.from_random("normal", dom)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
model = select_s1.scale(3.)
pos = ift.from_random("normal", dom1)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
model = ift.ScalingOperator(space, 2.456)(select_s1*select_s2)
pos = ift.from_random("normal", dom)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
model = ift.sigmoid(2.456*(select_s1*select_s2))
pos = ift.from_random("normal", dom)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
pos = ift.from_random("normal", dom)
model = ift.OuterProduct(pos['s1'], ift.makeDomain(space))
ift.extra.check_jacobian_consistency(model, pos['s2'], ntries=20)
model = select_s1**2
pos = ift.from_random("normal", dom1)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
model = select_s1.clip(-1, 1)
pos = ift.from_random("normal", dom1)
ift.extra.check_jacobian_consistency(model, pos, ntries=20)
f = ift.from_random("normal", space)
model = select_s1.clip(f-0.1, f+1.)