Commit 86a5890b authored by Martin Reinecke

Merge branch 'randomcontext' into 'NIFTy_6'

Even more elegant random numbers?

See merge request !437
parents 35f2684c a5930cf9
Pipeline #71947 passed with stages in 14 minutes and 56 seconds
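For orientation, a minimal sketch of the API change this merge introduces. The names ift.random.Context, push_sseq_from_seed and pop_sseq are taken from the hunks below; everything else (the domain, the seed value) is illustrative only.

    import nifty6 as ift

    # Old pattern (removed below): the caller has to pop the seed sequence again,
    # and an exception between push and pop leaves the RNG stack inconsistent.
    ift.random.push_sseq_from_seed(42)
    sample_old = ift.ScalingOperator(ift.RGSpace(8), 1.).draw_sample()
    ift.random.pop_sseq()

    # New pattern (added below): the context manager pushes the seed sequence on
    # entry and restores the previous RNG state on exit, even if an exception is
    # raised inside the block.
    with ift.random.Context(42):
        sample_new = ift.ScalingOperator(ift.RGSpace(8), 1.).draw_sample()

Since both variants seed the generator from the same value, the two samples should contain identical numbers.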
@@ -178,10 +178,9 @@ class MetricGaussianKL(Energy):
            _local_samples = []
            sseq = random.spawn_sseq(self._n_samples)
            for i in range(self._lo, self._hi):
-                random.push_sseq(sseq[i])
-                _local_samples.append(met.draw_sample(from_inverse=True,
-                                                      dtype=lh_sampling_dtype))
-                random.pop_sseq()
+                with random.Context(sseq[i]):
+                    _local_samples.append(met.draw_sample(
+                        from_inverse=True, dtype=lh_sampling_dtype))
            _local_samples = tuple(_local_samples)
        else:
            if len(_local_samples) != self._hi-self._lo:
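The per-sample seeding above relies on spawning one independent child seed sequence per sample index, so each MPI task can reproduce exactly its own samples no matter how the loop range self._lo..self._hi is distributed. A self-contained sketch of that mechanism in plain NumPy, assuming random.spawn_sseq is a thin wrapper around numpy.random.SeedSequence.spawn (the wrapper itself is not shown in this diff):

    import numpy as np

    parent = np.random.SeedSequence(1234)
    children = parent.spawn(4)                    # one child sequence per sample
    rngs = [np.random.default_rng(c) for c in children]

    # Each stream is statistically independent and individually reproducible:
    # spawning again from the same parent and taking child i yields the same numbers.
    samples = [rng.standard_normal(3) for rng in rngs]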
@@ -272,9 +271,8 @@ class MetricGaussianKL(Energy):
        samp = full(self._hamiltonian.domain, 0.)
        sseq = random.spawn_sseq(self._n_samples)
        for i, v in enumerate(self._local_samples):
-            random.push_sseq(sseq[self._lo+i])
-            samp = samp + self._hamiltonian(lin+v).metric.draw_sample(from_inverse=False, dtype=dtype)
-            if self._mirror_samples:
-                samp = samp + self._hamiltonian(lin-v).metric.draw_sample(from_inverse=False, dtype=dtype)
-            random.pop_sseq()
+            with random.Context(sseq[self._lo+i]):
+                samp = samp + self._hamiltonian(lin+v).metric.draw_sample(from_inverse=False, dtype=dtype)
+                if self._mirror_samples:
+                    samp = samp + self._hamiltonian(lin-v).metric.draw_sample(from_inverse=False, dtype=dtype)
        return _allreduce_sum_field(self._comm, samp)/self._n_eff_samples
@@ -231,3 +231,16 @@ class Random(object):
        else:
            x = _rng[-1].uniform(low, high, shape)
        return x.astype(dtype, copy=False)
+
+
+class Context(object):
+    def __init__(self, inp):
+        if not isinstance(inp, np.random.SeedSequence):
+            inp = np.random.SeedSequence(inp)
+        self._sseq = inp
+
+    def __enter__(self):
+        push_sseq(self._sseq)
+
+    def __exit__(self, exc_type, exc_value, tb):
+        pop_sseq()
+        return exc_type is None
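A brief usage sketch of the class added above, using ift.random.current_rng() as it appears in the test changes below; the concrete seed and array size are illustrative only:

    import nifty6 as ift

    with ift.random.Context(123):                 # int seeds are wrapped in a SeedSequence
        x = ift.random.current_rng().standard_normal(5)
    with ift.random.Context(123):
        y = ift.random.current_rng().standard_normal(5)

    # Entering with the same seed reproduces the same stream, so x == y elementwise,
    # and on exit the previously active generator is restored.

Because push_sseq/pop_sseq maintain a stack of generators, contexts can also be nested: an inner context temporarily overrides the outer one and hands it back on exit.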
@@ -38,11 +38,9 @@ pmp = pytest.mark.parametrize
@pytest.fixture(params=PARAMS)
def field(request):
-    ift.random.push_sseq_from_seed(request.param[0])
-    S = ift.ScalingOperator(request.param[1], 1.)
-    res = S.draw_sample()
-    ift.random.pop_sseq()
-    return res
+    with ift.random.Context(request.param[0]):
+        S = ift.ScalingOperator(request.param[1], 1.)
+        return S.draw_sample()

def test_gaussian(field):
......
@@ -38,38 +38,37 @@ pmp = pytest.mark.parametrize
@pmp('noise', [1, 1e-2, 1e2])
@pmp('seed', [4, 78, 23])
def test_gaussian_energy(space, nonlinearity, noise, seed):
-    ift.random.push_sseq_from_seed(seed)
-    dim = len(space.shape)
-    hspace = space.get_default_codomain()
-    ht = ift.HarmonicTransformOperator(hspace, target=space)
-    binbounds = ift.PowerSpace.useful_binbounds(hspace, logarithmic=False)
-    pspace = ift.PowerSpace(hspace, binbounds=binbounds)
-    Dist = ift.PowerDistributor(target=hspace, power_space=pspace)
-    xi0 = ift.Field.from_random(domain=hspace, random_type='normal')
+    with ift.random.Context(seed):
+        dim = len(space.shape)
+        hspace = space.get_default_codomain()
+        ht = ift.HarmonicTransformOperator(hspace, target=space)
+        binbounds = ift.PowerSpace.useful_binbounds(hspace, logarithmic=False)
+        pspace = ift.PowerSpace(hspace, binbounds=binbounds)
+        Dist = ift.PowerDistributor(target=hspace, power_space=pspace)
+        xi0 = ift.Field.from_random(domain=hspace, random_type='normal')

-    def pspec(k):
-        return 1/(1 + k**2)**dim
+        def pspec(k):
+            return 1/(1 + k**2)**dim

-    pspec = ift.PS_field(pspace, pspec)
-    A = Dist(ift.sqrt(pspec))
-    N = ift.ScalingOperator(space, noise)
-    n = N.draw_sample()
-    R = ift.ScalingOperator(space, 10.)
+        pspec = ift.PS_field(pspace, pspec)
+        A = Dist(ift.sqrt(pspec))
+        N = ift.ScalingOperator(space, noise)
+        n = N.draw_sample()
+        R = ift.ScalingOperator(space, 10.)

-    def d_model():
-        if nonlinearity == "":
-            return R @ ht @ ift.makeOp(A)
-        else:
-            tmp = ht @ ift.makeOp(A)
-            nonlin = getattr(tmp, nonlinearity)()
-            return R @ nonlin
+        def d_model():
+            if nonlinearity == "":
+                return R @ ht @ ift.makeOp(A)
+            else:
+                tmp = ht @ ift.makeOp(A)
+                nonlin = getattr(tmp, nonlinearity)()
+                return R @ nonlin

-    d = d_model()(xi0) + n
+        d = d_model()(xi0) + n

-    if noise == 1:
-        N = None
+        if noise == 1:
+            N = None

-    energy = ift.GaussianEnergy(d, N) @ d_model()
-    ift.extra.check_jacobian_consistency(
-        energy, xi0, ntries=10, tol=1e-6)
-    ift.random.pop_sseq()
+        energy = ift.GaussianEnergy(d, N) @ d_model()
+        ift.extra.check_jacobian_consistency(
+            energy, xi0, ntries=10, tol=1e-6)

@@ -248,16 +248,15 @@ def testOuter(fdomain, domain):
@pmp('sp', _h_spaces + _p_spaces + _pow_spaces)
@pmp('seed', [12, 3])
def testValueInserter(sp, seed):
-    ift.random.push_sseq_from_seed(seed)
-    ind = []
-    for ss in sp.shape:
-        if ss == 1:
-            ind.append(0)
-        else:
-            ind.append(int(ift.random.current_rng().integers(0, ss-1)))
-    op = ift.ValueInserter(sp, ind)
-    ift.extra.consistency_check(op)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        ind = []
+        for ss in sp.shape:
+            if ss == 1:
+                ind.append(0)
+            else:
+                ind.append(int(ift.random.current_rng().integers(0, ss-1)))
+        op = ift.ValueInserter(sp, ind)
+        ift.extra.consistency_check(op)

@pmp('sp', _pow_spaces)
@@ -278,14 +277,13 @@ def testSpecialSum(sp):
    ift.extra.consistency_check(op)


def metatestMatrixProductOperator(sp, mat_shape, seed, **kwargs):
-    ift.random.push_sseq_from_seed(seed)
-    mat = ift.random.current_rng().standard_normal(mat_shape)
-    op = ift.MatrixProductOperator(sp, mat, **kwargs)
-    ift.extra.consistency_check(op)
-    mat = mat + 1j*ift.random.current_rng().standard_normal(mat_shape)
-    op = ift.MatrixProductOperator(sp, mat, **kwargs)
-    ift.extra.consistency_check(op)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        mat = ift.random.current_rng().standard_normal(mat_shape)
+        op = ift.MatrixProductOperator(sp, mat, **kwargs)
+        ift.extra.consistency_check(op)
+        mat = mat + 1j*ift.random.current_rng().standard_normal(mat_shape)
+        op = ift.MatrixProductOperator(sp, mat, **kwargs)
+        ift.extra.consistency_check(op)

@pmp('sp', [ift.RGSpace(10)])
@pmp('spaces', [None, (0,)])
@@ -314,15 +312,14 @@ def testMatrixProductOperator_2d_flatten(sp, seed):
@pmp('seed', [12, 3])
def testPartialExtractor(seed):
-    ift.random.push_sseq_from_seed(seed)
-    tgt = {'a': ift.RGSpace(1), 'b': ift.RGSpace(2)}
-    dom = tgt.copy()
-    dom['c'] = ift.RGSpace(3)
-    dom = ift.MultiDomain.make(dom)
-    tgt = ift.MultiDomain.make(tgt)
-    op = ift.PartialExtractor(dom, tgt)
-    ift.extra.consistency_check(op)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        tgt = {'a': ift.RGSpace(1), 'b': ift.RGSpace(2)}
+        dom = tgt.copy()
+        dom['c'] = ift.RGSpace(3)
+        dom = ift.MultiDomain.make(dom)
+        tgt = ift.MultiDomain.make(tgt)
+        op = ift.PartialExtractor(dom, tgt)
+        ift.extra.consistency_check(op)

@pmp('seed', [12, 3])
def testSlowFieldAdapter(seed):
......
@@ -39,59 +39,58 @@ def testAmplitudesConsistency(rseed, sspace, Astds, offset_std, N):
        sc.add(op(s.extract(op.domain)))
        return sc.mean.val, sc.var.sqrt().val

-    ift.random.push_sseq_from_seed(rseed)
-    nsam = 100
+    with ift.random.Context(rseed):
+        nsam = 100

-    fsspace = ift.RGSpace((12,), (0.4,))
-    if N==2:
-        dofdex1 = [0,0]
-        dofdex2 = [1,0]
-        dofdex3 = [1,1]
-    else:
-        dofdex1, dofdex2, dofdex3 = None, None, None
+        fsspace = ift.RGSpace((12,), (0.4,))
+        if N==2:
+            dofdex1 = [0,0]
+            dofdex2 = [1,0]
+            dofdex3 = [1,1]
+        else:
+            dofdex1, dofdex2, dofdex3 = None, None, None

-    fa = ift.CorrelatedFieldMaker.make(offset_std, 1E-8, '', N, dofdex1)
-    fa.add_fluctuations(sspace, Astds[0], 1E-8, 1.1, 2., 2.1, .5, -2, 1.,
-                        'spatial', dofdex = dofdex2)
-    fa.add_fluctuations(fsspace, Astds[1], 1E-8, 3.1, 1., .5, .1, -4, 1.,
-                        'freq', dofdex = dofdex3)
-    op = fa.finalize()
+        fa = ift.CorrelatedFieldMaker.make(offset_std, 1E-8, '', N, dofdex1)
+        fa.add_fluctuations(sspace, Astds[0], 1E-8, 1.1, 2., 2.1, .5, -2, 1.,
+                            'spatial', dofdex = dofdex2)
+        fa.add_fluctuations(fsspace, Astds[1], 1E-8, 3.1, 1., .5, .1, -4, 1.,
+                            'freq', dofdex = dofdex3)
+        op = fa.finalize()

-    samples = [ift.from_random('normal', op.domain) for _ in range(nsam)]
-    tot_flm, _ = stats(fa.total_fluctuation, samples)
-    offset_amp_std, _ = stats(fa.amplitude_total_offset, samples)
-    intergated_fluct_std0, _ = stats(fa.average_fluctuation(0), samples)
-    intergated_fluct_std1, _ = stats(fa.average_fluctuation(1), samples)
+        samples = [ift.from_random('normal', op.domain) for _ in range(nsam)]
+        tot_flm, _ = stats(fa.total_fluctuation, samples)
+        offset_amp_std, _ = stats(fa.amplitude_total_offset, samples)
+        intergated_fluct_std0, _ = stats(fa.average_fluctuation(0), samples)
+        intergated_fluct_std1, _ = stats(fa.average_fluctuation(1), samples)

-    slice_fluct_std0, _ = stats(fa.slice_fluctuation(0), samples)
-    slice_fluct_std1, _ = stats(fa.slice_fluctuation(1), samples)
+        slice_fluct_std0, _ = stats(fa.slice_fluctuation(0), samples)
+        slice_fluct_std1, _ = stats(fa.slice_fluctuation(1), samples)

-    sams = [op(s) for s in samples]
-    fluct_total = fa.total_fluctuation_realized(sams)
-    fluct_space = fa.average_fluctuation_realized(sams, 0)
-    fluct_freq = fa.average_fluctuation_realized(sams, 1)
-    zm_std_mean = fa.offset_amplitude_realized(sams)
-    sl_fluct_space = fa.slice_fluctuation_realized(sams, 0)
-    sl_fluct_freq = fa.slice_fluctuation_realized(sams, 1)
+        sams = [op(s) for s in samples]
+        fluct_total = fa.total_fluctuation_realized(sams)
+        fluct_space = fa.average_fluctuation_realized(sams, 0)
+        fluct_freq = fa.average_fluctuation_realized(sams, 1)
+        zm_std_mean = fa.offset_amplitude_realized(sams)
+        sl_fluct_space = fa.slice_fluctuation_realized(sams, 0)
+        sl_fluct_freq = fa.slice_fluctuation_realized(sams, 1)

-    assert_allclose(offset_amp_std, zm_std_mean, rtol=0.5)
-    assert_allclose(intergated_fluct_std0, fluct_space, rtol=0.5)
-    assert_allclose(intergated_fluct_std1, fluct_freq, rtol=0.5)
-    assert_allclose(tot_flm, fluct_total, rtol=0.5)
-    assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
-    assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)
+        assert_allclose(offset_amp_std, zm_std_mean, rtol=0.5)
+        assert_allclose(intergated_fluct_std0, fluct_space, rtol=0.5)
+        assert_allclose(intergated_fluct_std1, fluct_freq, rtol=0.5)
+        assert_allclose(tot_flm, fluct_total, rtol=0.5)
+        assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
+        assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)

-    fa = ift.CorrelatedFieldMaker.make(offset_std, .1, '', N, dofdex1)
-    fa.add_fluctuations(fsspace, Astds[1], 1., 3.1, 1., .5, .1, -4, 1., 'freq', dofdex = dofdex3)
-    m = 3.
-    x = fa.moment_slice_to_average(m)
-    fa.add_fluctuations(sspace, x, 1.5, 1.1, 2., 2.1, .5, -2, 1., 'spatial', 0, dofdex = dofdex2)
-    op = fa.finalize()
-    em, estd = stats(fa.slice_fluctuation(0), samples)
+        fa = ift.CorrelatedFieldMaker.make(offset_std, .1, '', N, dofdex1)
+        fa.add_fluctuations(fsspace, Astds[1], 1., 3.1, 1., .5, .1, -4, 1., 'freq', dofdex = dofdex3)
+        m = 3.
+        x = fa.moment_slice_to_average(m)
+        fa.add_fluctuations(sspace, x, 1.5, 1.1, 2., 2.1, .5, -2, 1., 'spatial', 0, dofdex = dofdex2)
+        op = fa.finalize()
+        em, estd = stats(fa.slice_fluctuation(0), samples)

-    assert_allclose(m, em, rtol=0.5)
-    ift.random.pop_sseq()
+        assert_allclose(m, em, rtol=0.5)

    assert op.target[-2] == sspace
    assert op.target[-1] == fsspace

@@ -38,83 +38,79 @@ seed = list2fixture([4, 78, 23])

def testBasics(space, seed):
-    ift.random.push_sseq_from_seed(seed)
-    S = ift.ScalingOperator(space, 1.)
-    s = S.draw_sample()
-    var = ift.Linearization.make_var(s)
-    model = ift.ScalingOperator(var.target, 6.)
-    ift.extra.check_jacobian_consistency(model, var.val)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        S = ift.ScalingOperator(space, 1.)
+        s = S.draw_sample()
+        var = ift.Linearization.make_var(s)
+        model = ift.ScalingOperator(var.target, 6.)
+        ift.extra.check_jacobian_consistency(model, var.val)

@pmp('type1', ['Variable', 'Constant'])
@pmp('type2', ['Variable'])
def testBinary(type1, type2, space, seed):
-    ift.random.push_sseq_from_seed(seed)
-    dom1 = ift.MultiDomain.make({'s1': space})
-    dom2 = ift.MultiDomain.make({'s2': space})
-    dom = ift.MultiDomain.union((dom1, dom2))
-    select_s1 = ift.ducktape(None, dom1, "s1")
-    select_s2 = ift.ducktape(None, dom2, "s2")
-    model = select_s1*select_s2
-    pos = ift.from_random("normal", dom)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = select_s1 + select_s2
-    pos = ift.from_random("normal", dom)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = select_s1.scale(3.)
-    pos = ift.from_random("normal", dom1)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = ift.ScalingOperator(space, 2.456)(select_s1*select_s2)
-    pos = ift.from_random("normal", dom)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = ift.sigmoid(2.456*(select_s1*select_s2))
-    pos = ift.from_random("normal", dom)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    pos = ift.from_random("normal", dom)
-    model = ift.OuterProduct(pos['s1'], ift.makeDomain(space))
-    ift.extra.check_jacobian_consistency(model, pos['s2'], ntries=20)
-    model = select_s1**2
-    pos = ift.from_random("normal", dom1)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = select_s1.clip(-1, 1)
-    pos = ift.from_random("normal", dom1)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    f = ift.from_random("normal", space)
-    model = select_s1.clip(f-0.1, f+1.)
-    pos = ift.from_random("normal", dom1)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    if isinstance(space, ift.RGSpace):
-        model = ift.FFTOperator(space)(select_s1*select_s2)
+    with ift.random.Context(seed):
+        dom1 = ift.MultiDomain.make({'s1': space})
+        dom2 = ift.MultiDomain.make({'s2': space})
+        dom = ift.MultiDomain.union((dom1, dom2))
+        select_s1 = ift.ducktape(None, dom1, "s1")
+        select_s2 = ift.ducktape(None, dom2, "s2")
+        model = select_s1*select_s2
        pos = ift.from_random("normal", dom)
        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    ift.random.pop_sseq()
+        model = select_s1 + select_s2
+        pos = ift.from_random("normal", dom)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        model = select_s1.scale(3.)
+        pos = ift.from_random("normal", dom1)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        model = ift.ScalingOperator(space, 2.456)(select_s1*select_s2)
+        pos = ift.from_random("normal", dom)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        model = ift.sigmoid(2.456*(select_s1*select_s2))
+        pos = ift.from_random("normal", dom)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        pos = ift.from_random("normal", dom)
+        model = ift.OuterProduct(pos['s1'], ift.makeDomain(space))
+        ift.extra.check_jacobian_consistency(model, pos['s2'], ntries=20)
+        model = select_s1**2
+        pos = ift.from_random("normal", dom1)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        model = select_s1.clip(-1, 1)
+        pos = ift.from_random("normal", dom1)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        f = ift.from_random("normal", space)
+        model = select_s1.clip(f-0.1, f+1.)
+        pos = ift.from_random("normal", dom1)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        if isinstance(space, ift.RGSpace):
+            model = ift.FFTOperator(space)(select_s1*select_s2)
+            pos = ift.from_random("normal", dom)
+            ift.extra.check_jacobian_consistency(model, pos, ntries=20)

def testSpecialDistributionOps(space, seed):
-    ift.random.push_sseq_from_seed(seed)
-    S = ift.ScalingOperator(space, 1.)
-    pos = S.draw_sample()
-    alpha = 1.5
-    q = 0.73
-    model = ift.InverseGammaOperator(space, alpha, q)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = ift.UniformOperator(space, alpha, q)
-    ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        S = ift.ScalingOperator(space, 1.)
+        pos = S.draw_sample()
+        alpha = 1.5
+        q = 0.73
+        model = ift.InverseGammaOperator(space, alpha, q)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)
+        model = ift.UniformOperator(space, alpha, q)
+        ift.extra.check_jacobian_consistency(model, pos, ntries=20)

@pmp('neg', [True, False])
def testAdder(space, seed, neg):
-    ift.random.push_sseq_from_seed(seed)
-    S = ift.ScalingOperator(space, 1.)
-    f = S.draw_sample()
-    f1 = S.draw_sample()
-    op = ift.Adder(f1, neg)
-    ift.extra.check_jacobian_consistency(op, f)
-    op = ift.Adder(f1.val.ravel()[0], neg=neg, domain=space)
-    ift.extra.check_jacobian_consistency(op, f)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        S = ift.ScalingOperator(space, 1.)
+        f = S.draw_sample()
+        f1 = S.draw_sample()
+        op = ift.Adder(f1, neg)
+        ift.extra.check_jacobian_consistency(op, f)
+        op = ift.Adder(f1.val.ravel()[0], neg=neg, domain=space)
+        ift.extra.check_jacobian_consistency(op, f)

@pmp('target', [ift.RGSpace(64, distances=.789, harmonic=True),
@@ -123,44 +119,43 @@ def testAdder(space, seed, neg):
@pmp('causal', [True, False])
@pmp('minimum_phase', [True, False])
def testDynamicModel(target, causal, minimum_phase, seed):
-    ift.random.push_sseq_from_seed(seed)
-    dct = {
-        'target': target,
-        'harmonic_padding': None,
-        'sm_s0': 3.,
-        'sm_x0': 1.,
-        'key': 'f',
-        'causal': causal,
-        'minimum_phase': minimum_phase
-    }
-    model, _ = ift.dynamic_operator(**dct)
-    S = ift.ScalingOperator(model.domain, 1.)
-    pos = S.draw_sample()
-    # FIXME I dont know why smaller tol fails for 3D example
-    ift.extra.check_jacobian_consistency(model, pos, tol=1e-5, ntries=20)
-    if len(target.shape) > 1:
+    with ift.random.Context(seed):
        dct = {
-            'target': target,
-            'harmonic_padding': None,
-            'sm_s0': 3.,
-            'sm_x0': 1.,
-            'key': 'f',
-            'lightcone_key': 'c',
-            'sigc': 1.,
-            'quant': 5,
-            'causal': causal,
-            'minimum_phase': minimum_phase
-        }
-        dct['lightcone_key'] = 'c'
-        dct['sigc'] = 1.
-        dct['quant'] = 5
-        model, _ = ift.dynamic_lightcone_operator(**dct)
+            'target': target,
+            'harmonic_padding': None,
+            'sm_s0': 3.,
+            'sm_x0': 1.,
+            'key': 'f',
+            'causal': causal,
+            'minimum_phase': minimum_phase
+        }
+        model, _ = ift.dynamic_operator(**dct)
        S = ift.ScalingOperator(model.domain, 1.)
        pos = S.draw_sample()
        # FIXME I dont know why smaller tol fails for 3D example
-        ift.extra.check_jacobian_consistency(
-            model, pos, tol=1e-5, ntries=20)
-    ift.random.pop_sseq()
+        ift.extra.check_jacobian_consistency(model, pos, tol=1e-5, ntries=20)
+        if len(target.shape) > 1:
+            dct = {
+                'target': target,
+                'harmonic_padding': None,
+                'sm_s0': 3.,
+                'sm_x0': 1.,
+                'key': 'f',
+                'lightcone_key': 'c',
+                'sigc': 1.,
+                'quant': 5,
+                'causal': causal,
+                'minimum_phase': minimum_phase
+            }
+            dct['lightcone_key'] = 'c'
+            dct['sigc'] = 1.
+            dct['quant'] = 5
+            model, _ = ift.dynamic_lightcone_operator(**dct)
+            S = ift.ScalingOperator(model.domain, 1.)
+            pos = S.draw_sample()
+            # FIXME I dont know why smaller tol fails for 3D example
+            ift.extra.check_jacobian_consistency(
+                model, pos, tol=1e-5, ntries=20)

@pmp('h_space', _h_spaces)
......
@@ -206,12 +206,11 @@ def testOuter(fdomain, domain):
@pmp('sp', _h_spaces + _p_spaces + _pow_spaces)
@pmp('seed', [12, 3])
def testValueInserter(sp, seed):
-    ift.random.push_sseq_from_seed(seed)
-    ind = []
-    for ss in sp.shape:
-        if ss == 1:
-            ind.append(0)
-        else:
-            ind.append(int(ift.random.current_rng().integers(0, ss - 1)))
-    _check_repr(ift.ValueInserter(sp, ind))
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        ind = []
+        for ss in sp.shape:
+            if ss == 1:
+                ind.append(0)
+            else:
+                ind.append(int(ift.random.current_rng().integers(0, ss - 1)))
+        _check_repr(ift.ValueInserter(sp, ind))

@@ -32,11 +32,10 @@ from ..common import setup_function, teardown_function
])
@pytest.mark.parametrize('seed', [13, 2])
def test_value_inserter(sp, seed):
-    ift.random.push_sseq_from_seed(seed)
-    ind = tuple([int(ift.random.current_rng().integers(0, ss - 1)) for ss in sp.shape])
-    op = ift.ValueInserter(sp, ind)
-    f = ift.from_random('normal', op.domain)
-    ift.random.pop_sseq()
+    with ift.random.Context(seed):
+        ind = tuple([int(ift.random.current_rng().integers(0, ss - 1)) for ss in sp.shape])
+        op = ift.ValueInserter(sp, ind)
+        f = ift.from_random('normal', op.domain)
    inp = f.val
    ret = op(f).val
    assert_(ret[ind] == inp)
......
......@@ -21,52 +21,41 @@ import nifty6 as ift