Commit b4452b8b authored by Philipp Arras's avatar Philipp Arras
Browse files

Add fast test

parent 55fb6a06
......@@ -153,7 +153,7 @@ class VariableCovarianceGaussianEnergy(EnergyOperator):
Data type of the samples. Usually either 'np.float*' or 'np.complex*'
"""
def __init__(self, domain, residual_key, inverse_covariance_key, sampling_dtype, _debugging_factor=1.):
def __init__(self, domain, residual_key, inverse_covariance_key, sampling_dtype):
self._kr = str(residual_key)
self._ki = str(inverse_covariance_key)
dom = DomainTuple.make(domain)
......@@ -161,7 +161,6 @@ class VariableCovarianceGaussianEnergy(EnergyOperator):
self._dt = {self._kr: sampling_dtype, self._ki: np.float64}
_check_sampling_dtype(self._domain, self._dt)
self._cplx = _iscomplex(sampling_dtype)
self._factor = float(_debugging_factor)
def apply(self, x):
self._check_input(x)
......@@ -173,8 +172,7 @@ class VariableCovarianceGaussianEnergy(EnergyOperator):
if not x.want_metric:
return res
met = i.val if self._cplx else 0.5*i.val
# FIXME DO NOT MERGE THAT
met = MultiField.from_dict({self._kr: i.val, self._ki: self._factor*met**(-2)})
met = MultiField.from_dict({self._kr: i.val, self._ki: met**(-2)})
return res.add_metric(SamplingDtypeSetter(makeOp(met), self._dt))
def _simplify_for_constant_input_nontrivial(self, c_inp):
......
......@@ -142,3 +142,58 @@ def test_VariableCovarianceGaussianEnergy(dtype, factor):
energy_tester(mf, get_noisy_data, E_init)
else:
energy_tester(mf, get_noisy_data, E_init)
def normal(dtype, shape):
    """Return standard-normal random samples of the given dtype and shape.

    Thin convenience wrapper around ``ift.random.Random.normal`` so the
    tests below read compactly.
    """
    draw = ift.random.Random.normal
    return draw(dtype, shape)
def test_variablecovenergy_fast(dtype):
    """Fast consistency test for VariableCovarianceGaussianEnergy.

    Checks the analytic gradient against the operator's linearization and
    verifies, via a Monte-Carlo estimate over many noise realizations, that
    the empirical Fisher metric matches the operator's analytic metric in
    both the residual and the inverse-covariance blocks.
    """
    # FIXME: complex dtypes are not exercised yet; force float64 for now.
    dtype = np.float64
    npix = 2
    shp = (npix,)
    ntries = 200000
    dom = ift.UnstructuredDomain(shp)
    e = ift.VariableCovarianceGaussianEnergy(dom, 'resi', 'icov', dtype)
    # Expansion point
    resi = normal(dtype, shp)
    icov = normal(np.float64, shp)**2 + 4.  # +4 keeps icov safely positive
    sig = np.sqrt(1/icov)
    pos = ift.makeField(e.domain, {'resi': resi, 'icov': icov})
    # ntries noise realizations drawn around the residual with std sig
    sampled_data = normal(dtype, (ntries, npix))*sig + resi
    # Analytic gradients of the energy for each realization
    grad0 = (pos['resi'].val - sampled_data)*icov
    # d/d(icov) has an extra 1/2 for real dtypes only
    fac = 1 if np.issubdtype(dtype, np.complexfloating) else 0.5
    grad1 = 0.5*np.abs(pos['resi'].val - sampled_data)**2 - fac/pos['icov'].val
    # Test correctness of gradient against the operator's linearization
    etest = e.partial_insert(ift.Adder(ift.makeField(dom, -sampled_data[0])).ducktape('resi').ducktape_left('resi'))
    eresult = etest(ift.Linearization.make_var(pos))
    grad0ref = eresult.gradient.val['resi']
    grad1ref = eresult.gradient.val['icov']
    np.testing.assert_allclose(grad0[0], grad0ref)
    np.testing.assert_allclose(grad1[0], grad1ref)
    # Monte-Carlo estimate of the metric applied to a test vector:
    # E[ (g . v) g* ] approximates (metric v)
    test_vec_resi = normal(dtype, shp)
    test_vec_icov = normal(np.float64, shp)
    metric00 = np.sum(grad0*test_vec_resi, axis=1)[:, None] * grad0.conjugate()
    # BUGFIX: take the spread over the ntries samples BEFORE averaging;
    # previously np.std was applied to the already-averaged (npix,) array.
    metric00std = np.std(metric00, axis=0)
    metric00 = np.mean(metric00, axis=0)
    metric11 = np.sum(grad1*test_vec_icov, axis=1)[:, None] * grad1.conjugate()
    metric11std = np.std(metric11, axis=0)
    # BUGFIX: was np.mean(grad1, axis=0), averaging the wrong array.
    metric11 = np.mean(metric11, axis=0)
    # Analytic metric: icov on the residual block, icov^-2 on the icov block
    metric00ref = icov*test_vec_resi
    metric11ref = (1/icov**2)*test_vec_icov
    # Positive checks: estimate agrees with the analytic metric (in std units)
    np.testing.assert_allclose(metric00ref/metric00std, metric00/metric00std, atol=0.1)
    np.testing.assert_allclose(metric11ref/metric11std, metric11/metric11std, atol=0.1)
    # Negative checks: a wrongly scaled metric must be detected
    for factor in [0.01, 0.5, 2, 100]:
        with pytest.raises(AssertionError):
            np.testing.assert_allclose(metric00ref/metric00std, factor*metric00/metric00std, atol=0.1)
        with pytest.raises(AssertionError):
            # BUGFIX: scale by factor, mirroring the metric00 check above
            np.testing.assert_allclose(metric11ref/metric11std, factor*metric11/metric11std, atol=0.1)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment