Commit 03e2783f authored by Martin Reinecke

merge

parents 5257b709 cc084a9b
Pipeline #65032 passed with stages in 9 minutes and 5 seconds
@@ -240,7 +240,7 @@
 "sh = Sh.draw_sample()\n",
 "noiseless_data=R(sh)\n",
 "noise_amplitude = np.sqrt(0.2)\n",
-"N = ift.ScalingOperator(noise_amplitude**2, s_space)\n",
+"N = ift.ScalingOperator(s_space, noise_amplitude**2)\n",
 "\n",
 "n = ift.Field.from_random(domain=s_space, random_type='normal',\n",
 "                          std=noise_amplitude, mean=0)\n",
@@ -391,7 +391,7 @@
 "source": [
 "# Operators\n",
 "Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)\n",
-"N = ift.ScalingOperator(noise_amplitude**2,s_space)\n",
+"N = ift.ScalingOperator(s_space, noise_amplitude**2)\n",
 "# R is defined below\n",
 "\n",
 "# Fields\n",
@@ -569,7 +569,7 @@
 "\n",
 "# Operators\n",
 "Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)\n",
-"N = ift.ScalingOperator(sigma2,s_space)\n",
+"N = ift.ScalingOperator(s_space, sigma2)\n",
 "\n",
 "# Fields and data\n",
 "sh = Sh.draw_sample()\n",
%% Cell type:markdown id: tags:
# A NIFTy demonstration
%% Cell type:markdown id: tags:
## IFT: Big Picture
IFT starting point:
$$d = Rs+n$$
Typically, $s$ is a continuous field and $d$ a discrete data vector. In particular, $R$ is not invertible.
IFT aims at **inverting** the above non-invertible problem in the **best possible way** using Bayesian statistics.
## NIFTy
NIFTy (Numerical Information Field Theory) is a Python framework in which IFT problems can be tackled easily.
Main Interfaces (illustrated in the short sketch after this list):
- **Spaces**: Cartesian, 2-Spheres (Healpix, Gauss-Legendre) and their respective harmonic spaces.
- **Fields**: Defined on spaces.
- **Operators**: Acting on fields.
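%% Cell type:markdown id: tags:
A minimal sketch of these three interfaces, using only calls that appear later in this demo (the concrete space size is an arbitrary choice for illustration):
%% Cell type:code id: tags:
``` python
import nifty6 as ift

space = ift.RGSpace(64)                        # a Cartesian space
field = ift.Field.from_random(domain=space,    # a field defined on that space
                              random_type='normal', std=1., mean=0.)
op = ift.ScalingOperator(space, 2.)            # an operator acting on fields
print(op(field).domain)
```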
%% Cell type:markdown id: tags:
## Wiener Filter: Formulae
### Assumptions
- $d = Rs + n$, with $R$ a linear operator.
- $\mathcal P (s) = \mathcal G (s,S)$, $\mathcal P (n) = \mathcal G (n,N)$, where $S, N$ are positive definite covariance matrices.
### Posterior
The posterior is given by:
$$\mathcal P (s|d) \propto \mathcal P(s,d) = \mathcal G(d-Rs,N) \,\mathcal G(s,S) \propto \mathcal G (s-m,D)$$
where
$$\begin{align}
m &= Dj \\
D^{-1}&= (S^{-1} +R^\dagger N^{-1} R )\\
j &= R^\dagger N^{-1} d
\end{align}$$
Let us implement this in NIFTy!
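%% Cell type:markdown id: tags:
As a quick sanity check before the NIFTy implementation, the following cell sketches the formulae above with dense NumPy matrices on a hypothetical two-pixel problem. Every number in it is made up purely for illustration and is not part of the actual demo.
%% Cell type:code id: tags:
``` python
import numpy as np

# Toy two-pixel setup, chosen only to illustrate m = Dj.
R_toy = np.array([[1., 0.],
                  [0., 1.]])     # toy response
S_toy = np.diag([2., 0.5])       # toy signal covariance
N_toy = np.diag([0.2, 0.2])      # toy noise covariance
d_toy = np.array([1.0, -0.3])    # toy data

D_inv = np.linalg.inv(S_toy) + R_toy.T @ np.linalg.inv(N_toy) @ R_toy
j_toy = R_toy.T @ np.linalg.inv(N_toy) @ d_toy
m_toy = np.linalg.solve(D_inv, j_toy)   # posterior mean m = D j
print(m_toy)
```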
%% Cell type:markdown id: tags:
## Wiener Filter: Example
- We assume statistical homogeneity and isotropy. Therefore the signal covariance $S$ is diagonal in harmonic space, and is described by a one-dimensional power spectrum, assumed here as $$P(k) = P_0\,\left(1+\left(\frac{k}{k_0}\right)^2\right)^{-\gamma /2},$$
with $P_0 = 0.2, k_0 = 5, \gamma = 4$.
- $N = 0.2 \cdot \mathbb{1}$.
- Number of data points $N_{pix} = 512$.
- Reconstruction in harmonic space.
- Response operator:
$$R = FFT_{\text{harmonic} \rightarrow \text{position}}$$
%% Cell type:code id: tags:
``` python
N_pixels = 512 # Number of pixels
def pow_spec(k):
    P0, k0, gamma = [.2, 5, 4]
    return P0 / ((1. + (k/k0)**2)**(gamma / 2))
```
%% Cell type:markdown id: tags:
## Wiener Filter: Implementation
%% Cell type:markdown id: tags:
### Import Modules
%% Cell type:code id: tags:
``` python
import numpy as np
np.random.seed(40)
import nifty6 as ift
import matplotlib.pyplot as plt
%matplotlib inline
```
%% Cell type:markdown id: tags:
### Implement Propagator
%% Cell type:code id: tags:
``` python
def Curvature(R, N, Sh):
    IC = ift.GradientNormController(iteration_limit=50000,
                                    tol_abs_gradnorm=0.1)
    # WienerFilterCurvature is (R.adjoint*N.inverse*R + Sh.inverse) plus
    # some handy helper methods.
    return ift.WienerFilterCurvature(R, N, Sh, iteration_controller=IC,
                                     iteration_controller_sampling=IC)
```
%% Cell type:markdown id: tags:
### Conjugate Gradient Preconditioning
- $D$ is defined via:
$$D^{-1} = \mathcal S_h^{-1} + R^\dagger N^{-1} R.$$
In the end, we want to apply $D$ to $j$, i.e. we need the inverse action of $D^{-1}$. This is done numerically (algorithm: *Conjugate Gradient*).
<!--
- One can define the *condition number* of a non-singular and normal matrix $A$:
$$\kappa (A) := \frac{|\lambda_{\text{max}}|}{|\lambda_{\text{min}}|},$$
where $\lambda_{\text{max}}$ and $\lambda_{\text{min}}$ are the largest and smallest eigenvalue of $A$, respectively.
- The larger $\kappa$ the slower Conjugate Gradient.
- By default, conjugate gradient solves: $D^{-1} m = j$ for $m$, where $D^{-1}$ can be badly conditioned. If one knows a non-singular matrix $T$ for which $TD^{-1}$ is better conditioned, one can solve the equivalent problem:
$$\tilde A m = \tilde j,$$
where $\tilde A = T D^{-1}$ and $\tilde j = Tj$.
- In our case $S^{-1}$ is responsible for the bad conditioning of $D$ depending on the chosen power spectrum. Thus, we choose
$$T = \mathcal F^\dagger S_h^{-1} \mathcal F.$$
-->
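%% Cell type:markdown id: tags:
To make the Conjugate Gradient step concrete, the cell below is a minimal textbook CG sketch in plain NumPy, solving $Am = j$ for a symmetric positive definite $A$ standing in for $D^{-1}$. It is an illustration only, not NIFTy's implementation, and the toy matrix is made up.
%% Cell type:code id: tags:
``` python
import numpy as np

def cg_solve(A, j, tol=1e-10, maxiter=1000):
    # Standard conjugate gradient for a symmetric positive definite A.
    m = np.zeros_like(j)
    r = j - A @ m
    p = r.copy()
    rs = r @ r
    for _ in range(maxiter):
        Ap = A @ p
        alpha = rs / (p @ Ap)
        m += alpha*p
        r -= alpha*Ap
        rs_new = r @ r
        if np.sqrt(rs_new) < tol:
            break
        p = r + (rs_new/rs)*p
        rs = rs_new
    return m

A_toy = np.array([[4., 1.], [1., 3.]])  # toy SPD stand-in for D^{-1}
b_toy = np.array([1., 2.])
print(cg_solve(A_toy, b_toy))           # agrees with np.linalg.solve(A_toy, b_toy)
```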
%% Cell type:markdown id: tags:
### Generate Mock Data
- Generate fields $s$ and $n$ with the given covariances.
- Calculate $d$.
%% Cell type:code id: tags:
``` python
s_space = ift.RGSpace(N_pixels)
h_space = s_space.get_default_codomain()
HT = ift.HarmonicTransformOperator(h_space, target=s_space)
# Operators
Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
R = HT #*ift.create_harmonic_smoothing_operator((h_space,), 0, 0.02)
# Fields and data
sh = Sh.draw_sample()
noiseless_data = R(sh)
noise_amplitude = np.sqrt(0.2)
N = ift.ScalingOperator(s_space, noise_amplitude**2)

n = ift.Field.from_random(domain=s_space, random_type='normal',
                          std=noise_amplitude, mean=0)
d = noiseless_data + n
j = R.adjoint_times(N.inverse_times(d))
curv = Curvature(R=R, N=N, Sh=Sh)
D = curv.inverse
```
%% Cell type:markdown id: tags:
### Run Wiener Filter
%% Cell type:code id: tags:
``` python
m = D(j)
```
%% Cell type:markdown id: tags:
### Signal Reconstruction
%% Cell type:code id: tags:
``` python
# Get signal data and reconstruction data
s_data = HT(sh).val
m_data = HT(m).val
d_data = d.val
plt.figure(figsize=(15,10))
plt.plot(s_data, 'r', label="Signal", linewidth=3)
plt.plot(d_data, 'k.', label="Data")
plt.plot(m_data, 'k', label="Reconstruction",linewidth=3)
plt.title("Reconstruction")
plt.legend()
plt.show()
```
%% Cell type:code id: tags:
``` python
plt.figure(figsize=(15,10))
plt.plot(s_data - s_data, 'r', label="Signal", linewidth=3)
plt.plot(d_data - s_data, 'k.', label="Data")
plt.plot(m_data - s_data, 'k', label="Reconstruction",linewidth=3)
plt.axhspan(-noise_amplitude,noise_amplitude, facecolor='0.9', alpha=.5)
plt.title("Residuals")
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
### Power Spectrum
%% Cell type:code id: tags:
``` python
s_power_data = ift.power_analyze(sh).val
m_power_data = ift.power_analyze(m).val
plt.figure(figsize=(15,10))
plt.loglog()
plt.xlim(1, int(N_pixels/2))
ymin = min(m_power_data)
plt.ylim(ymin, 1)
xs = np.arange(1,int(N_pixels/2),.1)
plt.plot(xs, pow_spec(xs), label="True Power Spectrum", color='k',alpha=0.5)
plt.plot(s_power_data, 'r', label="Signal")
plt.plot(m_power_data, 'k', label="Reconstruction")
plt.axhline(noise_amplitude**2 / N_pixels, color="k", linestyle='--', label="Noise level", alpha=.5)
plt.axhspan(noise_amplitude**2 / N_pixels, ymin, facecolor='0.9', alpha=.5)
plt.title("Power Spectrum")
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
## Wiener Filter on Incomplete Data
%% Cell type:code id: tags:
``` python
# Operators
Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
N = ift.ScalingOperator(s_space, noise_amplitude**2)
# R is defined below
# Fields
sh = Sh.draw_sample()
s = HT(sh)
n = ift.Field.from_random(domain=s_space, random_type='normal',
                          std=noise_amplitude, mean=0)
```
%% Cell type:markdown id: tags:
### Partially Lose Data
%% Cell type:code id: tags:
``` python
# Mask out the pixel range [l, h), i.e. 20% of the domain
l = int(N_pixels * 0.2)
h = int(N_pixels * 0.2 * 2)
mask = np.full(s_space.shape, 1.)
mask[l:h] = 0
mask = ift.Field.from_raw(s_space, mask)
R = ift.DiagonalOperator(mask)(HT)
n = n.val_rw()
n[l:h] = 0
n = ift.Field.from_raw(s_space, n)
d = R(sh) + n
```
%% Cell type:code id: tags:
``` python
curv = Curvature(R=R, N=N, Sh=Sh)
D = curv.inverse
j = R.adjoint_times(N.inverse_times(d))
m = D(j)
```
%% Cell type:markdown id: tags:
### Compute Uncertainty
%% Cell type:code id: tags:
``` python
m_mean, m_var = ift.probe_with_posterior_samples(curv, HT, 200)
```
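%% Cell type:markdown id: tags:
Roughly speaking, this probing draws samples from the posterior and accumulates their mean and variance. The cell below is a hedged sketch of that idea using only calls already used elsewhere in this notebook; it illustrates the concept and is not the actual implementation of `probe_with_posterior_samples`.
%% Cell type:code id: tags:
``` python
from functools import reduce

# Draw zero-mean samples with covariance D, transform them to position
# space, and estimate the pixel-wise mean and variance from them.
samples = [HT(curv.draw_sample(from_inverse=True)) for _ in range(200)]
mean_est = reduce(lambda a, b: a + b, samples) / len(samples)
var_est = reduce(lambda a, b: a + b,
                 [(s - mean_est)**2 for s in samples]) / len(samples)
```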
%% Cell type:markdown id: tags:
### Get Data
%% Cell type:code id: tags:
``` python
# Get signal data and reconstruction data
s_data = s.val
m_data = HT(m).val
m_var_data = m_var.val
uncertainty = np.sqrt(m_var_data)
d_data = d.val_rw()
# Set lost data to NaN for proper plotting
d_data[d_data == 0] = np.nan
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(15,10))
plt.axvspan(l, h, facecolor='0.8',alpha=0.5)
plt.fill_between(range(N_pixels), m_data - uncertainty, m_data + uncertainty, facecolor='0.5', alpha=0.5)
plt.plot(s_data, 'r', label="Signal", alpha=1, linewidth=3)
plt.plot(d_data, 'k.', label="Data")
plt.plot(m_data, 'k', label="Reconstruction", linewidth=3)
plt.title("Reconstruction of incomplete data")
plt.legend()
```
%% Cell type:markdown id: tags:
# 2d Example
%% Cell type:code id: tags:
``` python
N_pixels = 256 # Number of pixels
sigma2 = 2. # Noise variance
def pow_spec(k):
    P0, k0, gamma = [.2, 2, 4]
    return P0 * (1. + (k/k0)**2)**(-gamma/2)
s_space = ift.RGSpace([N_pixels, N_pixels])
```
%% Cell type:code id: tags:
``` python
h_space = s_space.get_default_codomain()
HT = ift.HarmonicTransformOperator(h_space, s_space)

# Operators
Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
N = ift.ScalingOperator(s_space, sigma2)

# Fields and data
sh = Sh.draw_sample()
n = ift.Field.from_random(domain=s_space, random_type='normal',
                          std=np.sqrt(sigma2), mean=0)
# Lose some data
l = int(N_pixels * 0.33)
h = int(N_pixels * 0.33 * 2)
mask = np.full(s_space.shape, 1.)
mask[l:h,l:h] = 0.
mask = ift.Field.from_raw(s_space, mask)
R = ift.DiagonalOperator(mask)(HT)
n = n.val_rw()
n[l:h, l:h] = 0
n = ift.Field.from_raw(s_space, n)
curv = Curvature(R=R, N=N, Sh=Sh)
D = curv.inverse
d = R(sh) + n
j = R.adjoint_times(N.inverse_times(d))
# Run Wiener filter
m = D(j)
# Uncertainty
m_mean, m_var = ift.probe_with_posterior_samples(curv, HT, 20)
# Get data
s_data = HT(sh).val
m_data = HT(m).val
m_var_data = m_var.val
d_data = d.val
uncertainty = np.sqrt(np.abs(m_var_data))
```
%% Cell type:code id: tags:
``` python
cm = ['magma', 'inferno', 'plasma', 'viridis'][1]
mi = np.min(s_data)
ma = np.max(s_data)
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
data = [s_data, d_data]
caption = ["Signal", "Data"]
for ax in axes.flat:
    im = ax.imshow(data.pop(0), interpolation='nearest', cmap=cm, vmin=mi,
                   vmax=ma)
    ax.set_title(caption.pop(0))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
```
%% Cell type:code id: tags:
``` python
mi = np.min(s_data)
ma = np.max(s_data)
fig, axes = plt.subplots(3, 2, figsize=(15, 22.5))
sample = HT(curv.draw_sample(from_inverse=True)+m).val
post_mean = (m_mean + HT(m)).val
data = [s_data, m_data, post_mean, sample, s_data - m_data, uncertainty]
caption = ["Signal", "Reconstruction", "Posterior mean", "Sample", "Residuals", "Uncertainty Map"]
for ax in axes.flat:
    im = ax.imshow(data.pop(0), interpolation='nearest', cmap=cm, vmin=mi, vmax=ma)
    ax.set_title(caption.pop(0))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
```
%% Cell type:markdown id: tags:
### Is the uncertainty map reliable?
For a well-calibrated Gaussian posterior, roughly 68% of the pixels should deviate from the truth by less than one posterior standard deviation.
%% Cell type:code id: tags:
``` python
precise = (np.abs(s_data-m_data) < uncertainty)
print("Error within uncertainty map bounds: " + str(np.sum(precise) * 100 / N_pixels**2) + "%")
plt.figure(figsize=(15,10))
plt.imshow(precise.astype(float), cmap="brg")
plt.colorbar()
```
%% Cell type:markdown id: tags:
# Start Coding
## NIFTy Repository + Installation guide
https://gitlab.mpcdf.mpg.de/ift/NIFTy
NIFTy v5 is **more or less stable!**
@@ -113,7 +113,7 @@ if __name__ == '__main__':

     # Set the noise covariance N
     noise = 5.
-    N = ift.ScalingOperator(noise, data_space)
+    N = ift.ScalingOperator(data_space, noise)

     # Create mock data
     MOCK_SIGNAL = S.draw_sample()
@@ -74,7 +74,7 @@ if __name__ == '__main__':
     # Specify noise
     data_space = R.target
     noise = .001
-    N = ift.ScalingOperator(noise, data_space)
+    N = ift.ScalingOperator(data_space, noise)

     # Generate mock signal and data
     mock_position = ift.from_random('normal', signal_response.domain)
@@ -93,7 +93,7 @@ if __name__ == '__main__':
     # Specify noise
     data_space = R.target
     noise = .001
-    N = ift.ScalingOperator(noise, data_space)
+    N = ift.ScalingOperator(data_space, noise)

     # Generate mock signal and data
     mock_position = ift.from_random('normal', signal_response.domain)
@@ -72,7 +72,7 @@ def make_adjust_variances_hamiltonian(a,
     x = (a.conjugate()*a).real
     if scaling is not None:
-        x = ScalingOperator(scaling, x.target)(x)
+        x = ScalingOperator(x.target, scaling)(x)

     return StandardHamiltonian(InverseGammaLikelihood(d_eval/2.)(x),
                                ic_samp=ic_samp)
@@ -74,7 +74,7 @@ def _make_dynamic_operator(target,
     ops['FFT'] = FFT
     ops['Real'] = Real
     if harmonic_padding is None:
-        CentralPadd = ScalingOperator(1., FFT.target)
+        CentralPadd = ScalingOperator(FFT.target, 1.)
     else:
         if isinstance(harmonic_padding, int):
             harmonic_padding = list((harmonic_padding,)*len(FFT.target.shape))
@@ -123,7 +123,7 @@ def _make_dynamic_operator(target,
     c = FieldAdapter(UnstructuredDomain(len(sigc)), keys[1])
     c = makeOp(Field(c.target, np.array(sigc)))(c)
-    lightspeed = ScalingOperator(-0.5, c.target)(c).exp()
+    lightspeed = ScalingOperator(c.target, -0.5)(c).exp()
     scaling = np.array(m.target[0].distances[1:])/m.target[0].distances[0]
     scaling = DiagonalOperator(Field(c.target, scaling))
     ops['lightspeed'] = scaling(lightspeed)
@@ -406,7 +406,7 @@ class Linearization(object):
             the requested Linearization
         """
         from .operators.scaling_operator import ScalingOperator
-        return Linearization(field, ScalingOperator(1., field.domain),
+        return Linearization(field, ScalingOperator(field.domain, 1.),
                              want_metric=want_metric)

     @staticmethod
@@ -492,7 +492,7 @@ class Linearization(object):
         if len(constants) == 0:
             return Linearization.make_var(field, want_metric)
         else:
-            ops = {key: ScalingOperator(0. if key in constants else 1., dom)
+            ops = {key: ScalingOperator(dom, 0. if key in constants else 1.)
                    for key, dom in field.domain.items()}
             bdop = BlockDiagonalOperator(field.domain, ops)
             return Linearization(field, bdop, want_metric=want_metric)
@@ -72,7 +72,7 @@ class ChainOperator(LinearOperator):
                 break
         if fct != 1 or len(opsnew) == 0:
             # have to add the scaling operator at the end
-            opsnew.append(ScalingOperator(fct, lastdom))
+            opsnew.append(ScalingOperator(lastdom, fct))
         ops = opsnew
         # combine DiagonalOperators where possible
         opsnew = []
@@ -276,7 +276,7 @@ class StudentTEnergy(EnergyOperator):
             return Field.scalar(v)
         if not x.want_metric:
             return v
-        met = ScalingOperator((self._theta+1)/(self._theta+3), self.domain)
+        met = ScalingOperator(self.domain, (self._theta+1)/(self._theta+3))
         met = SandwichOperator.make(x.jac, met)
         return v.add_metric(met)
@@ -345,7 +345,7 @@ def HarmonicSmoothingOperator(domain, sigma, space=None):
     if sigma < 0.:
         raise ValueError("sigma must be non-negative")
     if sigma == 0.:
-        return ScalingOperator(1., domain)
+        return ScalingOperator(domain, 1.)

     domain = DomainTuple.make(domain)
     space = utilities.infer_space(domain, space)
@@ -60,7 +60,7 @@ class Operator(metaclass=NiftyMeta):
         if factor == 1:
             return self
         from .scaling_operator import ScalingOperator
-        return ScalingOperator(factor, self.target)(self)
+        return ScalingOperator(self.target, factor)(self)

     def conjugate(self):
         from .simple_linear_operators import ConjugationOperator
@@ -57,7 +57,7 @@ class SandwichOperator(EndomorphicOperator):
         if cheese is not None and not isinstance(cheese, LinearOperator):
             raise TypeError("cheese must be a linear operator or None")
         if cheese is None:
-            cheese = ScalingOperator(1., bun.target)
+            cheese = ScalingOperator(bun.target, 1.)
             op = bun.adjoint(bun)
         else:
             op = bun.adjoint(cheese(bun))
@@ -26,10 +26,10 @@ class ScalingOperator(EndomorphicOperator):

     Parameters
     ----------
-    factor : scalar
-        The multiplication factor
     domain : Domain or tuple of Domain or DomainTuple
         The domain on which the Operator's input Field is defined.
+    factor : scalar
+        The multiplication factor

     Notes
     -----
@@ -50,13 +50,13 @@ class ScalingOperator(EndomorphicOperator):
     somewhere else.
     """

-    def __init__(self, factor, domain):
+    def __init__(self, domain, factor):
         from ..sugar import makeDomain

         if not np.isscalar(factor):
             raise TypeError("Scalar required")
-        self._factor = factor
         self._domain = makeDomain(domain)
+        self._factor = factor
         self._capability = self._all_ops

     def apply(self, x, mode):
@@ -81,7 +81,7 @@ class ScalingOperator(EndomorphicOperator):
             fct = np.conj(fct)
         if trafo & self.INVERSE_BIT:
             fct = 1./fct
-        return ScalingOperator(fct, self._domain)
+        return ScalingOperator(self._domain, fct)

     def _get_fct(self, from_inverse):
         fct = self._factor
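For orientation, the net effect of these changes is that `ScalingOperator` now takes the domain first and the factor second. A minimal usage sketch under the new convention (the concrete space below is an arbitrary choice for illustration):

``` python
import numpy as np
import nifty6 as ift

dom = ift.RGSpace(16)
op = ift.ScalingOperator(dom, 2.5)   # new order: domain, then factor
x = ift.Field.from_random(domain=dom, random_type='normal', std=1., mean=0.)
# The operator simply scales the field by the factor.
assert np.allclose(op(x).val, (2.5*x).val)
```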
@@ -99,7 +99,7 @@ class SumOperator(LinearOperator):
                 break
         if sum != 0 or len(opsnew) == 0:
             # have to add the scaling operator at the end
-            opsnew.append(ScalingOperator(sum, lastdom))
+            opsnew.append(ScalingOperator(lastdom, sum))
             negnew.append(False)
         ops = opsnew
@@ -493,7 +493,7 @@ def calculate_position(operator, output):
     if output.domain != operator.target:
         raise TypeError
     cov = 1e-3*output.val.max()**2
-    invcov = ScalingOperator(cov, output.domain).inverse
+    invcov = ScalingOperator(output.domain, cov).inverse
     d = output + invcov.draw_sample(from_inverse=True)
     lh = GaussianEnergy(d, invcov)(operator)
     H = StandardHamiltonian(
@@ -37,7 +37,7 @@ PARAMS = product(SEEDS, SPACES)

 @pytest.fixture(params=PARAMS)
 def field(request):
     np.random.seed(request.param[0])
-    S = ift.ScalingOperator(1., request.param[1])
+    S = ift.ScalingOperator(request.param[1], 1.)
     s = S.draw_sample()
     return ift.MultiField.from_dict({'s1': s})['s1']
@@ -76,7 +76,7 @@ def test_hamiltonian_and_KL(field):
     lh = ift.GaussianEnergy(domain=space)
     hamiltonian = ift.StandardHamiltonian(lh)
     ift.extra.check_jacobian_consistency(hamiltonian, field)
-    S = ift.ScalingOperator(1., space)
+    S = ift.ScalingOperator(space, 1.)
     samps = [S.draw_sample() for i in range(3)]
     kl = ift.AveragedEnergy(hamiltonian, samps)
     ift.extra.check_jacobian_consistency(kl, field)
@@ -51,9 +51,9 @@ def test_gaussian_energy(space, nonlinearity, noise, seed):
     pspec = ift.PS_field(pspace, pspec)
     A = Dist(ift.sqrt(pspec))
-    N = ift.ScalingOperator(noise, space)
+    N = ift.ScalingOperator(space, noise)
     n = N.draw_sample()
-    R = ift.ScalingOperator(10., space)
+    R = ift.ScalingOperator(space, 10.)

     def d_model():
         if nonlinearity == "":
@@ -58,7 +58,7 @@ def test_dataconv():

 def test_blockdiagonal():
     op = ift.BlockDiagonalOperator(
-        dom, {"d1": ift.ScalingOperator(20., dom["d1"])})
+        dom, {"d1": ift.ScalingOperator(dom["d1"], 20.)})
     op2 = op(op)
     ift.extra.consistency_check(op2)
     assert_equal(type(op2), ift.BlockDiagonalOperator)
@@ -65,7 +65,7 @@ def test_times_inverse_times(space1, space2):

 def test_sum(space1):
     op1 = ift.makeOp(ift.Field.full(space1, 2.))
-    op2 = ift.ScalingOperator(3., space1)
+    op2 = ift.ScalingOperator(space1, 3.)
     full_op = op1 + op2 - (op2 - op1) + op1 + op1 + op2
     x = ift.Field.full(space1, 1.)
     res = full_op(x)
@@ -75,7 +75,7 @@ def test_sum(space1):

 def test_chain(space1):
     op1 = ift.makeOp(ift.Field.full(space1, 2.))
-    op2 = ift.ScalingOperator(3., space1)
+    op2 = ift.ScalingOperator(space1, 3.,)
     full_op = op1(op2)(op2)(op1)(op1)(op1)(op2)
     x = ift.Field.full(space1, 1.)
     res = full_op(x)
@@ -85,7 +85,7 @@ def test_chain(space1):

 def test_mix(space1):
     op1 = ift.makeOp(ift.Field.full(space1, 2.))
-    op2 = ift.ScalingOperator(3., space1)
+    op2 = ift.ScalingOperator(space1, 3.)
     full_op = op1(op2 + op2)(op1)(op1) - op1(op2)
     x = ift.Field.full(space1, 1.)
     res = full_op(x)
@@ -39,10 +39,10 @@ seed = list2fixture([4, 78, 23])

 def testBasics(space, seed):
     np.random.seed(seed)
-    S = ift.ScalingOperator(1., space)
+    S = ift.ScalingOperator(space, 1.)
     s = S.draw_sample()
     var = ift.Linearization.make_var(s)
-    model = ift.ScalingOperator(6., var.target)
+    model = ift.ScalingOperator(var.target, 6.)
     ift.extra.check_jacobian_consistency(model, var.val)
@@ -64,7 +64,7 @@ def testBinary(type1, type2, space, seed):
     model = select_s1.scale(3.)
     pos = ift.from_random("normal", dom1)
     ift.extra.check_jacobian_consistency(model, pos, ntries=20)
-    model = ift.ScalingOperator(2.456, space)(select_s1*select_s2)
+    model = ift.ScalingOperator(space, 2.456)(select_s1*select_s2)
     pos = ift.from_random("normal", dom)
     ift.extra.check_jacobian_consistency(model, pos, ntries=20)
     model = ift.sigmoid(2.456*(select_s1*select_s2))
@@ -90,7 +90,7 @@ def testBinary(type1, type2, space, seed):

 def testPointModel(space, seed):
-    S = ift.ScalingOperator(1., space)
+    S = ift.ScalingOperator(space, 1.)
     pos = S.draw_sample()
     alpha = 1.5
     q = 0.73
@@ -118,7 +118,7 @@ def testDynamicModel(target, causal, minimum_phase, seed):
         'minimum_phase': minimum_phase
     }
     model, _ = ift.dynamic_operator(**dct)
-    S = ift.ScalingOperator(1., model.domain)
+    S = ift.ScalingOperator(model.domain, 1.)
     pos = S.draw_sample()
     # FIXME I dont know why smaller tol fails for 3D example
     ift.extra.check_jacobian_consistency(model, pos, tol=1e-5, ntries=20)
@@ -139,7 +139,7 @@ def testDynamicModel(target, causal, minimum_phase, seed):
     dct['sigc'] = 1.
     dct['quant'] = 5
     model, _ = ift.dynamic_lightcone_operator(**dct)
-    S = ift.ScalingOperator(1., model.domain)
+    S = ift.ScalingOperator(model.domain, 1.)
     pos = S.draw_sample()
     # FIXME I dont know why smaller tol fails for 3D example
     ift.extra.check_jacobian_consistency(