Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
Neel Shah
NIFTy
Commits
fce04fb8
Commit
fce04fb8
authored
Jun 22, 2020
by
Philipp Arras
Browse files
Merge remote-tracking branch 'origin/NIFTy_7' into switch_to_ducc
parents
c28c31c2
dc9494c9
Changes
47
Hide whitespace changes
Inline
Side-by-side
ChangeLog.md
View file @
fce04fb8
...
...
@@ -9,6 +9,21 @@ now uses the DUCC package (<https://gitlab.mpcdf.mpg.de/mtr/ducc>),
which is their successor.
Naming of operator tests
------------------------
The implementation tests for nonlinear operators are now available in
`ift.extra.check_operator()`
and for linear operators
`ift.extra.check_linear_operator()`
.
MetricGaussianKL interface
--------------------------
Users do not instantiate
`MetricGaussianKL`
by its constructor anymore. Rather
`MetricGaussianKL.make()`
shall be used.
Changes since NIFTy 5
=====================
...
...
@@ -70,6 +85,19 @@ print(met)
print(met.draw_sample())
```
New approach for sampling complex numbers
=========================================
When calling draw_sample_with_dtype with a complex dtype,
the variance is now used for the imaginary part and real part separately.
This is done in order to be consistent with the Hamiltonian.
Note that by this,
```
np.std(ift.from_random(domain, 'normal', dtype=np.complex128).val)
```
does not give 1, but sqrt(2) as a result.
MPI parallelisation over samples in MetricGaussianKL
----------------------------------------------------
...
...
@@ -85,6 +113,7 @@ the generation of reproducible random numbers in the presence of MPI parallelism
and leads to cleaner code overall. Please see the documentation of
`
nifty7.random
` for details.
Interface Change for from_random and OuterProduct
-------------------------------------------------
...
...
demos/getting_started_3.py
View file @
fce04fb8
...
...
@@ -131,7 +131,7 @@ def main():
# Draw new samples to approximate the KL five times
for
i
in
range
(
5
):
# Draw new samples and minimize KL
KL
=
ift
.
MetricGaussianKL
(
mean
,
H
,
N_samples
)
KL
=
ift
.
MetricGaussianKL
.
make
(
mean
,
H
,
N_samples
)
KL
,
convergence
=
minimizer
(
KL
)
mean
=
KL
.
position
...
...
@@ -144,7 +144,7 @@ def main():
name
=
filename
.
format
(
"loop_{:02d}"
.
format
(
i
)))
# Draw posterior samples
KL
=
ift
.
MetricGaussianKL
(
mean
,
H
,
N_samples
)
KL
=
ift
.
MetricGaussianKL
.
make
(
mean
,
H
,
N_samples
)
sc
=
ift
.
StatCalculator
()
for
sample
in
KL
.
samples
:
sc
.
add
(
signal
(
sample
+
KL
.
position
))
...
...
demos/getting_started_4_CorrelatedFields.ipynb
View file @
fce04fb8
...
...
@@ -152,10 +152,8 @@
"sigmas = [1.0, 0.5, 0.1]\n",
"\n",
"for i in range(3):\n",
" op = ift.library.correlated_fields._LognormalMomentMatching(mean=mean,\n",
" sig=sigmas[i],\n",
" key='foo',\n",
" N_copies=0)\n",
" op = ift.LognormalTransform(mean=mean, sigma=sigmas[i],\n",
" key='foo', N_copies=0)\n",
" op_samples = np.array(\n",
" [op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]])\n",
"\n",
...
...
%% Cell type:markdown id: tags:
# Notebook showcasing the NIFTy 6 Correlated Field model
**Skip to `Parameter Showcases` for the meat/veggies ;)**
The field model roughly works like this:
`f = HT( A * zero_mode * xi ) + offset`
`A`
is a spectral power field which is constructed from power spectra that hold on subdomains of the target domain.
It is scaled by a zero mode operator and then pointwise multiplied by a Gaussian excitation field, yielding
a representation of the field in harmonic space.
It is then transformed into the target real space and an offset is added.
The power spectra
`A`
is constructed of are in turn constructed as the sum of a power law component
and an integrated Wiener process whose amplitude and roughness can be set.
## Setup code
%% Cell type:code id: tags:
```
python
import
nifty7
as
ift
import
matplotlib.pyplot
as
plt
import
numpy
as
np
ift
.
random
.
push_sseq_from_seed
(
43
)
n_pix
=
256
x_space
=
ift
.
RGSpace
(
n_pix
)
```
%% Cell type:code id: tags:
```
python
# Plotting routine
def
plot
(
fields
,
spectra
,
title
=
None
):
# Plotting preparation is normally handled by nifty7.Plot
# It is done manually here to be able to tweak details
# Fields are assumed to have identical domains
fig
=
plt
.
figure
(
tight_layout
=
True
,
figsize
=
(
12
,
3.5
))
if
title
is
not
None
:
fig
.
suptitle
(
title
,
fontsize
=
14
)
# Field
ax1
=
fig
.
add_subplot
(
1
,
2
,
1
)
ax1
.
axhline
(
y
=
0.
,
color
=
'k'
,
linestyle
=
'--'
,
alpha
=
0.25
)
for
field
in
fields
:
dom
=
field
.
domain
[
0
]
xcoord
=
np
.
arange
(
dom
.
shape
[
0
])
*
dom
.
distances
[
0
]
ax1
.
plot
(
xcoord
,
field
.
val
)
ax1
.
set_xlim
(
xcoord
[
0
],
xcoord
[
-
1
])
ax1
.
set_ylim
(
-
5.
,
5.
)
ax1
.
set_xlabel
(
'x'
)
ax1
.
set_ylabel
(
'f(x)'
)
ax1
.
set_title
(
'Field realizations'
)
# Spectrum
ax2
=
fig
.
add_subplot
(
1
,
2
,
2
)
for
spectrum
in
spectra
:
xcoord
=
spectrum
.
domain
[
0
].
k_lengths
ycoord
=
spectrum
.
val_rw
()
ycoord
[
0
]
=
ycoord
[
1
]
ax2
.
plot
(
xcoord
,
ycoord
)
ax2
.
set_ylim
(
1e-6
,
10.
)
ax2
.
set_xscale
(
'log'
)
ax2
.
set_yscale
(
'log'
)
ax2
.
set_xlabel
(
'k'
)
ax2
.
set_ylabel
(
'p(k)'
)
ax2
.
set_title
(
'Power Spectrum'
)
fig
.
align_labels
()
plt
.
show
()
# Helper: draw main sample
main_sample
=
None
def
init_model
(
m_pars
,
fl_pars
):
global
main_sample
cf
=
ift
.
CorrelatedFieldMaker
.
make
(
**
m_pars
)
cf
.
add_fluctuations
(
**
fl_pars
)
field
=
cf
.
finalize
(
prior_info
=
0
)
main_sample
=
ift
.
from_random
(
field
.
domain
)
print
(
"model domain keys:"
,
field
.
domain
.
keys
())
# Helper: field and spectrum from parameter dictionaries + plotting
def
eval_model
(
m_pars
,
fl_pars
,
title
=
None
,
samples
=
None
):
cf
=
ift
.
CorrelatedFieldMaker
.
make
(
**
m_pars
)
cf
.
add_fluctuations
(
**
fl_pars
)
field
=
cf
.
finalize
(
prior_info
=
0
)
spectrum
=
cf
.
amplitude
if
samples
is
None
:
samples
=
[
main_sample
]
field_realizations
=
[
field
(
s
)
for
s
in
samples
]
spectrum_realizations
=
[
spectrum
.
force
(
s
)
for
s
in
samples
]
plot
(
field_realizations
,
spectrum_realizations
,
title
)
def
gen_samples
(
key_to_vary
):
if
key_to_vary
is
None
:
return
[
main_sample
]
dct
=
main_sample
.
to_dict
()
subdom_to_vary
=
dct
.
pop
(
key_to_vary
).
domain
samples
=
[]
for
i
in
range
(
8
):
d
=
dct
.
copy
()
d
[
key_to_vary
]
=
ift
.
from_random
(
subdom_to_vary
)
samples
.
append
(
ift
.
MultiField
.
from_dict
(
d
))
return
samples
def
vary_parameter
(
parameter_key
,
values
,
samples_vary_in
=
None
):
s
=
gen_samples
(
samples_vary_in
)
for
v
in
values
:
if
parameter_key
in
cf_make_pars
.
keys
():
m_pars
=
{
**
cf_make_pars
,
parameter_key
:
v
}
eval_model
(
m_pars
,
cf_x_fluct_pars
,
f
"
{
parameter_key
}
=
{
v
}
"
,
s
)
else
:
fl_pars
=
{
**
cf_x_fluct_pars
,
parameter_key
:
v
}
eval_model
(
cf_make_pars
,
fl_pars
,
f
"
{
parameter_key
}
=
{
v
}
"
,
s
)
```
%% Cell type:markdown id: tags:
## Before the Action: The Moment-Matched Log-Normal Distribution
Many properties of the correlated field are modelled as being lognormally distributed.
The distribution models are parametrized via their means
`_mean`
and standard-deviations
`_stddev`
.
To get a feeling of how the ratio of the
`mean`
and
`stddev`
parameters influences the distribution shape,
here are a few example histograms: (observe the x-axis!)
%% Cell type:code id: tags:
```
python
fig
=
plt
.
figure
(
figsize
=
(
13
,
3.5
))
mean
=
1.0
sigmas
=
[
1.0
,
0.5
,
0.1
]
for
i
in
range
(
3
):
op
=
ift
.
library
.
correlated_fields
.
_LognormalMomentMatching
(
mean
=
mean
,
sig
=
sigmas
[
i
],
key
=
'foo'
,
N_copies
=
0
)
op
=
ift
.
LognormalTransform
(
mean
=
mean
,
sigma
=
sigmas
[
i
],
key
=
'foo'
,
N_copies
=
0
)
op_samples
=
np
.
array
(
[
op
(
s
).
val
for
s
in
[
ift
.
from_random
(
op
.
domain
)
for
i
in
range
(
10000
)]])
ax
=
fig
.
add_subplot
(
1
,
3
,
i
+
1
)
ax
.
hist
(
op_samples
,
bins
=
50
)
ax
.
set_title
(
f
"mean =
{
mean
}
, sigma =
{
sigmas
[
i
]
}
"
)
ax
.
set_xlabel
(
'x'
)
del
op_samples
plt
.
show
()
```
%% Cell type:markdown id: tags:
## The Neutral Field
To demonstrate the effect of all parameters, first a 'neutral' set of parameters
is defined which then are varied one by one, showing the effect of the variation
on the generated field realizations and the underlying power spectrum from which
they were drawn.
As a neutral field, a model with a white power spectrum and vanishing spectral power was chosen.
%% Cell type:code id: tags:
```
python
# Neutral model parameters yielding a quasi-constant field
cf_make_pars
=
{
'offset_mean'
:
0.
,
'offset_std_mean'
:
1e-3
,
'offset_std_std'
:
1e-16
,
'prefix'
:
''
}
cf_x_fluct_pars
=
{
'target_subdomain'
:
x_space
,
'fluctuations_mean'
:
1e-3
,
'fluctuations_stddev'
:
1e-16
,
'flexibility_mean'
:
1e-3
,
'flexibility_stddev'
:
1e-16
,
'asperity_mean'
:
1e-3
,
'asperity_stddev'
:
1e-16
,
'loglogavgslope_mean'
:
0.
,
'loglogavgslope_stddev'
:
1e-16
}
init_model
(
cf_make_pars
,
cf_x_fluct_pars
)
```
%% Cell type:code id: tags:
```
python
# Show neutral field
eval_model
(
cf_make_pars
,
cf_x_fluct_pars
,
"Neutral Field"
)
```
%% Cell type:markdown id: tags:
# Parameter Showcases
## The `fluctuations_` parameters of `add_fluctuations()`
determine the
**amplitude of variations along the field dimension**
for which
`add_fluctuations`
is called.
`fluctuations_mean`
sets the _average_ amplitude of the field's fluctuations along the given dimension,
\
`fluctuations_stddev`
sets the width and shape of the amplitude distribution.
The amplitude is modelled as being log-normally distributed,
see
`The Moment-Matched Log-Normal Distribution`
above for details.
#### `fluctuations_mean`:
%% Cell type:code id: tags:
```
python
vary_parameter
(
'fluctuations_mean'
,
[
0.05
,
0.5
,
2.
],
samples_vary_in
=
'xi'
)
```
%% Cell type:markdown id: tags:
#### `fluctuations_stddev`:
%% Cell type:code id: tags:
```
python
cf_x_fluct_pars
[
'fluctuations_mean'
]
=
1.0
vary_parameter
(
'fluctuations_stddev'
,
[
0.01
,
0.1
,
1.0
],
samples_vary_in
=
'fluctuations'
)
```
%% Cell type:markdown id: tags:
## The `loglogavgslope_` parameters of `add_fluctuations()`
determine
**the slope of the loglog-linear (power law) component of the power spectrum**
.
The slope is modelled to be normally distributed.
#### `loglogavgslope_mean`:
%% Cell type:code id: tags:
```
python
vary_parameter
(
'loglogavgslope_mean'
,
[
-
3.
,
-
1.
,
1.
],
samples_vary_in
=
'xi'
)
```
%% Cell type:markdown id: tags:
#### `loglogavgslope_stddev`:
%% Cell type:code id: tags:
```
python
cf_x_fluct_pars
[
'loglogavgslope_mean'
]
=
-
1.0
vary_parameter
(
'loglogavgslope_stddev'
,
[
0.01
,
0.1
,
1.0
],
samples_vary_in
=
'loglogavgslope'
)
```
%% Cell type:markdown id: tags:
## The `flexibility_` parameters of `add_fluctuations()`
determine
**the amplitude of the integrated Wiener process component of the power spectrum**
(how strong the power spectrum varies besides the power-law).
`flexibility_mean`
sets the _average_ amplitude of the integrated Wiener process component,
`flexibility_stddev`
sets how much the amplitude can vary.
\
These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior.
#### `flexibility_mean`:
%% Cell type:code id: tags:
```
python
vary_parameter
(
'flexibility_mean'
,
[
0.1
,
1.0
,
3.0
],
samples_vary_in
=
'spectrum'
)
```
%% Cell type:markdown id: tags:
#### `flexibility_stddev`:
%% Cell type:code id: tags:
```
python
cf_x_fluct_pars
[
'flexibility_mean'
]
=
2.0
vary_parameter
(
'flexibility_stddev'
,
[
0.01
,
0.1
,
1.0
],
samples_vary_in
=
'flexibility'
)
```
%% Cell type:markdown id: tags:
## The `asperity_` parameters of `add_fluctuations()`
`asperity_`
determines
**how rough the integrated Wiener process component of the power spectrum is**
.
`asperity_mean`
sets the average roughness,
`asperity_stddev`
sets how much the roughness can vary.
\
These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior.
#### `asperity_mean`:
%% Cell type:code id: tags:
```
python
vary_parameter
(
'asperity_mean'
,
[
0.001
,
1.0
,
5.0
],
samples_vary_in
=
'spectrum'
)
```
%% Cell type:markdown id: tags:
#### `asperity_stddev`:
%% Cell type:code id: tags:
```
python
cf_x_fluct_pars
[
'asperity_mean'
]
=
1.0
vary_parameter
(
'asperity_stddev'
,
[
0.01
,
0.1
,
1.0
],
samples_vary_in
=
'asperity'
)
```
%% Cell type:markdown id: tags:
## The `offset_mean` parameter of `CorrelatedFieldMaker.make()`
The
`offset_mean`
parameter defines a global additive offset on the field realizations.
If the field is used for a lognormal model
`f = field.exp()`
, this acts as a global signal magnitude offset.
%% Cell type:code id: tags:
```
python
# Reset model to neutral
cf_x_fluct_pars
[
'fluctuations_mean'
]
=
1e-3
cf_x_fluct_pars
[
'flexibility_mean'
]
=
1e-3
cf_x_fluct_pars
[
'asperity_mean'
]
=
1e-3
cf_x_fluct_pars
[
'loglogavgslope_mean'
]
=
1e-3
```
%% Cell type:code id: tags:
```
python
vary_parameter
(
'offset_mean'
,
[
3.
,
0.
,
-
2.
])
```
%% Cell type:markdown id: tags:
## The `offset_std_` parameters of `CorrelatedFieldMaker.make()`
Variations of the global offset of the field are modelled as being log-normally distributed.
See
`The Moment-Matched Log-Normal Distribution`
above for details.
The
`offset_std_mean`
parameter sets how much NIFTy will vary the offset
*on average*
.
\
The
`offset_std_std`
parameter defines the width and shape of the offset variation distribution.
#### `offset_std_mean`:
%% Cell type:code id: tags:
```
python
vary_parameter
(
'offset_std_mean'
,
[
1e-16
,
0.5
,
2.
],
samples_vary_in
=
'xi'
)
```
%% Cell type:markdown id: tags:
#### `offset_std_std`:
%% Cell type:code id: tags:
```
python
cf_make_pars
[
'offset_std_mean'
]
=
1.0
vary_parameter
(
'offset_std_std'
,
[
0.01
,
0.1
,
1.0
],
samples_vary_in
=
'zeromode'
)
```
...
...
demos/getting_started_5_mf.py
View file @
fce04fb8
...
...
@@ -131,7 +131,7 @@ def main():
for
i
in
range
(
10
):
# Draw new samples and minimize KL
KL
=
ift
.
MetricGaussianKL
(
mean
,
H
,
N_samples
)
KL
=
ift
.
MetricGaussianKL
.
make
(
mean
,
H
,
N_samples
)
KL
,
convergence
=
minimizer
(
KL
)
mean
=
KL
.
position
...
...
@@ -157,7 +157,7 @@ def main():
name
=
filename
.
format
(
"loop_{:02d}"
.
format
(
i
)))
# Done, draw posterior samples
KL
=
ift
.
MetricGaussianKL
(
mean
,
H
,
N_samples
)
KL
=
ift
.
MetricGaussianKL
.
make
(
mean
,
H
,
N_samples
)
sc
=
ift
.
StatCalculator
()
scA1
=
ift
.
StatCalculator
()
scA2
=
ift
.
StatCalculator
()
...
...
demos/mgvi_visualized.py
View file @
fce04fb8
...
...
@@ -34,6 +34,7 @@ from matplotlib.colors import LogNorm
import
nifty7
as
ift
def
main
():
dom
=
ift
.
UnstructuredDomain
(
1
)
scale
=
10
...
...
@@ -90,7 +91,7 @@ def main():
plt
.
figure
(
figsize
=
[
12
,
8
])
for
ii
in
range
(
15
):
if
ii
%
3
==
0
:
mgkl
=
ift
.
MetricGaussianKL
(
pos
,
ham
,
40
)
mgkl
=
ift
.
MetricGaussianKL
.
make
(
pos
,
ham
,
40
)
plt
.
cla
()
plt
.
imshow
(
z
.
T
,
origin
=
'lower'
,
norm
=
LogNorm
(),
vmin
=
1e-3
,
...
...
demos/polynomial_fit.py
View file @
fce04fb8
...
...
@@ -97,7 +97,7 @@ def main():
p_space
=
ift
.
UnstructuredDomain
(
N_params
)
params
=
ift
.
full
(
p_space
,
0.
)
R
=
PolynomialResponse
(
p_space
,
x
)
ift
.
extra
.
c
onsistency_check
(
R
)
ift
.
extra
.
c
heck_linear_operator
(
R
)
d_space
=
R
.
target
d
=
ift
.
makeField
(
d_space
,
y
)
...
...
src/__init__.py
View file @
fce04fb8
...
...
@@ -52,6 +52,7 @@ from .operators.energy_operators import (
BernoulliEnergy
,
StandardHamiltonian
,
AveragedEnergy
,
QuadraticFormOperator
,
Squared2NormOperator
,
StudentTEnergy
,
VariableCovarianceGaussianEnergy
)
from
.operators.convolution_operators
import
FuncConvolutionOperator
from
.operators.normal_operators
import
NormalTransform
,
LognormalTransform
from
.probing
import
probe_with_posterior_samples
,
probe_diagonal
,
\
StatCalculator
,
approximation2endo
...
...
src/extra.py
View file @
fce04fb8
...
...
@@ -15,21 +15,115 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
from
itertools
import
combinations
import
numpy
as
np
from
numpy.testing
import
assert_
from
.
import
random
from
.domain_tuple
import
DomainTuple
from
.field
import
Field
from
.linearization
import
Linearization
from
.multi_domain
import
MultiDomain
from
.multi_field
import
MultiField
from
.operators.energy_operators
import
EnergyOperator
from
.operators.linear_operator
import
LinearOperator
from
.operators.operator
import
Operator
from
.sugar
import
from_random
__all__
=
[
"c
onsistency_check"
,
"check_jacobian_consistency
"
,
__all__
=
[
"c
heck_linear_operator"
,
"check_operator
"
,
"assert_allclose"
]
def
check_linear_operator
(
op
,
domain_dtype
=
np
.
float64
,
target_dtype
=
np
.
float64
,
atol
=
1e-12
,
rtol
=
1e-12
,
only_r_linear
=
False
):
"""
Checks an operator for algebraic consistency of its capabilities.
Checks whether times(), adjoint_times(), inverse_times() and
adjoint_inverse_times() (if in capability list) is implemented
consistently. Additionally, it checks whether the operator is linear.
Parameters
----------
op : LinearOperator
Operator which shall be checked.
domain_dtype : dtype
The data type of the random vectors in the operator's domain. Default
is `np.float64`.
target_dtype : dtype
The data type of the random vectors in the operator's target. Default
is `np.float64`.
atol : float
Absolute tolerance for the check. If rtol is specified,
then satisfying any tolerance will let the check pass.
Default: 0.
rtol : float
Relative tolerance for the check. If atol is specified,
then satisfying any tolerance will let the check pass.
Default: 0.
only_r_linear: bool
set to True if the operator is only R-linear, not C-linear.
This will relax the adjointness test accordingly.
"""
if
not
isinstance
(
op
,
LinearOperator
):
raise
TypeError
(
'This test tests only linear operators.'
)
_domain_check_linear
(
op
,
domain_dtype
)
_domain_check_linear
(
op
.
adjoint
,
target_dtype
)
_domain_check_linear
(
op
.
inverse
,
target_dtype
)
_domain_check_linear
(
op
.
adjoint
.
inverse
,
domain_dtype
)
_check_linearity
(
op
,
domain_dtype
,
atol
,
rtol
)
_check_linearity
(
op
.
adjoint
,
target_dtype
,
atol
,
rtol
)
_check_linearity
(
op
.
inverse
,
target_dtype
,
atol
,
rtol
)
_check_linearity
(
op
.
adjoint
.
inverse
,
domain_dtype
,
atol
,
rtol
)
_full_implementation
(
op
,
domain_dtype
,
target_dtype
,
atol
,
rtol
,
only_r_linear
)
_full_implementation
(
op
.
adjoint
,
target_dtype
,
domain_dtype
,
atol
,
rtol
,
only_r_linear
)
_full_implementation
(
op
.
inverse
,
target_dtype
,
domain_dtype
,
atol
,
rtol
,
only_r_linear
)
_full_implementation
(
op
.
adjoint
.
inverse
,
domain_dtype
,
target_dtype
,
atol
,
rtol
,
only_r_linear
)
def
check_operator
(
op
,
loc
,
tol
=
1e-12
,
ntries
=
100
,
perf_check
=
True
,
only_r_differentiable
=
True
,
metric_sampling
=
True
):
"""
Performs various checks of the implementation of linear and nonlinear
operators.
Computes the Jacobian with finite differences and compares it to the
implemented Jacobian.
Parameters
----------
op : Operator
Operator which shall be checked.
loc : Field or MultiField
An Field or MultiField instance which has the same domain
as op. The location at which the gradient is checked
tol : float
Tolerance for the check.
perf_check : Boolean
Do performance check. May be disabled for very unimportant operators.
only_r_differentiable : Boolean
Jacobians of C-differentiable operators need to be C-linear.
Default: True
metric_sampling: Boolean
If op is an EnergyOperator, metric_sampling determines whether the
test shall try to sample from the metric or not.
"""
if
not
isinstance
(
op
,
Operator
):
raise
TypeError
(
'This test tests only linear operators.'
)
_domain_check_nonlinear
(
op
,
loc
)
_performance_check
(
op
,
loc
,
bool
(
perf_check
))
_linearization_value_consistency
(
op
,
loc
)
_jac_vs_finite_differences
(
op
,
loc
,
np
.
sqrt
(
tol
),
ntries
,
only_r_differentiable
)
_check_nontrivial_constant
(
op
,
loc
,
tol
,
ntries
,
only_r_differentiable
,
metric_sampling
)
def
assert_allclose
(
f1
,
f2
,
atol
,
rtol
):
if
isinstance
(
f1
,
Field
):
return
np
.
testing
.
assert_allclose
(
f1
.
val
,
f2
.
val
,
atol
=
atol
,
rtol
=
rtol
)
...
...
@@ -37,6 +131,27 @@ def assert_allclose(f1, f2, atol, rtol):
assert_allclose
(
val
,
f2
[
key
],
atol
=
atol
,
rtol
=
rtol
)
def
assert_equal
(
f1
,
f2
):
if
isinstance
(
f1
,
Field
):
return
np
.
testing
.
assert_equal
(
f1
.
val
,
f2
.
val
)
for
key
,
val
in
f1
.
items
():
assert_equal
(
val
,
f2
[
key
])
def
_nozero
(
fld
):
if
isinstance
(
fld
,
Field
):
return
np
.
testing
.
assert_
((
fld
!=
0
).
s_all
())
for
val
in
fld
.
values
():
_nozero
(
val
)
def
_allzero
(
fld
):
if
isinstance
(
fld
,
Field
):
return
np
.
testing
.
assert_
((
fld
==
0.
).
s_all
())
for
val
in
fld
.
values
():
_allzero
(
val
)
def
_adjoint_implementation
(
op
,
domain_dtype
,
target_dtype
,
atol
,
rtol
,
only_r_linear
):
needed_cap
=
op
.
TIMES
|
op
.
ADJOINT_TIMES
...
...
@@ -83,7 +198,8 @@ def _check_linearity(op, domain_dtype, atol, rtol):
assert_allclose
(
val1
,
val2
,
atol
=
atol
,
rtol
=
rtol
)
def
_actual_domain_check_linear
(
op
,
domain_dtype
=
None
,
inp
=
None
):
def
_domain_check_linear
(
op
,
domain_dtype
=
None
,
inp
=
None
):
_domain_check
(
op
)
needed_cap
=
op
.
TIMES
if
(
op
.
capability
&
needed_cap
)
!=
needed_cap
:
return
...
...
@@ -95,8 +211,9 @@ def _actual_domain_check_linear(op, domain_dtype=None, inp=None):
assert_
(
op
(
inp
).
domain
is
op
.
target
)
def
_actual_domain_check_nonlinear
(
op
,
loc
):
assert
isinstance
(
loc
,
(
Field
,
MultiField
))
def
_domain_check_nonlinear
(
op
,
loc
):
_domain_check
(
op
)
assert_
(
isinstance
(
loc
,
(
Field
,
MultiField
)))
assert_
(
loc
.
domain
is
op
.
domain
)
for
wm
in
[
False
,
True
]:
lin
=
Linearization
.
make_var
(
loc
,
wm
)
...
...
@@ -111,8 +228,8 @@ def _actual_domain_check_nonlinear(op, loc):
assert_
(
reslin
.
jac
.
domain
is
reslin
.
domain
)
assert_
(
reslin
.
jac
.
target
is
reslin
.
target
)
assert_
(
lin
.
want_metric
==
reslin
.
want_metric
)
_actual
_domain_check_linear
(
reslin
.
jac
,
inp
=
loc
)
_actual
_domain_check_linear
(
reslin
.
jac
.
adjoint
,
inp
=
reslin
.
jac
(
loc
))
_domain_check_linear
(
reslin
.
jac
,
inp
=
loc
)
_domain_check_linear
(
reslin
.
jac
.
adjoint
,
inp
=
reslin
.
jac
(
loc
))
if
reslin
.
metric
is
not
None
:
assert_
(
reslin
.
metric
.
domain
is
reslin
.
metric
.
target
)
assert_
(
reslin
.
metric
.
domain
is
op
.
domain
)
...
...
@@ -164,58 +281,6 @@ def _performance_check(op, pos, raise_on_fail):
raise
RuntimeError
(
s
)
def
consistency_check
(
op
,
domain_dtype
=
np
.
float64
,
target_dtype
=
np
.
float64
,
atol
=
0
,
rtol
=
1e-7
,
only_r_linear
=
False
):
"""
Checks an operator for algebraic consistency of its capabilities.
Checks whether times(), adjoint_times(), inverse_times() and
adjoint_inverse_times() (if in capability list) is implemented
consistently. Additionally, it checks whether the operator is linear.
Parameters
----------
op : LinearOperator
Operator which shall be checked.
domain_dtype : dtype
The data type of the random vectors in the operator's domain. Default
is `np.float64`.
target_dtype : dtype
The data type of the random vectors in the operator's target. Default
is `np.float64`.
atol : float
Absolute tolerance for the check. If rtol is specified,
then satisfying any tolerance will let the check pass.
Default: 0.
rtol : float
Relative tolerance for the check. If atol is specified,
then satisfying any tolerance will let the check pass.
Default: 0.
only_r_linear: bool
set to True if the operator is only R-linear, not C-linear.
This will relax the adjointness test accordingly.
"""
if
not
isinstance
(
op
,
LinearOperator
):
raise
TypeError
(
'This test tests only linear operators.'
)
_domain_check
(
op
)
_actual_domain_check_linear
(
op
,
domain_dtype
)
_actual_domain_check_linear
(
op
.
adjoint
,
target_dtype
)
_actual_domain_check_linear
(
op
.
inverse
,
target_dtype
)
_actual_domain_check_linear
(
op
.
adjoint
.
inverse
,
domain_dtype
)
_check_linearity
(
op
,
domain_dtype
,
atol
,
rtol
)
_check_linearity
(
op
.
adjoint
,
target_dtype
,
atol
,
rtol
)
_check_linearity
(
op
.
inverse
,
target_dtype
,
atol
,
rtol
)
_check_linearity
(
op
.
adjoint
.
inverse
,
domain_dtype
,
atol
,
rtol
)
_full_implementation
(
op
,
domain_dtype
,
target_dtype
,
atol
,
rtol
,
only_r_linear
)
_full_implementation
(
op
.
adjoint
,
target_dtype
,
domain_dtype
,
atol
,
rtol
,
only_r_linear
)
_full_implementation
(
op
.
inverse
,
target_dtype
,
domain_dtype
,
atol
,
rtol
,
only_r_linear
)
_full_implementation
(
op
.
adjoint
.
inverse
,
domain_dtype
,
target_dtype
,
atol
,
rtol
,
only_r_linear
)
def
_get_acceptable_location
(
op
,
loc
,
lin
):
if
not
np
.
isfinite
(
lin
.
val
.
s_sum
()):
raise
ValueError
(
'Initial value must be finite'
)
...
...
@@ -248,34 +313,51 @@ def _linearization_value_consistency(op, loc):
assert_allclose
(
fld0
,
fld1
,
0
,
1e-7
)
def
check_jacobian_consistency
(
op
,
loc
,
tol
=
1e-8
,
ntries
=
100
,
perf_check
=
True
,
only_r_differentiable
=
True
):
"""
Checks the Jacobian of an operator against its finite difference
approximation.
Computes the Jacobian with finite differences and compares it to the
implemented Jacobian.
Parameters
----------
op : Operator
Operator which shall be checked.
loc : Field or MultiField
An Field or MultiField instance which has the same domain
as op. The location at which the gradient is checked
tol : float
Tolerance for the check.
perf_check : Boolean
Do performance check. May be disabled for very unimportant operators.
only_r_differentiable : Boolean
Jacobians of C-differentiable operators need to be C-linear.
Default: True
"""
_domain_check
(
op
)
_actual_domain_check_nonlinear
(
op
,
loc
)
_performance_check
(
op
,
loc
,
bool
(
perf_check
))
_linearization_value_consistency
(
op
,
loc
)
def
_check_nontrivial_constant
(
op
,
loc
,
tol
,
ntries
,
only_r_differentiable
,
metric_sampling
):
if
isinstance
(
op
.
domain
,
DomainTuple
):
return
keys
=
op
.
domain
.
keys
()
combis
=
[]
if
len
(
keys
)
>
4
:
from
.logger
import
logger
logger
.
warning
(
'Operator domain has more than 4 keys.'
)
logger
.
warning
(
'Check derivatives only with one constant key at a time.'
)
combis
=
[[
kk
]
for
kk
in
keys
]
else
:
for
ll
in
range
(
1
,
len
(
keys
)):
combis
.
extend
(
list
(
combinations
(
keys
,
ll
)))
for
cstkeys
in
combis
:
varkeys
=
set
(
keys
)
-
set
(
cstkeys
)
cstloc
=
loc
.
extract_by_keys
(
cstkeys
)
varloc
=
loc
.
extract_by_keys
(
varkeys
)
val0
=
op
(
loc
)
_
,
op0
=
op
.
simplify_for_constant_input
(
cstloc
)
assert
op0
.
domain
is
varloc
.
domain
val1
=
op0
(
varloc
)
assert_equal
(
val0
,
val1
)
lin
=
Linearization
.
make_partial_var
(
loc
,
cstkeys
,
want_metric
=
True
)
lin0
=
Linearization
.
make_var
(
varloc
,
want_metric
=
True
)
oplin0
=
op0
(
lin0
)
oplin
=
op
(
lin
)
assert
oplin
.
jac
.
target
is
oplin0
.
jac
.
target
rndinp
=
from_random
(
oplin
.
jac
.
target
)
assert_allclose
(
oplin
.
jac
.
adjoint
(
rndinp
).
extract
(
varloc
.
domain
),
oplin0
.
jac
.
adjoint
(
rndinp
),
1e-13
,
1e-13
)
foo
=
oplin
.
jac
.
adjoint
(
rndinp
).
extract
(
cstloc
.
domain
)
assert_equal
(
foo
,
0
*
foo
)
if
isinstance
(
op
,
EnergyOperator
)
and
metric_sampling
:
oplin
.
metric
.
draw_sample
()
# _jac_vs_finite_differences(op0, varloc, np.sqrt(tol), ntries,
# only_r_differentiable)
def
_jac_vs_finite_differences
(
op
,
loc
,
tol
,
ntries
,
only_r_differentiable
):
for
_
in
range
(
ntries
):
lin
=
op
(
Linearization
.
make_var
(
loc
))
loc2
,
lin2
=
_get_acceptable_location
(
op
,
loc
,
lin
)
...
...
@@ -300,8 +382,7 @@ def check_jacobian_consistency(op, loc, tol=1e-8, ntries=100, perf_check=True,
print
(
hist
)
raise
ValueError
(
"gradient and value seem inconsistent"
)
loc
=
locnext
ddtype
=
loc
.
values
()[
0
].
dtype
if
isinstance
(
loc
,
MultiField
)
else
loc
.
dtype
tdtype
=
dirder
.
values
()[
0
].
dtype
if
isinstance
(
dirder
,
MultiField
)
else
dirder
.
dtype
consistency_check
(
linmid
.
jac
,
domain_dtype
=
ddtype
,
target_dtype
=
tdtype
,
only_r_linear
=
only_r_differentiable
)
check_linear_operator
(
linmid
.
jac
,
domain_dtype
=
loc
.
dtype
,
target_dtype
=
dirder
.
dtype
,
only_r_linear
=
only_r_differentiable
,
atol
=
tol
**
2
,
rtol
=
tol
**
2
)
src/field.py
View file @
fce04fb8
...
...
@@ -136,6 +136,8 @@ class Field(Operator):
The domain of the output random Field.
dtype : type
The datatype of the output random Field.
If the datatype is complex, each real and imaginary part
have variance 1
Returns
-------
...
...
src/library/correlated_fields.py
View file @
fce04fb8
...
...
@@ -37,48 +37,11 @@ from ..operators.harmonic_operators import HarmonicTransformOperator
from
..operators.linear_operator
import
LinearOperator
from
..operators.operator
import
Operator
from
..operators.simple_linear_operators
import
ducktape
from
..operators.normal_operators
import
NormalTransform
,
LognormalTransform
from
..probing
import
StatCalculator
from
..sugar
import
full
,
makeDomain
,
makeField
,
makeOp
def
_reshaper
(
x
,
N
):
x
=
np
.
asfarray
(
x
)
if
x
.
shape
in
[(),
(
1
,)]:
return
np
.
full
(
N
,
x
)
if
N
!=
0
else
x
.
reshape
(())
elif
x
.
shape
==
(
N
,):
return
x
else
:
raise
TypeError
(
"Shape of parameters cannot be interpreted"
)
def
_lognormal_moments
(
mean
,
sig
,
N
=
0
):
if
N
==
0
:
mean
,
sig
=
np
.
asfarray
(
mean
),
np
.
asfarray
(
sig
)
else
:
mean
,
sig
=
(
_reshaper
(
param
,
N
)
for
param
in
(
mean
,
sig
))
if
not
np
.
all
(
mean
>
0
):
raise
ValueError
(
"mean must be greater 0; got {!r}"
.
format
(
mean
))
if
not
np
.
all
(
sig
>
0
):
raise
ValueError
(
"sig must be greater 0; got {!r}"
.
format
(
sig
))
logsig
=
np
.
sqrt
(
np
.
log1p
((
sig
/
mean
)
**
2
))
logmean
=
np
.
log
(
mean
)
-
logsig
**
2
/
2
return
logmean
,
logsig
def
_normal
(
mean
,
sig
,
key
,
N
=
0
):
if
N
==
0
:
domain
=
DomainTuple
.
scalar_domain
()
mean
,
sig
=
np
.
asfarray
(
mean
),
np
.
asfarray
(
sig
)
return
Adder
(
makeField
(
domain
,
mean
))
@
(