ift / NIFTy, commit 76130897
authored Dec 06, 2019 by Torsten Ensslin

Merge branch 'do_cleanup' into 'NIFTy_6'

Remove standard MPI parallelization

See merge request !387

Parents: 5765b889, 07ef6674
Pipeline #65084 passed with stages in 8 minutes and 17 seconds

Showing 80 changed files with 462 additions and 1485 deletions

.gitlab-ci.yml  +1 -8
ChangeLog  +21 -0
demos/Wiener_Filter.ipynb  +22 -22
demos/bench_gridder.py  +4 -4
demos/bernoulli_demo.py  +2 -2
demos/find_amplitude_parameters.py  +3 -3
demos/getting_started_1.py  +2 -2
demos/getting_started_2.py  +3 -3
demos/getting_started_mf.py  +1 -1
demos/misc/convolution.py  +4 -4
demos/multi_amplitudes_consistency.py  +8 -8
demos/polynomial_fit.py  +9 -10
nifty6/__init__.py  +0 -7
nifty6/data_objects/__init__.py  +0 -0
nifty6/data_objects/distributed_do.py  +0 -590
nifty6/data_objects/numpy_do.py  +0 -158
nifty6/dobj.py  +0 -33
nifty6/domain_tuple.py  +0 -10
nifty6/domains/domain.py  +0 -13
nifty6/domains/lm_space.py  +5 -5
nifty6/domains/power_space.py  +7 -14
nifty6/domains/rg_space.py  +6 -9
nifty6/extra.py  +1 -2
nifty6/field.py  +28 -67
nifty6/internal_config.py  +0 -25
nifty6/library/correlated_fields.py  +20 -20
nifty6/library/dynamic_operator.py  +2 -2
nifty6/library/gridder.py  +6 -6
nifty6/library/inverse_gamma_operator.py  +7 -7
nifty6/library/light_cone_operator.py  +5 -5
nifty6/library/los_response.py  +14 -23
nifty6/linearization.py  +6 -6
nifty6/logger.py  +3 -7
nifty6/minimization/energy_adapter.py  +1 -1
nifty6/minimization/metric_gaussian_kl.py  +2 -2
nifty6/minimization/metric_gaussian_kl_mpi.py  +5 -6
nifty6/minimization/scipy_minimizer.py  +5 -11
nifty6/multi_field.py  +9 -4
nifty6/operator_spectrum.py  +6 -6
nifty6/operators/contraction_operator.py  +4 -4
nifty6/operators/diagonal_operator.py  +11 -16
nifty6/operators/distributors.py  +11 -25
nifty6/operators/domain_tuple_field_inserter.py  +3 -5
nifty6/operators/energy_operators.py  +4 -5
nifty6/operators/field_zero_padder.py  +14 -16
nifty6/operators/harmonic_operators.py  +7 -79
nifty6/operators/linear_interpolation.py  +2 -2
nifty6/operators/mask_operator.py  +4 -7
nifty6/operators/outer_product_operator.py  +4 -5
nifty6/operators/regridding_operator.py  +8 -11
nifty6/operators/simple_linear_operators.py  +3 -3
nifty6/operators/value_inserter.py  +2 -2
nifty6/plot.py  +8 -12
nifty6/probing.py  +3 -3
nifty6/random.py  +0 -0
nifty6/sugar.py  +7 -33
nifty6/version.py  +1 -1
test/test_energy_gradients.py  +3 -3
test/test_field.py  +49 -50
test/test_kl.py  +5 -5
test/test_linearization.py  +4 -4
test/test_minimizers.py  +25 -25
test/test_multi_field.py  +4 -4
test/test_operators/test_adjoint.py  +3 -3
test/test_operators/test_composed_operator.py  +4 -4
test/test_operators/test_convolution_operators.py  +5 -5
test/test_operators/test_correlated_fields.py  +4 -4
test/test_operators/test_diagonal_operator.py  +2 -2
test/test_operators/test_fft_operator.py  +7 -7
test/test_operators/test_harmonic_transform_operator.py  +1 -1
test/test_operators/test_nft.py  +6 -6
test/test_operators/test_regridding.py  +1 -1
test/test_operators/test_representation.py  +3 -3
test/test_operators/test_simplification.py  +8 -8
test/test_operators/test_smoothing_operator.py  +1 -1
test/test_operators/test_value_inserter.py  +2 -2
test/test_plot.py  +5 -5
test/test_spaces/test_lm_space.py  +1 -2
test/test_spaces/test_power_space.py  +4 -4
test/test_spaces/test_rg_space.py  +1 -1

.gitlab-ci.yml
@@ -39,17 +39,10 @@ test_serial:
   script:
     - pytest-3 -q --cov=nifty6 test
     - >
-      python3 -m coverage report --omit "*plot*,*distributed_do*" | tee coverage.txt
+      python3 -m coverage report --omit "*plot*" | tee coverage.txt
     - >
       grep TOTAL coverage.txt | awk '{ print "TOTAL: "$4; }'
 
-test_mpi:
-  stage: test
-  variables:
-    OMPI_MCA_btl_vader_single_copy_mechanism: none
-  script:
-    - mpiexec -n 2 --bind-to none pytest-3 -q test
-
 pages:
   stage: release
   script:

ChangeLog
@@ -16,3 +16,24 @@ but it is hard to make explicit tests since the two approaches cannot be mapped
 onto each other exactly. We experienced that preconditioning in the `MetricGaussianKL`
 via `napprox` breaks the inference scheme with the new model so `napprox` may not
 be used here.
+
+Removal of the standard parallelization scheme:
+===============================================
+
+When several MPI tasks are present, NIFTy5 distributes every Field over these
+tasks by splitting it along the first axis. This approach to parallelism is not
+very efficient, and it has not been used by anyone for several years, so we
+decided to remove it, which led to many simplifications within NIFTy.
+
+User-visible changes:
+- the methods `to_global_data`, `from_global_data`, `from_local_data` and
+  the property `local_data` have been removed from `Field` and `MultiField`.
+  Instead there are now the property `val` (returning a read-only numpy.ndarray
+  for `Field` and a dictionary of read-only numpy.ndarrays for `MultiField`) and
+  the method `val_rw()` (returning the same structures with writable copies of
+  the arrays). Fields and MultiFields can be created from such structures using
+  the static method `from_raw`
+- the functions `from_global_data` and `from_local_data` in `sugar` have been
+  replaced by a single function called `makeField`
+- the property `local_shape` has been removed from `Domain` (and subclasses)
+  and `DomainTuple`.
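
For orientation, a minimal before-and-after sketch of the Field API change described in this ChangeLog entry; the domain and array below are made up for illustration, only the method, property and function names come from the commit:

import numpy as np
import nifty6 as ift

dom = ift.RGSpace(64)
arr = np.zeros(dom.shape)

# NIFTy5-style calls removed by this commit:
#   fld = ift.Field.from_global_data(dom, arr)
#   data = fld.to_global_data()         # read access
#   data_rw = fld.to_global_data_rw()   # writable copy
#   fld2 = ift.from_global_data(dom, arr)   # sugar function

# NIFTy6-style replacements:
fld = ift.Field.from_raw(dom, arr)   # construct a Field from a numpy array
data = fld.val                       # read-only numpy.ndarray
data_rw = fld.val_rw()               # writable copy of the data
fld2 = ift.makeField(dom, arr)       # sugar: replaces from_global_data/from_local_data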

demos/Wiener_Filter.ipynb
@@ -296,9 +296,9 @@
    "outputs": [],
    "source": [
     "# Get signal data and reconstruction data\n",
-    "s_data = HT(sh).to_global_data()\n",
+    "s_data = HT(sh).val\n",
-    "m_data = HT(m).to_global_data()\n",
+    "m_data = HT(m).val\n",
-    "d_data = d.to_global_data()\n",
+    "d_data = d.val\n",
     "\n",
     "plt.figure(figsize=(15,10))\n",
     "plt.plot(s_data, 'r', label=\"Signal\", linewidth=3)\n",
@@ -350,8 +350,8 @@
   },
   "outputs": [],
   "source": [
-    "s_power_data = ift.power_analyze(sh).to_global_data()\n",
+    "s_power_data = ift.power_analyze(sh).val\n",
-    "m_power_data = ift.power_analyze(m).to_global_data()\n",
+    "m_power_data = ift.power_analyze(m).val\n",
    "plt.figure(figsize=(15,10))\n",
    "plt.loglog()\n",
    "plt.xlim(1, int(N_pixels/2))\n",
@@ -427,12 +427,12 @@
    "\n",
    "mask = np.full(s_space.shape, 1.)\n",
    "mask[l:h] = 0\n",
-   "mask = ift.Field.from_global_data(s_space, mask)\n",
+   "mask = ift.Field.from_raw(s_space, mask)\n",
    "\n",
    "R = ift.DiagonalOperator(mask)(HT)\n",
-   "n = n.to_global_data_rw()\n",
+   "n = n.val_rw()\n",
    "n[l:h] = 0\n",
-   "n = ift.Field.from_global_data(s_space, n)\n",
+   "n = ift.Field.from_raw(s_space, n)\n",
    "\n",
    "d = R(sh) + n"
   ]
@@ -497,11 +497,11 @@
   "outputs": [],
   "source": [
    "# Get signal data and reconstruction data\n",
-   "s_data = s.to_global_data()\n",
+   "s_data = s.val\n",
-   "m_data = HT(m).to_global_data()\n",
+   "m_data = HT(m).val\n",
-   "m_var_data = m_var.to_global_data()\n",
+   "m_var_data = m_var.val\n",
    "uncertainty = np.sqrt(m_var_data)\n",
-   "d_data = d.to_global_data_rw()\n",
+   "d_data = d.val_rw()\n",
    "\n",
    "# Set lost data to NaN for proper plotting\n",
    "d_data[d_data == 0] = np.nan"
@@ -583,12 +583,12 @@
    "\n",
    "mask = np.full(s_space.shape, 1.)\n",
    "mask[l:h,l:h] = 0.\n",
-   "mask = ift.Field.from_global_data(s_space, mask)\n",
+   "mask = ift.Field.from_raw(s_space, mask)\n",
    "\n",
    "R = ift.DiagonalOperator(mask)(HT)\n",
-   "n = n.to_global_data_rw()\n",
+   "n = n.val_rw()\n",
    "n[l:h, l:h] = 0\n",
-   "n = ift.Field.from_global_data(s_space, n)\n",
+   "n = ift.Field.from_raw(s_space, n)\n",
    "curv = Curvature(R=R, N=N, Sh=Sh)\n",
    "D = curv.inverse\n",
    "\n",
@@ -602,10 +602,10 @@
   "m_mean, m_var = ift.probe_with_posterior_samples(curv, HT, 20)\n",
   "\n",
   "# Get data\n",
-  "s_data = HT(sh).to_global_data()\n",
+  "s_data = HT(sh).val\n",
-  "m_data = HT(m).to_global_data()\n",
+  "m_data = HT(m).val\n",
-  "m_var_data = m_var.to_global_data()\n",
+  "m_var_data = m_var.val\n",
-  "d_data = d.to_global_data()\n",
+  "d_data = d.val\n",
   "uncertainty = np.sqrt(np.abs(m_var_data))"
  ]
 },
@@ -653,8 +653,8 @@
   "ma = np.max(s_data)\n",
   "\n",
   "fig, axes = plt.subplots(3, 2, figsize=(15, 22.5))\n",
-  "sample = HT(curv.draw_sample(from_inverse=True)+m).to_global_data()\n",
+  "sample = HT(curv.draw_sample(from_inverse=True)+m).val\n",
-  "post_mean = (m_mean + HT(m)).to_global_data()\n",
+  "post_mean = (m_mean + HT(m)).val\n",
   "\n",
   "data = [s_data, m_data, post_mean, sample, s_data - m_data, uncertainty]\n",
   "caption = [\"Signal\", \"Reconstruction\", \"Posterior mean\", \"Sample\", \"Residuals\", \"Uncertainty Map\"]\n",
@@ -731,7 +731,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.6.6"
+  "version": "3.7.5"
  }
 },
 "nbformat": 4,

demos/bench_gridder.py
@@ -24,16 +24,16 @@ for ii in range(10, 26):
     img = np.random.randn(nu*nv)
     img = img.reshape((nu, nv))
-    img = ift.from_global_data(uvspace, img)
+    img = ift.makeField(uvspace, img)
     t0 = time()
     GM = ift.GridderMaker(uvspace, eps=1e-7, uv=uv)
-    vis = ift.from_global_data(visspace, vis)
+    vis = ift.makeField(visspace, vis)
     op = GM.getFull().adjoint
     t1 = time()
-    op(img).to_global_data()
+    op(img).val
     t2 = time()
-    op.adjoint(vis).to_global_data()
+    op.adjoint(vis).val
     t3 = time()
     print(t2-t1, t3-t2)
     N0s.append(N)

demos/bernoulli_demo.py
@@ -61,9 +61,9 @@ if __name__ == '__main__':
     # Generate mock data
     p = R(sky)
     mock_position = ift.from_random('normal', harmonic_space)
-    tmp = p(mock_position).to_global_data().astype(np.float64)
+    tmp = p(mock_position).val.astype(np.float64)
     data = np.random.binomial(1, tmp)
-    data = ift.Field.from_global_data(R.target, data)
+    data = ift.Field.from_raw(R.target, data)
 
     # Compute likelihood and Hamiltonian
     position = ift.from_random('normal', harmonic_space)

demos/find_amplitude_parameters.py
@@ -57,7 +57,7 @@ if __name__ == '__main__':
     for _ in range(n_samps):
         fld = pspec(ift.from_random('normal', pspec.domain))
         klengths = fld.domain[0].k_lengths
-        ycoord = fld.to_global_data_rw()
+        ycoord = fld.val_rw()
         ycoord[0] = ycoord[1]
         ax.plot(klengths, ycoord, alpha=1)
@@ -80,7 +80,7 @@ if __name__ == '__main__':
     foo = []
     for ax in axs:
         pos = ift.from_random('normal', correlated_field.domain)
-        fld = correlated_field(pos).to_global_data()
+        fld = correlated_field(pos).val
         foo.append((ax, fld))
     mi, ma = np.inf, -np.inf
     for _, fld in foo:
@@ -106,7 +106,7 @@ if __name__ == '__main__':
         flds = []
         for _ in range(n_samps):
             pos = ift.from_random('normal', correlated_field.domain)
-            ax.plot(correlated_field(pos).to_global_data())
+            ax.plot(correlated_field(pos).val)
         plt.savefig('correlated_fields.png')
         plt.close()

demos/getting_started_1.py
@@ -42,7 +42,7 @@ def make_random_mask():
     # Random mask for spherical mode
     mask = ift.from_random('pm1', position_space)
     mask = (mask + 1)/2
-    return mask.to_global_data()
+    return mask.val
 
 
 if __name__ == '__main__':
@@ -95,7 +95,7 @@ if __name__ == '__main__':
     # and harmonic transformaion
 
     # Masking operator to model that parts of the field have not been observed
-    mask = ift.Field.from_global_data(position_space, mask)
+    mask = ift.Field.from_raw(position_space, mask)
     Mask = ift.MaskOperator(mask)
 
     # The response operator consists of

demos/getting_started_2.py
@@ -40,7 +40,7 @@ def exposure_2d():
     exposure[:, x_shape*4//5:x_shape] *= .1
     exposure[:, x_shape//2:x_shape*3//2] *= 3.
 
-    return ift.Field.from_global_data(position_space, exposure)
+    return ift.Field.from_raw(position_space, exposure)
 
 
 if __name__ == '__main__':
@@ -94,8 +94,8 @@ if __name__ == '__main__':
     lamb = R(sky)
     mock_position = ift.from_random('normal', domain)
     data = lamb(mock_position)
-    data = np.random.poisson(data.to_global_data().astype(np.float64))
+    data = np.random.poisson(data.val.astype(np.float64))
-    data = ift.Field.from_global_data(d_space, data)
+    data = ift.Field.from_raw(d_space, data)
     likelihood = ift.PoissonianEnergy(data)(lamb)
 
     # Settings for minimization

demos/getting_started_mf.py
@@ -40,7 +40,7 @@ class SingleDomain(ift.LinearOperator):
 
     def apply(self, x, mode):
         self._check_input(x, mode)
-        return ift.from_global_data(self._tgt(mode), x.to_global_data())
+        return ift.makeField(self._tgt(mode), x.val)
 
 
 def random_los(n_los):

demos/misc/convolution.py
@@ -38,11 +38,11 @@ signal_vals = np.zeros(npix, dtype=np.float64)
 for i in range(0, npix, npix//12 + 27):
     signal_vals[i] = 500.
 
-signal = ift.from_global_data(domain, signal_vals)
+signal = ift.makeField(domain, signal_vals)
 
 delta_vals = np.zeros(npix, dtype=np.float64)
 delta_vals[0] = 1.0
-delta = ift.from_global_data(domain, delta_vals)
+delta = ift.makeField(domain, delta_vals)
 
 
 # Define kernel function
@@ -58,12 +58,12 @@ domain = ift.RGSpace((100, 100))
 signal_vals = np.zeros(domain.shape, dtype=np.float64)
 signal_vals[35, 70] = 5000.
 signal_vals[45, 8] = 5000.
-signal = ift.from_global_data(domain, signal_vals)
+signal = ift.makeField(domain, signal_vals)
 
 # Define delta signal, generate kernel image
 delta_vals = np.zeros(domain.shape, dtype=np.float64)
 delta_vals[0, 0] = 1.0
-delta = ift.from_global_data(domain, delta_vals)
+delta = ift.makeField(domain, delta_vals)
 
 
 # Define kernel function

demos/multi_amplitudes_consistency.py
@@ -9,12 +9,12 @@ def testAmplitudesConsistency(seed, sspace):
         sc = ift.StatCalculator()
         for s in samples:
             sc.add(op(s.extract(op.domain)))
-        return sc.mean.to_global_data(), sc.var.sqrt().to_global_data()
+        return sc.mean.val, sc.var.sqrt().val
 
     np.random.seed(seed)
     offset_std = .1
     intergated_fluct_std0 = .003
     intergated_fluct_std1 = 0.1
 
     nsam = 1000
@@ -32,7 +32,7 @@ def testAmplitudesConsistency(seed, sspace):
     offset_std, _ = stats(fa.amplitude_total_offset, samples)
     intergated_fluct_std0, _ = stats(fa.average_fluctuation(0), samples)
     intergated_fluct_std1, _ = stats(fa.average_fluctuation(1), samples)
     slice_fluct_std0, _ = stats(fa.slice_fluctuation(0), samples)
     slice_fluct_std1, _ = stats(fa.slice_fluctuation(1), samples)
@@ -54,7 +54,7 @@ def testAmplitudesConsistency(seed, sspace):
     print("Expected integrated fluct. frequency Std: " +
           str(intergated_fluct_std1))
     print("Estimated integrated fluct. frequency Std: " + str(fluct_freq))
     print("Expected slice fluct. space Std: " +
           str(slice_fluct_std0))
     print("Estimated slice fluct. space Std: " + str(sl_fluct_space))
@@ -65,8 +65,8 @@ def testAmplitudesConsistency(seed, sspace):
     print("Expected total fluct. Std: " + str(tot_flm))
     print("Estimated total fluct. Std: " + str(fluct_total))
 
     np.testing.assert_allclose(offset_std, zm_std_mean, rtol=0.5)
     np.testing.assert_allclose(intergated_fluct_std0, fluct_space, rtol=0.5)
     np.testing.assert_allclose(intergated_fluct_std1, fluct_freq, rtol=0.5)
@@ -74,7 +74,7 @@ def testAmplitudesConsistency(seed, sspace):
     np.testing.assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
     np.testing.assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)
 
     fa = ift.CorrelatedFieldMaker.make(offset_std, .1, '')
     fa.add_fluctuations(fsspace, intergated_fluct_std1, 1., 3.1, 1., .5, .1,
                         -4, 1., 'freq')
@@ -87,7 +87,7 @@ def testAmplitudesConsistency(seed, sspace):
     print("Forced slice fluct. space Std: " + str(m))
     print("Expected slice fluct. Std: " + str(em))
 
     np.testing.assert_allclose(m, em, rtol=0.5)
 
     assert op.target[0] == sspace
     assert op.target[1] == fsspace

demos/polynomial_fit.py
@@ -36,7 +36,7 @@ def polynomial(coefficients, sampling_points):
     if not (isinstance(coefficients, ift.Field)
             and isinstance(sampling_points, np.ndarray)):
         raise TypeError
-    params = coefficients.to_global_data()
+    params = coefficients.val
     out = np.zeros_like(sampling_points)
     for ii in range(len(params)):
         out += params[ii]*sampling_points**ii
@@ -71,14 +71,14 @@ class PolynomialResponse(ift.LinearOperator):
     def apply(self, x, mode):
         self._check_input(x, mode)
-        val = x.to_global_data_rw()
+        val = x.val_rw()
         if mode == self.TIMES:
             # FIXME Use polynomial() here
             out = self._mat.dot(val)
         else:
             # FIXME Can this be optimized?
             out = self._mat.conj().T.dot(val)
-        return ift.from_global_data(self._tgt(mode), out)
+        return ift.makeField(self._tgt(mode), out)
 
 
 # Generate some mock data
@@ -99,8 +99,8 @@ R = PolynomialResponse(p_space, x)
 ift.extra.consistency_check(R)
 
 d_space = R.target
-d = ift.from_global_data(d_space, y)
+d = ift.makeField(d_space, y)
-N = ift.DiagonalOperator(ift.from_global_data(d_space, var))
+N = ift.DiagonalOperator(ift.makeField(d_space, var))
 
 IC = ift.DeltaEnergyController(tol_rel_deltaE=1e-12, iteration_limit=200)
 likelihood = ift.GaussianEnergy(d, N)(R)
@@ -136,9 +136,8 @@ plt.savefig('fit.png')
 plt.close()
 
 # Print parameters
-mean = sc.mean.to_global_data()
+mean = sc.mean.val
-sigma = np.sqrt(sc.var.to_global_data())
+sigma = np.sqrt(sc.var.val)
-if ift.dobj.master:
 for ii in range(len(mean)):
     print('Coefficient x**{}: {:.2E} +/- {:.2E}'.format(ii, mean[ii],
                                                         sigma[ii]))

nifty6/__init__.py
 from .version import __version__
-from . import dobj
 from .domains.domain import Domain
 from .domains.structured_domain import StructuredDomain
 from .domains.unstructured_domain import UnstructuredDomain
...
@@ -93,10 +91,5 @@ from .linearization import Linearization
 from .operator_spectrum import operator_spectrum