Commit bc24ba44
Remove GreedySampling

authored Jun 13, 2018 by Philipp Arras
parent adb19b35
Pipeline #30836 passed with stages in 1 minute and 33 seconds
Changes: 3 files
demos/sampling_demo.py (deleted, 100644 → 0)
import nifty4 as ift
import numpy as np
import matplotlib.pyplot as plt
from nifty4.sugar import create_power_operator

np.random.seed(41)

x_space = ift.RGSpace(1024)
h_space = x_space.get_default_codomain()
d_space = x_space

N_hat = np.full(d_space.shape, .1)
N_hat[300:350] = 1e-13
N_hat = ift.Field.from_global_data(d_space, N_hat)
N = ift.DiagonalOperator(N_hat)

FFT = ift.HarmonicTransformOperator(h_space, x_space)
R = ift.ScalingOperator(1., x_space)


def ampspec(k):
    return 1. / (0.2 + k**2.)


S = ift.ScalingOperator(1., h_space)
A = create_power_operator(h_space, ampspec)
s_h = S.draw_sample()
sky = FFT * A
s_x = sky(s_h)
n = N.draw_sample()
d = R(s_x) + n

R_p = R * FFT * A
j = R_p.adjoint(N.inverse(d))
D_inv = ift.SandwichOperator.make(R_p, N.inverse) + S.inverse

N_samps = 200
N_iter = 300
IC = ift.GradientNormController(tol_abs_gradnorm=1e-5, iteration_limit=N_iter)
inverter = ift.ConjugateGradient(IC)
sampling_inverter = ift.ConjugateGradient(IC)

D_inv_1 = ift.SamplingEnabler(ift.SandwichOperator.make(R_p, N.inverse),
                              S.inverse, sampling_inverter)
D_inv_1 = ift.InversionEnabler(D_inv_1, inverter)
D_inv_2 = ift.GreedySamplingEnabler(D_inv, sampling_inverter)

# GOOD
samps_1 = [D_inv_1.draw_sample(from_inverse=True) for i in range(N_samps)]
# BAD
samps_2 = [D_inv_2.draw_sample(from_inverse=True) for i in range(N_samps)]

m = D_inv_1.inverse_times(j)
m_x = sky(m)

pltdict = {'alpha': .3, 'linewidth': .3}
for i in range(N_samps):
    if i == 0:
        plt.plot(sky(samps_2[i]).to_global_data()[290:360], color='b',
                 label='Default samples (residuals)', **pltdict)
        plt.plot(sky(samps_1[i]).to_global_data()[290:360], color='r',
                 label='Conservative samples (residuals)', **pltdict)
    else:
        plt.plot(sky(samps_2[i]).to_global_data()[290:360], color='b',
                 **pltdict)
        plt.plot(sky(samps_1[i]).to_global_data()[290:360], color='r',
                 **pltdict)
plt.plot((s_x - m_x).to_global_data()[290:360], color='k',
         label='signal - mean')
plt.title('Comparison of conservative vs default samples near area of low noise')
plt.legend()
plt.savefig('residual_sample_comparison.png')
plt.close()

D_hat_old = ift.Field.full(x_space, 0.).to_global_data()
D_hat_new = ift.Field.full(x_space, 0.).to_global_data()
for i in range(N_samps):
    D_hat_old += sky(samps_2[i]).to_global_data()**2
    D_hat_new += sky(samps_1[i]).to_global_data()**2
plt.plot(np.sqrt(D_hat_old / N_samps), 'r--', label='Default uncertainty')
plt.plot(-np.sqrt(D_hat_old / N_samps), 'r--')
plt.fill_between(range(len(D_hat_new)), -np.sqrt(D_hat_new / N_samps),
                 np.sqrt(D_hat_new / N_samps), facecolor='0.5', alpha=0.5,
                 label='Conservative uncertainty')
plt.plot((s_x - m_x).to_global_data(), alpha=0.6, color='k',
         label='signal - mean')
plt.title('Comparison of uncertainty in position space')
plt.legend()
plt.savefig('uncertainty_x.png')
plt.close()

p_space = ift.PowerSpace(h_space)
p_new = ift.power_analyze(samps_1[0])
for i in range(1, N_samps):
    p_new += ift.power_analyze(samps_1[i])
p_new /= N_samps
p_old = ift.power_analyze(samps_2[0])
for i in range(1, N_samps):
    p_old += ift.power_analyze(samps_2[i])
p_old /= N_samps

plt.title("power_analyze")
plt.plot(p_old.to_global_data(), label='default')
plt.plot(p_new.to_global_data(), label='conservative')
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.savefig('sample_mean_power.png')
plt.close()
nifty4/operators/__init__.py (modified)
@@ -12,11 +12,9 @@ from .power_distributor import PowerDistributor
 from .inversion_enabler import InversionEnabler
 from .sandwich_operator import SandwichOperator
 from .sampling_enabler import SamplingEnabler
-from .sampling_enabler_greedy import GreedySamplingEnabler

 __all__ = ["LinearOperator", "EndomorphicOperator", "ScalingOperator",
            "DiagonalOperator", "HarmonicTransformOperator", "FFTOperator",
            "FFTSmoothingOperator", "GeometryRemover", "LaplaceOperator",
            "SmoothnessOperator", "PowerDistributor",
-           "InversionEnabler", "SandwichOperator", "SamplingEnabler",
-           "GreedySamplingEnabler"]
+           "InversionEnabler", "SandwichOperator", "SamplingEnabler"]
nifty4/operators/sampling_enabler_greedy.py (deleted, 100644 → 0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2018 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
import numpy as np

from .endomorphic_operator import EndomorphicOperator
from .inversion_enabler import InversionEnabler


class GreedySamplingEnabler(EndomorphicOperator):
    """Class which augments the capability of another operator object via
    numerical inversion.

    Parameters
    ----------
    op : :class:`EndomorphicOperator`
        The operator to be enhanced.
        The InversionEnabler object will support the same operation modes as
        `op`, and additionally the inverse set. The newly-added modes will
        be computed by iterative inversion.
    inverter : :class:`Minimizer`
        The minimizer to use for the iterative numerical inversion.
        Typically, this is a :class:`ConjugateGradient` object.
    approximation : :class:`LinearOperator`, optional
        if not None, this operator should be an approximation to `op`, which
        supports the operation modes that `op` doesn't have. It is used as a
        preconditioner during the iterative inversion, to accelerate
        convergence.
    """

    def __init__(self, op, sampling_inverter, approximation=None):
        super(GreedySamplingEnabler, self).__init__()
        self._op = op
        self._sampling_op = InversionEnabler(op, sampling_inverter,
                                             approximation=approximation)

    def draw_sample(self, from_inverse=False, dtype=np.float64):
        try:
            return self._op.draw_sample(from_inverse, dtype)
        except NotImplementedError:
            samp = self._op.draw_sample(not from_inverse, dtype)
            return self._sampling_op.inverse_times(samp) \
                if from_inverse else self(samp)

    @property
    def domain(self):
        return self._op.domain

    @property
    def capability(self):
        return self._op.capability

    def apply(self, x, mode):
        return self._op.apply(x, mode)
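For reference, the deleted demo above already contrasts the two sampling routes; the following is only a condensed, hypothetical sketch of that difference. It reuses the objects defined in demos/sampling_demo.py (R_p, N, S, D_inv, inverter, sampling_inverter) and introduces illustrative names (D_cons, D_greedy, samp_cons, samp_greedy) that do not appear in the original files.

import nifty4 as ift

# Conservative route (kept): SamplingEnabler treats the likelihood part
# R_p^dagger N^-1 R_p and the prior part S^-1 separately; InversionEnabler
# then adds the numerically inverted modes on top.
D_cons = ift.SamplingEnabler(ift.SandwichOperator.make(R_p, N.inverse),
                             S.inverse, sampling_inverter)
D_cons = ift.InversionEnabler(D_cons, inverter)
samp_cons = D_cons.draw_sample(from_inverse=True)    # the demo's "GOOD" samples

# Greedy route (removed by this commit): draw a sample in the mode the summed
# operator D_inv already supports and push it through the iterative inverse,
# so the sample inherits the truncation error of the CG run.
D_greedy = ift.GreedySamplingEnabler(D_inv, sampling_inverter)
samp_greedy = D_greedy.draw_sample(from_inverse=True)  # the demo's "BAD" samples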