ift / NIFTy · Commits

Commit e3e42447
Authored Aug 12, 2018 by Philipp Arras

    Merge branch 'redesign' into redesigning_resolve

Parents: b14a66ba, 0f59582e
Changes: 28 files
demos/getting_started_3.py
@@ -74,11 +74,12 @@ if __name__ == '__main__':
     # set up minimization and inversion schemes
     ic_cg = ift.GradientNormController(iteration_limit=10)
     ic_sampling = ift.GradientNormController(iteration_limit=100)
-    ic_newton = ift.GradientNormController(name='Newton', iteration_limit=100)
+    ic_newton = ift.DeltaEnergyController(name='Newton', tol_rel_deltaE=1e-8,
+                                          iteration_limit=100)
     minimizer = ift.RelaxedNewton(ic_newton)
     # minimizer = ift.VL_BFGS(ic_newton)
-    # minimizer = ift.NewtonCG(1e-10, 100, True)
-    # minimizer = ift.L_BFGS_B(1e-10, 1e-5, 100, 10, True)
+    # minimizer = ift.NewtonCG(xtol=1e-10, maxiter=100, disp=True)
+    # minimizer = ift.L_BFGS_B(ftol=1e-10, gtol=1e-5, maxiter=100, maxcor=20, disp=True)
 
     # build model Hamiltonian
     H = ift.Hamiltonian(likelihood, ic_sampling)
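The functional change in this demo is the Newton stopping rule: the gradient-norm criterion is replaced by an energy-based one, and the commented-out alternative minimizers switch to keyword arguments. A short sketch of the distinction, using only the names that appear in the hunk above (the surrounding demo setup is assumed):

    import nifty5 as ift

    # gradient-based rule: stop once the gradient-norm criterion or the
    # iteration limit is reached (still used for CG and sampling above)
    ic_sampling = ift.GradientNormController(iteration_limit=100)

    # energy-based rule: stop once the relative energy change per Newton step
    # drops below tol_rel_deltaE, or once the iteration limit is reached
    ic_newton = ift.DeltaEnergyController(name='Newton', tol_rel_deltaE=1e-8,
                                          iteration_limit=100)
    minimizer = ift.RelaxedNewton(ic_newton)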
nifty5/__init__.py
@@ -21,7 +21,7 @@ from .multi_field import MultiField
 from .operators.operator import Operator
 from .operators.central_zero_padder import CentralZeroPadder
 from .operators.diagonal_operator import DiagonalOperator
-from .operators.dof_distributor import DOFDistributor
+from .operators.distributors import DOFDistributor, PowerDistributor
 from .operators.domain_distributor import DomainDistributor
 from .operators.endomorphic_operator import EndomorphicOperator
 from .operators.exp_transform import ExpTransform
@@ -33,7 +33,6 @@ from .operators.inversion_enabler import InversionEnabler
 from .operators.laplace_operator import LaplaceOperator
 from .operators.linear_operator import LinearOperator
 from .operators.mask_operator import MaskOperator
-from .operators.power_distributor import PowerDistributor
 from .operators.qht_operator import QHTOperator
 from .operators.sampling_enabler import SamplingEnabler
 from .operators.sandwich_operator import SandwichOperator
@@ -54,8 +53,8 @@ from .probing import probe_with_posterior_samples, probe_diagonal, \
 from .minimization.line_search import LineSearch
 from .minimization.line_search_strong_wolfe import LineSearchStrongWolfe
-from .minimization.iteration_controller import IterationController
-from .minimization.gradient_norm_controller import GradientNormController
+from .minimization.iteration_controllers import (
+    IterationController, GradientNormController, DeltaEnergyController)
 from .minimization.minimizer import Minimizer
 from .minimization.conjugate_gradient import ConjugateGradient
 from .minimization.nonlinear_cg import NonlinearCG
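The net effect of these import changes is that all iteration controllers now come from a single module, nifty5/minimization/iteration_controllers.py, while the names exported by the package stay available. A minimal sketch of user-side imports that keep working after the change (assuming the package is importable as nifty5):

    from nifty5 import (DeltaEnergyController, DOFDistributor,
                        GradientNormController, IterationController,
                        PowerDistributor)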
nifty5/data_objects/numpy_do2.py (deleted, 100644 → 0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2018 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import absolute_import, division, print_function

from ..compat import *
import numpy as np
from .random import Random
import sys

ntask = 1
rank = 0
master = True


def is_numpy():
    return False


def local_shape(shape, distaxis=0):
    return shape


class data_object(object):
    def __init__(self, shape, data):
        self._data = data

    def copy(self):
        return data_object(self._data.shape, self._data.copy())

    @property
    def dtype(self):
        return self._data.dtype

    @property
    def shape(self):
        return self._data.shape

    @property
    def size(self):
        return self._data.size

    @property
    def real(self):
        return data_object(self._data.shape, self._data.real)

    @property
    def imag(self):
        return data_object(self._data.shape, self._data.imag)

    def conj(self):
        return data_object(self._data.shape, self._data.conj())

    def conjugate(self):
        return data_object(self._data.shape, self._data.conjugate())

    def _contraction_helper(self, op, axis):
        if axis is not None:
            if len(axis) == len(self._data.shape):
                axis = None
        if axis is None:
            return getattr(self._data, op)()
        res = getattr(self._data, op)(axis=axis)
        return data_object(res.shape, res)

    def sum(self, axis=None):
        return self._contraction_helper("sum", axis)

    def prod(self, axis=None):
        return self._contraction_helper("prod", axis)

    def min(self, axis=None):
        return self._contraction_helper("min", axis)

    def max(self, axis=None):
        return self._contraction_helper("max", axis)

    def mean(self, axis=None):
        if axis is None:
            sz = self.size
        else:
            sz = reduce(lambda x, y: x*y, [self.shape[i] for i in axis])
        return self.sum(axis)/sz

    def std(self, axis=None):
        return np.sqrt(self.var(axis))

    # FIXME: to be improved!
    def var(self, axis=None):
        if axis is not None and len(axis) != len(self.shape):
            raise ValueError("functionality not yet supported")
        return (abs(self-self.mean())**2).mean()

    def _binary_helper(self, other, op):
        a = self
        if isinstance(other, data_object):
            b = other
            if a._data.shape != b._data.shape:
                raise ValueError("shapes are incompatible.")
            a = a._data
            b = b._data
        elif np.isscalar(other):
            a = a._data
            b = other
        elif isinstance(other, np.ndarray):
            a = a._data
            b = other
        else:
            return NotImplemented

        tval = getattr(a, op)(b)
        if tval is a:
            return self
        else:
            return data_object(self._data.shape, tval)

    def __neg__(self):
        return data_object(self._data.shape, -self._data)

    def __abs__(self):
        return data_object(self._data.shape, abs(self._data))

    def all(self):
        return self.sum() == self.size

    def any(self):
        return self.sum() != 0

    def fill(self, value):
        self._data.fill(value)


for op in ["__add__", "__radd__", "__iadd__",
           "__sub__", "__rsub__", "__isub__",
           "__mul__", "__rmul__", "__imul__",
           "__div__", "__rdiv__", "__idiv__",
           "__truediv__", "__rtruediv__", "__itruediv__",
           "__floordiv__", "__rfloordiv__", "__ifloordiv__",
           "__pow__", "__rpow__", "__ipow__",
           "__lt__", "__le__", "__gt__", "__ge__", "__eq__", "__ne__"]:
    def func(op):
        def func2(self, other):
            return self._binary_helper(other, op=op)
        return func2
    setattr(data_object, op, func(op))


def full(shape, fill_value, dtype=None):
    return data_object(shape, np.full(shape, fill_value, dtype))


def empty(shape, dtype=None):
    return data_object(shape, np.empty(shape, dtype))


def zeros(shape, dtype=None, distaxis=0):
    return data_object(shape, np.zeros(shape, dtype))


def ones(shape, dtype=None, distaxis=0):
    return data_object(shape, np.ones(shape, dtype))


def empty_like(a, dtype=None):
    return data_object(np.empty_like(a._data, dtype))


def vdot(a, b):
    return np.vdot(a._data, b._data)


def _math_helper(x, function, out):
    function = getattr(np, function)
    if out is not None:
        function(x._data, out=out._data)
        return out
    else:
        return data_object(x.shape, function(x._data))


_current_module = sys.modules[__name__]

for f in ["sqrt", "exp", "log", "tanh", "conjugate"]:
    def func(f):
        def func2(x, out=None):
            return _math_helper(x, f, out)
        return func2
    setattr(_current_module, f, func(f))


def from_object(object, dtype, copy, set_locked):
    if dtype is None:
        dtype = object.dtype
    dtypes_equal = dtype == object.dtype
    if set_locked and dtypes_equal and locked(object):
        return object
    if not dtypes_equal and not copy:
        raise ValueError("cannot change data type without copying")
    if set_locked and not copy:
        raise ValueError("cannot lock object without copying")
    data = np.array(object._data, dtype=dtype, copy=copy)
    if set_locked:
        data.flags.writeable = False
    return data_object(object._shape, data, distaxis=object._distaxis)


# This function draws all random numbers on all tasks, to produce the same
# array independent on the number of tasks
# MR FIXME: depending on what is really wanted/needed (i.e. same result
# independent of number of tasks, performance etc.) we need to adjust the
# algorithm.
def from_random(random_type, shape, dtype=np.float64, **kwargs):
    generator_function = getattr(Random, random_type)
    ldat = generator_function(dtype=dtype, shape=shape, **kwargs)
    return from_local_data(shape, ldat)


def local_data(arr):
    return arr._data


def ibegin_from_shape(glob_shape, distaxis=0):
    return (0,)*len(glob_shape)


def ibegin(arr):
    return (0,)*arr._data.ndim


def np_allreduce_sum(arr):
    return arr.copy()


def np_allreduce_min(arr):
    return arr.copy()


def distaxis(arr):
    return -1


def from_local_data(shape, arr, distaxis=-1):
    return data_object(shape, arr)


def from_global_data(arr, sum_up=False):
    return data_object(arr.shape, arr)


def to_global_data(arr):
    return arr._data


def redistribute(arr, dist=None, nodist=None):
    return arr.copy()


def default_distaxis():
    return -1


def lock(arr):
    arr._data.flags.writeable = False


def locked(arr):
    return not arr._data.flags.writeable
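The deleted module was the serial (single-task) stand-in for NIFTy's distributed data-object backend: with ntask = 1 the "global" and "local" views coincide, and every collective operation degenerates to a copy or a no-op. A small sketch of that behaviour, using only the functions defined above (assuming the module is imported through the package, as the dobj layer does):

    import numpy as np

    arr = np.arange(6.).reshape(2, 3)
    d = from_global_data(arr)                 # wraps the array without copying
    assert to_global_data(d) is arr           # the 'global' view is the same buffer
    assert local_shape(arr.shape) == arr.shape
    assert np_allreduce_sum(d)._data is not d._data   # 'allreduce' is just a copy
    lock(d)                                   # marks the underlying buffer read-only
    assert locked(d)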
nifty5/domain_tuple.py
@@ -20,6 +20,7 @@ from __future__ import absolute_import, division, print_function
 from .compat import *
 from .domains.domain import Domain
+from . import utilities
 
 
 class DomainTuple(object):
@@ -153,3 +154,7 @@ class DomainTuple(object):
         if DomainTuple._scalarDomain is None:
             DomainTuple._scalarDomain = DomainTuple.make(())
         return DomainTuple._scalarDomain
+
+    def __repr__(self):
+        subs = "\n".join(sub.__repr__() for sub in self._dom)
+        return "DomainTuple:\n" + utilities.indent(subs)
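The added __repr__ joins the reprs of the sub-domains and indents them under a "DomainTuple:" header. An illustrative sketch (the exact sub-domain line and indentation depend on each domain's own __repr__ and on utilities.indent):

    import nifty5 as ift

    dom = ift.DomainTuple.make(ift.RGSpace(4))
    print(repr(dom))
    # DomainTuple:
    #   RGSpace(shape=(4,), distances=(0.25,), harmonic=False)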
nifty5/field.py
@@ -614,7 +614,10 @@ class Field(object):
         return self
 
     def unite(self, other):
         return self + other
 
+    def flexible_addsub(self, other, neg):
+        return self - other if neg else self + other
+
     def positive_tanh(self):
         return 0.5*(1.+self.tanh())
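flexible_addsub folds addition and subtraction into one call selected by a boolean (this is what Linearization uses below), and positive_tanh maps field values into the interval (0, 1). A hypothetical usage sketch for two fields f and g living on the same domain:

    h = f.flexible_addsub(g, False)   # same as f + g
    d = f.flexible_addsub(g, True)    # same as f - g
    p = f.positive_tanh()             # 0.5*(1. + f.tanh()), values in (0, 1)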
nifty5/library/correlated_fields.py
@@ -24,7 +24,7 @@ from ..multi_field import MultiField
 from ..multi_domain import MultiDomain
 from ..operators.domain_distributor import DomainDistributor
 from ..operators.harmonic_operators import HarmonicTransformOperator
-from ..operators.power_distributor import PowerDistributor
+from ..operators.distributors import PowerDistributor
 from ..operators.operator import Operator
 from ..operators.simple_linear_operators import FieldAdapter
@@ -59,14 +59,14 @@ def CorrelatedField(s_space, amplitude_model):
 # h_space = DomainTuple.make((h_space_spatial, h_space_energy))
 # ht1 = HarmonicTransformOperator(h_space, space=0)
 # ht2 = HarmonicTransformOperator(ht1.target, space=1)
-# ht = ht2*ht1
+# ht = ht2(ht1)
 #
-# p_space_spatial = amplitude_model_spatial.value.domain[0]
-# p_space_energy = amplitude_model_energy.value.domain[0]
+# p_space_spatial = amplitude_model_spatial.target[0]
+# p_space_energy = amplitude_model_energy.target[0]
 #
 # pd_spatial = PowerDistributor(h_space, p_space_spatial, 0)
 # pd_energy = PowerDistributor(pd_spatial.domain, p_space_energy, 1)
-# pd = pd_spatial*pd_energy
+# pd = pd_spatial(pd_energy)
 #
 # dom_distr_spatial = DomainDistributor(pd.domain, 0)
 # dom_distr_energy = DomainDistributor(pd.domain, 1)
@@ -76,9 +76,7 @@ def CorrelatedField(s_space, amplitude_model):
 # a = a_spatial*a_energy
 # A = pd(a)
 #
-# position = MultiField.from_dict(
-#     {'xi': Field.from_random('normal', h_space)})
-# xi = Variable(position)['xi']
-# correlated_field_h = A*xi
-# correlated_field = ht(correlated_field_h)
-# return PointwiseExponential(correlated_field)
+# domain = MultiDomain.union(
+#     (amplitude_model_spatial.domain, amplitude_model_energy.domain,
+#      MultiDomain.make({"xi": h_space})))
+# return exp(ht(A*FieldAdapter(domain, "xi")))
nifty5/linearization.py
@@ -61,22 +61,28 @@ class Linearization(object):
     def real(self):
         return Linearization(self._val.real, self._jac.real)
 
-    def __add__(self, other):
+    def _myadd(self, other, neg):
         if isinstance(other, Linearization):
             met = None
             if self._metric is not None and other._metric is not None:
-                met = self._metric._myadd(other._metric, False)
+                met = self._metric._myadd(other._metric, neg)
             return Linearization(
-                self._val.unite(other._val),
-                self._jac._myadd(other._jac, False), met)
+                self._val.flexible_addsub(other._val, neg),
+                self._jac._myadd(other._jac, neg), met)
         if isinstance(other, (int, float, complex, Field, MultiField)):
-            return Linearization(self._val+other, self._jac, self._metric)
+            if neg:
+                return Linearization(self._val-other, self._jac, self._metric)
+            else:
+                return Linearization(self._val+other, self._jac, self._metric)
+
+    def __add__(self, other):
+        return self._myadd(other, False)
 
     def __radd__(self, other):
-        return self.__add__(other)
+        return self._myadd(other, False)
 
     def __sub__(self, other):
-        return self.__add__(-other)
+        return self._myadd(other, True)
 
     def __rsub__(self, other):
         return (-self).__add__(other)
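Addition and subtraction are thus shared between __add__, __radd__ and __sub__ through a single helper that takes a neg flag. A toy illustration of the same dispatch pattern (hypothetical class, not NIFTy code):

    class Toy:
        def __init__(self, val):
            self._val = val

        def _myadd(self, other, neg):
            o = other._val if isinstance(other, Toy) else other
            return Toy(self._val - o if neg else self._val + o)

        def __add__(self, other):
            return self._myadd(other, False)

        __radd__ = __add__

        def __sub__(self, other):
            return self._myadd(other, True)

    assert (Toy(3) + 2)._val == 5 and (Toy(3) - 2)._val == 1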
nifty5/minimization/iteration_controller.py (deleted, 100644 → 0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2018 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from __future__ import absolute_import, division, print_function

from ..compat import *
from ..utilities import NiftyMetaBase


class IterationController(NiftyMetaBase()):
    """The abstract base class for all iteration controllers.
    An iteration controller is an object that monitors the progress of a
    minimization iteration. At the begin of the minimization, its start()
    method is called with the energy object at the initial position.
    Afterwards, its check() method is called during every iteration step with
    the energy object describing the current position.

    Based on that information, the iteration controller has to decide whether
    iteration needs to progress further (in this case it returns CONTINUE), or
    if sufficient convergence has been reached (in this case it returns
    CONVERGED), or if some error has been detected (then it returns ERROR).

    The concrete convergence criteria can be chosen by inheriting from this
    class; the implementer has full flexibility to use whichever criteria are
    appropriate for a particular problem - as long as they can be computed from
    the information passed to the controller during the iteration process.
    """

    CONVERGED, CONTINUE, ERROR = list(range(3))

    def start(self, energy):
        """Starts the iteration.

        Parameters
        ----------
        energy : Energy object
           Energy object at the start of the iteration

        Returns
        -------
        status : integer status, can be CONVERGED, CONTINUE or ERROR
        """
        raise NotImplementedError

    def check(self, energy):
        """Checks the state of the iteration. Called after every step.

        Parameters
        ----------
        energy : Energy object
           Energy object at the start of the iteration

        Returns
        -------
        status : integer status, can be CONVERGED, CONTINUE or ERROR
        """
        raise NotImplementedError
nifty5/minimization/gradient_norm_controller.py → nifty5/minimization/iteration_controllers.py
@@ -20,7 +20,56 @@ from __future__ import absolute_import, division, print_function
 from ..compat import *
 from ..logger import logger
-from .iteration_controller import IterationController
+from ..utilities import NiftyMetaBase
+
+
+class IterationController(NiftyMetaBase()):
+    """The abstract base class for all iteration controllers.
+    An iteration controller is an object that monitors the progress of a
+    minimization iteration. At the begin of the minimization, its start()
+    method is called with the energy object at the initial position.
+    Afterwards, its check() method is called during every iteration step with
+    the energy object describing the current position.
+
+    Based on that information, the iteration controller has to decide whether
+    iteration needs to progress further (in this case it returns CONTINUE), or
+    if sufficient convergence has been reached (in this case it returns
+    CONVERGED), or if some error has been detected (then it returns ERROR).
+
+    The concrete convergence criteria can be chosen by inheriting from this
+    class; the implementer has full flexibility to use whichever criteria are
+    appropriate for a particular problem - as long as they can be computed from
+    the information passed to the controller during the iteration process.
+    """
+
+    CONVERGED, CONTINUE, ERROR = list(range(3))
+
+    def start(self, energy):
+        """Starts the iteration.
+
+        Parameters
+        ----------
+        energy : Energy object
+           Energy object at the start of the iteration
+
+        Returns
+        -------
+        status : integer status, can be CONVERGED, CONTINUE or ERROR
+        """
+        raise NotImplementedError
+
+    def check(self, energy):
+        """Checks the state of the iteration. Called after every step.
+
+        Parameters
+        ----------
+        energy : Energy object
+           Energy object at the start of the iteration
+
+        Returns
+        -------
+        status : integer status, can be CONVERGED, CONTINUE or ERROR
+        """
+        raise NotImplementedError
 
 
 class GradientNormController(IterationController):
@@ -54,20 +103,6 @@ class GradientNormController(IterationController):
         self._name = name
 
     def start(self, energy):
-        """ Start a new iteration.
-
-        The iteration and convergence counters are set to 0.
-
-        Parameters
-        ----------
-        energy : Energy
-            The energy functional to be minimized.
-
-        Returns
-        -------
-        int : iteration status
-            can be CONVERGED or CONTINUE
-        """
         self._itcount = -1
         self._ccount = 0
         if self._tol_rel_gradnorm is not None:
@@ -76,27 +111,6 @@ class GradientNormController(IterationController):
             return self.check(energy)
 
     def check(self, energy):
-        """ Check for convergence.
-
-        - Increase the iteration counter by 1.
-        - If any of the convergence criteria are fulfilled, increase the
-          convergence counter by 1; else decrease it by 1 (but not below 0).
-        - If the convergence counter exceeds the convergence level, return
-          CONVERGED.
-        - If the iteration counter exceeds the iteration limit, return
-          CONVERGED.
-        - Otherwise return CONTINUE.
-
-        Parameters
-        ----------
-        energy : Energy
-            The current solution estimate
-
-        Returns
-        -------
-        int : iteration status
-            can be CONVERGED or CONTINUE
-        """