ift / NIFTy

Commit 15f09ea4, authored Oct 14, 2016 by theos
Implemented ConjugateGradient and PropagatorOperator. Still not tested.
Parent: 7ad6bc06
Changes: 2
nifty/minimization/conjugate_gradient.py (view file @ 15f09ea4)
# -*- coding: utf-8 -*-

from __future__ import division

import numpy as np

from nifty import Field

import logging
logger = logging.getLogger('NIFTy.CG')
class ConjugateGradient(object):
    def __init__(self, convergence_tolerance=1E-4, convergence_level=1,
                 iteration_limit=-1, reset_count=-1, preconditioner=None,
                 callback=None):
        """
        Initializes the conjugate_gradient and sets the attributes (except
        for `x`).

        Parameters
        ----------
        convergence_tolerance : scalar, *optional*
            Tolerance specifying convergence; measured by the relative change
            of the residual between iterations (default: 1E-4).
        convergence_level : integer, *optional*
            Number of times the tolerance must be undershot before the
            minimization counts as converged (default: 1).
        iteration_limit : integer, *optional*
            Maximum number of iterations performed; -1 means no limit
            (default: -1).
        reset_count : integer, *optional*
            Number of iterations after which to restart, i.e., forget the
            previous conjugated directions; -1 means never reset
            (default: -1).
        preconditioner : {operator, function}, *optional*
            Operator `W` that is a preconditioner on `A` and is applicable to
            a field (default: None).
        callback : function, *optional*
            Callback function which is given the current `x` and iteration
            counter each iteration (default: None).
        """
        self.convergence_tolerance = float(convergence_tolerance)
        self.convergence_level = int(convergence_level)

        self.iteration_limit = int(iteration_limit)
        self.reset_count = int(reset_count)

        if preconditioner is None:
            preconditioner = lambda z: z

        self.preconditioner = preconditioner
        self.callback = callback
    def __call__(self, A, b, x0=None):
        """
        Runs the conjugate gradient minimization.

        Parameters
        ----------
        A : {operator, function}
            Operator `A` applicable to a field; assumed to be positive
            definite.
        b : field
            Resulting field of the operation `A(x)`.
        x0 : field, *optional*
            Starting guess for the minimization (default: zero field on the
            domain of `A`).

        Returns
        -------
        x : field
            Latest `x` of the minimization.
        convergence : integer
            Latest convergence level indicating whether the minimization
            has converged or not.
        """
        if x0 is None:
            x0 = Field(A.domain, val=0)

        r = b - A(x0)
        d = self.preconditioner(r)
        previous_gamma = r.dot(d)
        if previous_gamma == 0:
            logger.info("The starting guess is already the perfect solution "
                        "of the inverse problem.")
            return x0, self.convergence_level + 1

        x = x0
        convergence = 0
        iteration_number = 1
        logger.info("Starting conjugate gradient.")
        while True:
            if self.callback is not None:
                self.callback(x, iteration_number)

            q = A(d)
            alpha = previous_gamma / d.dot(q)

            if not np.isfinite(alpha):
                logger.error("Alpha became infinite! Stopping.")
                return x, 0

            x += d * alpha

            reset = False
            if alpha.real < 0:
                logger.warn("Positive definiteness of A violated!")
                reset = True
            # only restart the conjugate directions if a positive
            # reset_count was requested
            if reset or (self.reset_count > 0 and
                         iteration_number % self.reset_count == 0):
                logger.info("Resetting conjugate directions.")
                r = b - A(x)
            else:
                r -= q * alpha

            s = self.preconditioner(r)
            gamma = r.dot(s)

            if gamma.real < 0:
                logger.warn("Positive definiteness of preconditioner "
                            "violated!")

            beta = max(0, gamma / previous_gamma)

            d = s + d * beta

            # delta = previous_delta * np.sqrt(abs(gamma))
            delta = abs(1 - np.sqrt(abs(previous_gamma)) /
                        np.sqrt(abs(gamma)))

            logger.debug("Iteration : %08u   alpha = %3.1E   beta = %3.1E   "
                         "delta = %3.1E" %
                         (iteration_number, np.real(alpha), np.real(beta),
                          np.real(delta)))

            if gamma == 0:
                convergence = self.convergence_level + 1
                logger.info("Reached infinite convergence.")
                break
            elif abs(delta) < self.convergence_tolerance:
                convergence += 1
                logger.info("Updated convergence level to: %u" % convergence)
                if convergence == self.convergence_level:
                    logger.info("Reached target convergence level.")
                    break
            else:
                convergence = max(0, convergence - 1)

            if iteration_number == self.iteration_limit:
                logger.warn("Reached iteration limit. Stopping.")
                break

            iteration_number += 1
            previous_gamma = gamma

        return x, convergence
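
For illustration, here is a minimal, hypothetical driver for the class above. It is not part of the commit: it stands in for NIFTy fields with plain NumPy arrays (which supply the dot, arithmetic and .real operations the loop relies on) and passes x0 explicitly so that the Field constructor is never touched.

import numpy as np

# small symmetric positive-definite system standing in for the operator A
matrix = np.array([[4.0, 1.0],
                   [1.0, 3.0]])
b = np.array([1.0, 2.0])
A = lambda z: matrix.dot(z)

# the iteration limit guarantees termination even if the delta criterion
# is never undershot
cg = ConjugateGradient(iteration_limit=100)
x, convergence = cg(A, b, x0=np.zeros(2))

print(x)            # close to np.linalg.solve(matrix, b)
print(convergence)  # > 0 only if the tolerance was undershot

With this well-conditioned 2x2 system the iterate is numerically exact after two steps; the run then ends either through the delta criterion or at the iteration limit.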
nifty/operators/propagator_operator/propagator_operator.py (view file @ 15f09ea4)
# -*- coding: utf-8 -*-

+from nifty.minimization import ConjugateGradient
from nifty.operators.linear_operator import LinearOperator
+from nifty.nifty_utilities import get_default_codomain
+from nifty.operators import EndomorphicOperator,\
+    FFTOperator

import logging
logger = logging.getLogger('NIFTy.PropagatorOperator')
-class PropagatorOperator(LinearOperator):
-    """
-    ..  [ASCII-art banner: "propagator" operator class]
-
-    NIFTY subclass for propagator operators (of a certain family)
-
-    The propagator operators :math:`D` implemented here have an inverse
-    formulation like :math:`(S^{-1} + M)`, :math:`(S^{-1} + N^{-1})`, or
-    :math:`(S^{-1} + R^\dagger N^{-1} R)` as appearing in Wiener filter
-    theory.
-
-    Parameters
-    ----------
-    S : operator
-        Covariance of the signal prior.
-    M : operator
-        Likelihood contribution.
-    R : operator
-        Response operator translating signal to (noiseless) data.
-    N : operator
-        Covariance of the noise prior or the likelihood, respectively.
-
-    See Also
-    --------
-    conjugate_gradient
-
-    Notes
-    -----
-    The propagator will puzzle the operators `S` and `M` or `R`, `N` or
-    only `N` together in the predefined form; a domain is set
-    automatically. The application of the inverse is done by invoking a
-    conjugate gradient.
-
-    Note that changes to `S`, `M`, `R` or `N` auto-update the propagator.
-
-    Examples
-    --------
-    >>> f = field(rg_space(4), val=[2, 4, 6, 8])
-    >>> S = power_operator(f.target, spec=1)
-    >>> N = diagonal_operator(f.domain, diag=1)
-    >>> D = propagator_operator(S=S, N=N)  # D^{-1} = S^{-1} + N^{-1}
-    >>> D(f).val
-    array([ 1.,  2.,  3.,  4.])
-
-    Attributes
-    ----------
-    domain : space
-        A space wherein valid arguments live.
-    codomain : space
-        An alternative space wherein valid arguments live; commonly the
-        codomain of the `domain` attribute.
-    sym : bool
-        Indicates that the operator is self-adjoint.
-    uni : bool
-        Indicates that the operator is not unitary.
-    imp : bool
-        Indicates that volume weights are implemented in the `multiply`
-        instance methods.
-    target : space
-        The space wherein the operator output lives.
-    _A1 : {operator, function}
-        Application of :math:`S^{-1}` to a field.
-    _A2 : {operator, function}
-        Application of all operations not included in `A1` to a field.
-    RN : {2-tuple of operators}, *optional*
-        Contains `R` and `N` if given.
-    """
+class PropagatorOperator(EndomorphicOperator):

    # ---Overwritten properties and methods---

-    def __init__(self, S=None, M=None, R=None, N=None, inverter=None):
+    def __init__(self, S=None, M=None, R=None, N=None, inverter=None,
+                 preconditioner=None):
        """
        Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
        and `RN` if required.
...
@@ -100,49 +30,53 @@ class PropagatorOperator(LinearOperator):
            Covariance of the noise prior or the likelihood, respectively.
        """
+        # infer domain and target
+        if M is not None:
+            self._domain = M.domain
+            self._likelihood_times = M.times
+
+        elif N is None:
+            raise ValueError("Either M or N must be given!")
+
+        elif R is not None:
+            self._domain = R.domain
+            fft_RN = FFTOperator(self._domain, target=N.domain)
+            self._likelihood_times = \
+                lambda z: R.adjoint_times(fft_RN.inverse_times(
+                              N.inverse_times(fft_RN(R.times(z)))))
+        else:
+            self._domain = (get_default_codomain(N.domain[0]),)
+            fft_RN = FFTOperator(self._domain, target=N.domain)
+            self._likelihood_times = \
+                lambda z: fft_RN.inverse_times(N.inverse_times(fft_RN(z)))
+
+        fft_S = FFTOperator(S.domain, self._domain)
+        self._S_times = lambda z: fft_S.inverse_times(S(fft_S(z)))
+        self._S_inverse_times = lambda z: fft_S.inverse_times(
+                                              S.inverse_times(fft_S(z)))
-        self.S = S
-        self.S_inverse_times = self.S.inverse_times
-
-        # build up the likelihood contribution
-        (self.M_times,
-         M_domain,
-         M_field_type,
-         M_target,
-         M_field_type_target) = self._build_likelihood_contribution(M, R, N)
-
-        # assert that S and M have matching domains
-        if not (self.domain == M_domain and
-                self.field_type == M_target and
-                self.target == M_target and
-                self.field_type_target == M_field_type_target):
-            raise ValueError(
-                "The domains and targets of the prior " +
-                "signal covariance and the likelihood contribution must be " +
-                "the same in the sense of '=='.")
+        if preconditioner is None:
+            preconditioner = self._S_times
+
+        self.preconditioner = preconditioner
+
        if inverter is not None:
            self.inverter = inverter
        else:
-            self.inverter = ConjugateGradient()
+            self.inverter = ConjugateGradient(
+                                preconditioner=self.preconditioner)
    # ---Mandatory properties and methods---

    @property
    def domain(self):
-        return self.S.domain
+        return self._domain

    @property
    def field_type(self):
-        return self.S.field_type

    @property
    def target(self):
-        return self.S.target

    @property
    def field_type_target(self):
-        return self.S.field_type_target
+        return ()

    @property
    def implemented(self):
...
@@ -158,45 +92,11 @@ class PropagatorOperator(LinearOperator):
    # ---Added properties and methods---

-    def _build_likelihood_contribution(self, M, R, N):
-        # if a M is given, return its times method and its domains
-        # supplier and discard R and N
-        if M is not None:
-            return (M.times,
-                    M.domain, M.field_type,
-                    M.target, M.cotarget)
-
-        if N is not None:
-            if R is not None:
-                return (lambda z: R.adjoint_times(N.inverse_times(R.times(z))),
-                        R.domain, R.field_type,
-                        R.domain, R.field_type)
-            else:
-                return (N.inverse_times,
-                        N.domain, N.field_type,
-                        N.target, N.field_type_target)
-        else:
-            raise ValueError("At least M or N must be given.")
-    def _multiply(self, x, W=None, spam=None, reset=None, note=False,
-                  x0=None, tol=1E-4, clevel=1, limii=None, **kwargs):
-
-        if W is None:
-            W = self.S
-        (result, convergence) = self.inverter(A=self._inverse_multiply,
-                                              b=x,
-                                              W=W,
-                                              spam=spam,
-                                              reset=reset,
-                                              note=note,
-                                              x0=x0,
-                                              tol=tol,
-                                              clevel=clevel,
-                                              limii=limii)
-        # evaluate
-        if not convergence:
-            logger.warn("conjugate gradient failed.")
+    def _times(self, x, spaces, types):
+        (result, convergence) = self.inverter(A=self._inverse_times, b=x)
+        return result
    def _inverse_multiply(self, x, **kwargs):
-        result = self.S_inverse_times(x)
-        result += self.M_times(x)
+        result = self._S_inverse_times(x)
+        result += self._likelihood_times(x)
        return result
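
For intuition, here is a small self-contained sketch of what the propagator does for the docstring's example above. It is not part of the commit and does not use the NIFTy API: with S and N both unit diagonal operators, D^{-1} = S^{-1} + N^{-1} = 2 * Id, and applying D to f = [2, 4, 6, 8] amounts to solving D^{-1} m = f with the ConjugateGradient class from the first file, preconditioned with S as the new __init__ chooses by default.

import numpy as np

S_diag = np.ones(4)                  # unit signal covariance (diagonal)
N_diag = np.ones(4)                  # unit noise covariance (diagonal)
f = np.array([2.0, 4.0, 6.0, 8.0])

# action of D^{-1} on a vector: S^{-1} x + N^{-1} x
D_inverse = lambda z: z / S_diag + z / N_diag

# S acts as the preconditioner, mirroring the default chosen in __init__
cg = ConjugateGradient(iteration_limit=100,
                       preconditioner=lambda z: S_diag * z)
m, convergence = cg(D_inverse, f, x0=np.zeros(4))

print(m)    # approximately [1., 2., 3., 4.], as in the docstring example

In the Wiener filter setting named in the docstring, the same mechanism applies D = (S^{-1} + R^\dagger N^{-1} R)^{-1} to the information source j = R^\dagger N^{-1} d: the operator is never inverted explicitly, only applied through a conjugate gradient run. (If the residual happens to vanish exactly, the delta line emits a harmless divide-by-zero warning before the gamma == 0 branch ends the loop.)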