ift / NIFTy · Commits · 74237b2f

Commit 74237b2f, authored Jul 26, 2013 by Marco Selig
several minor fixes; version updated.
Parent: 2395b49a
Changes: 4 files
nifty_core.py  (view file @ 74237b2f)
...
...
@@ -484,7 +484,7 @@ class _about(object): ## nifty support class for global settings
"""
## version
self
.
_version
=
"0.5.
0
"
self
.
_version
=
"0.5.
5
"
## switches and notifications
self
.
_errors
=
notification
(
default
=
True
,
ccode
=
notification
.
_code
)
...
...
@@ -730,9 +730,9 @@ class random(object):
                 spec = domain.enforce_power(kwargs.get("spec",1),size=len(kindex),kindex=kindex,codomain=codomain)
                 kpack = [codomain.power_indices.get("pindex"),kindex]
             else:
-                kindex = domain.power_indixes.get("kindex")
+                kindex = domain.power_indices.get("kindex")
                 spec = domain.enforce_power(kwargs.get("spec",1),size=len(kindex),kindex=kindex)
-                kpack = [domain.power_indixes.get("pindex"),kindex]
+                kpack = [domain.power_indices.get("pindex"),kindex]
             return [key,spec,kpack]

         elif(key=="uni"):
...
...
@@ -1126,7 +1126,7 @@ class space(object):
         Indexing with the unindexing array undoes the indexing with the
         indexing array; i.e., ``power == power[pindex].flatten()[pundex]``.

-        See also
+        See Also
         --------
         get_power_index
...
...
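The docstring relation ``power == power[pindex].flatten()[pundex]`` can be checked with plain NumPy arrays. The sketch below uses a made-up 2×2 `pindex` and a matching `pundex` as stand-ins for the index arrays NIFTy would compute; it is not the library's own index generation.

```python
import numpy as np

# Hypothetical power spectrum over 3 bands and an index array mapping
# each pixel of a 2x2 grid to its band (stand-ins for NIFTy's arrays).
power = np.array([10.0, 4.0, 1.0])
pindex = np.array([[0, 1],
                   [1, 2]])

# Project the spectrum onto the grid, then flatten it.
projected = power[pindex].flatten()          # [10., 4., 4., 1.]

# A valid "unindexing" array picks, for every band, one flattened
# position that carries that band's value.
pundex = np.array([np.flatnonzero(pindex.flatten() == band)[0]
                   for band in range(len(power))])

# Indexing with pundex undoes the indexing with pindex.
assert np.all(power == projected[pundex])
print(pundex, projected[pundex])
```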
@@ -1161,7 +1161,7 @@ class space(object):
         -------
         None

-        See also
+        See Also
         --------
         get_power_indices
...
...
@@ -1213,7 +1213,7 @@ class space(object):
         Indexing with the unindexing array undoes the indexing with the
         indexing array; i.e., ``power == power[pindex].flatten()[pundex]``.

-        See also
+        See Also
         --------
         set_power_indices
...
...
@@ -1489,13 +1489,17 @@ class space(object):
         Returns
         -------
-        dot : float
+        dot : scalar
             Inner product of the two arrays.

         """
         x = self.enforce_shape(np.array(x,dtype=self.datatype))
         y = self.enforce_shape(np.array(y,dtype=self.datatype))
         ## inner product
-        return np.dot(np.conjugate(x),y,out=None)
+        dot = np.dot(np.conjugate(x),y,out=None)
+        if(np.isreal(dot)):
+            return np.real(dot)
+        else:
+            return dot

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
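The new `calc_dot` branch changes only the returned type, not the value: `np.dot(np.conjugate(x), y)` yields a complex scalar even when the imaginary part is exactly zero, and the added `np.isreal` check strips that zero imaginary part. A standalone sketch of the same logic (not the class method itself):

```python
import numpy as np

def calc_dot(x, y):
    # Inner product <x, y> = sum(conj(x) * y); mirrors the updated branch:
    # return a plain real number whenever the imaginary part vanishes.
    dot = np.dot(np.conjugate(x), y)
    if np.isreal(dot):
        return np.real(dot)
    return dot

x = np.array([1 + 2j, 3 - 1j])
print(calc_dot(x, x))                        # 15.0 (real, although the inputs are complex)
print(calc_dot(x, np.array([1j, 1.0])))      # (5+2j) stays complex
```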
@@ -2130,6 +2134,8 @@ class point_space(space):
         if(bool(kwargs.get("save",False))):
             fig.savefig(str(kwargs.get("save")),dpi=None,facecolor=None,edgecolor=None,orientation='portrait',papertype=None,format=None,transparent=False,bbox_inches=None,pad_inches=0.1)
             pl.close(fig)
+        else:
+            fig.canvas.draw()

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
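The added `else` branch makes the plotting routine redraw the figure on screen whenever no save path was requested. The branching can be sketched with plain matplotlib; the `save` keyword handling below is a hypothetical stand-in, independent of NIFTy's `get_plot`:

```python
import matplotlib
matplotlib.use("Agg")                 # headless backend so the sketch runs anywhere
import matplotlib.pyplot as pl
import numpy as np

def show_or_save(**kwargs):
    fig = pl.figure()
    pl.plot(np.linspace(0, 1, 50), np.sin(np.linspace(0, 10, 50)))
    if bool(kwargs.get("save", False)):
        # save to disk and release the figure ...
        fig.savefig(str(kwargs.get("save")), bbox_inches=None, pad_inches=0.1)
        pl.close(fig)
    else:
        # ... otherwise just refresh the canvas (the newly added branch)
        fig.canvas.draw()

show_or_save(save="demo.png")
show_or_save()
```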
@@ -2530,7 +2536,7 @@ class rg_space(space):
         -------
         None

-        See also
+        See Also
         --------
         get_power_indices
...
...
@@ -2894,13 +2900,17 @@ class rg_space(space):
         Returns
         -------
-        dot : float
+        dot : scalar
             Inner product of the two arrays.

         """
         x = self.enforce_shape(np.array(x,dtype=self.datatype))
         y = self.enforce_shape(np.array(y,dtype=self.datatype))
         ## inner product
-        return np.dot(np.conjugate(x.flatten(order='C')),y.flatten(order='C'),out=None)
+        dot = np.dot(np.conjugate(x.flatten(order='C')),y.flatten(order='C'),out=None)
+        if(np.isreal(dot)):
+            return np.real(dot)
+        else:
+            return dot

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
@@ -3311,6 +3321,8 @@ class rg_space(space):
         if(bool(kwargs.get("save",False))):
             fig.savefig(str(kwargs.get("save")),dpi=None,facecolor=None,edgecolor=None,orientation='portrait',papertype=None,format=None,transparent=False,bbox_inches=None,pad_inches=0.1)
             pl.close(fig)
+        else:
+            fig.canvas.draw()

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
@@ -3613,7 +3625,7 @@ class lm_space(space):
         -------
         None

-        See also
+        See Also
         --------
         get_power_indices
...
...
@@ -3895,12 +3907,11 @@ class lm_space(space):
         Returns
         -------
-        dot : float
+        dot : scalar
             Inner product of the two arrays.

         """
         x = self.enforce_shape(np.array(x,dtype=self.datatype))
         y = self.enforce_shape(np.array(y,dtype=self.datatype))
         ## inner product
         if(self.datatype==np.complex64):
             return gl.dotlm_f(x,y,lmax=self.para[0],mmax=self.para[1])
...
...
@@ -4167,6 +4178,8 @@ class lm_space(space):
         if(bool(kwargs.get("save",False))):
             fig.savefig(str(kwargs.get("save")),dpi=None,facecolor=None,edgecolor=None,orientation='portrait',papertype=None,format=None,transparent=False,bbox_inches=None,pad_inches=0.1)
             pl.close(fig)
+        else:
+            fig.canvas.draw()

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
@@ -4836,6 +4849,8 @@ class gl_space(space):
         if(bool(kwargs.get("save",False))):
             fig.savefig(str(kwargs.get("save")),dpi=None,facecolor=None,edgecolor=None,orientation='portrait',papertype=None,format=None,transparent=False,bbox_inches=None,pad_inches=0.1)
             pl.close(fig)
+        else:
+            fig.canvas.draw()

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
@@ -5432,6 +5447,8 @@ class hp_space(space):
         if(bool(kwargs.get("save",False))):
             fig.savefig(str(kwargs.get("save")),dpi=None,facecolor=None,edgecolor=None,orientation='portrait',papertype=None,format=None,transparent=False,bbox_inches=None,pad_inches=0.1)
             pl.close(fig)
+        else:
+            fig.canvas.draw()

 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
@@ -5900,13 +5917,17 @@ class nested_space(space):
         Returns
         -------
-        dot : float
+        dot : scalar
             Inner product of the two arrays.

         """
         x = self.enforce_shape(np.array(x,dtype=self.datatype))
         y = self.enforce_shape(np.array(y,dtype=self.datatype))
         ## inner product
-        return np.sum(np.conjugate(x)*y,axis=None,dtype=None,out=None)
+        dot = np.sum(np.conjugate(x)*y,axis=None,dtype=None,out=None)
+        if(np.isreal(dot)):
+            return np.real(dot)
+        else:
+            return dot

     def calc_pseudo_dot(self,x,y,**kwargs):
         """
...
...
@@ -7074,7 +7095,7 @@ def cos(x):
     cosx : {scalar, array, field}
         Cosine of `x` to the specified base.

-    See also
+    See Also
     --------
     sin
     tan
...
...
@@ -7105,7 +7126,7 @@ def sin(x):
     sinx : {scalar, array, field}
         Sine of `x` to the specified base.

-    See also
+    See Also
     --------
     cos
     tan
...
...
@@ -7137,7 +7158,7 @@ def cosh(x):
     coshx : {scalar, array, field}
         cosh of `x` to the specified base.

-    See also
+    See Also
     --------
     sinh
     tanh
...
...
@@ -7169,7 +7190,7 @@ def sinh(x):
     sinhx : {scalar, array, field}
         sinh of `x` to the specified base.

-    See also
+    See Also
     --------
     cosh
     tanh
...
...
@@ -7201,7 +7222,7 @@ def tan(x):
     tanx : {scalar, array, field}
         Tangent of `x` to the specified base.

-    See also
+    See Also
     --------
     cos
     sin
...
...
@@ -7233,7 +7254,7 @@ def tanh(x):
     tanhx : {scalar, array, field}
         tanh of `x` to the specified base.

-    See also
+    See Also
     --------
     cosh
     sinh
...
...
@@ -7264,7 +7285,7 @@ def arccos(x):
     arccosx : {scalar, array, field}
         arccos of `x` to the specified base.

-    See also
+    See Also
     --------
     arcsin
     arctan
...
...
@@ -7296,7 +7317,7 @@ def arcsin(x):
     arcsinx : {scalar, array, field}
         Logarithm of `x` to the specified base.

-    See also
+    See Also
     --------
     arccos
     arctan
...
...
@@ -7328,7 +7349,7 @@ def arccosh(x):
     arccoshx : {scalar, array, field}
         arccos of `x` to the specified base.

-    See also
+    See Also
     --------
     arcsinh
     arctanh
...
...
@@ -7359,7 +7380,7 @@ def arcsinh(x):
     arcsinhx : {scalar, array, field}
         arcsinh of `x` to the specified base.

-    See also
+    See Also
     --------
     arccosh
     arctanh
...
...
@@ -7390,7 +7411,7 @@ def arctan(x):
     arctanx : {scalar, array, field}
         arctan of `x` to the specified base.

-    See also
+    See Also
     --------
     arccos
     arcsin
...
...
@@ -7421,7 +7442,7 @@ def arctanh(x):
     arctanhx : {scalar, array, field}
         arctanh of `x` to the specified base.

-    See also
+    See Also
     --------
     arccosh
     arcsinh
...
...
@@ -7479,7 +7500,7 @@ def exp(x):
     expx : {scalar, array, field}
         Exponential of `x` to the specified base.

-    See also
+    See Also
     --------
     log
...
...
@@ -7511,7 +7532,7 @@ def log(x,base=None):
     logx : {scalar, array, field}
         Logarithm of `x` to the specified base.

-    See also
+    See Also
     --------
     exp
...
...
@@ -8116,8 +8137,11 @@ class operator(object):
         if(domain is None):
             domain = self.domain
         diag = diagonal_probing(self,function=self.times,domain=domain,target=target,random=random,ncpu=ncpu,nrun=nrun,nper=nper,var=var,save=save,path=path,prefix=prefix,**kwargs)(loop=loop)
+        if(diag is None):
+            about.warnings.cprint("WARNING: 'NoneType' forwarded.")
+            return None
         ## weight if ...
-        if(not domain.discrete)and(bare):
+        elif(not domain.discrete)and(bare):
             if(isinstance(diag,tuple)): ## diag == (diag,variance)
                 return domain.calc_weight(diag[0],power=-1),domain.calc_weight(diag[1],power=-1)
             else:
...
...
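The pattern added throughout this hunk — probe, bail out with a warning when probing returned `None`, and only then weight the diagonal — can be written down generically. The sketch below uses dummy stand-ins for `diagonal_probing` and the domain weighting (all names hypothetical) to show why the `None` check has to come before any arithmetic on `diag`:

```python
import warnings
import numpy as np

def probe_diagonal(probing, weights, bare=True):
    """Return the (optionally bare) diagonal, or None if probing failed."""
    diag = probing()                       # may legitimately return None
    if diag is None:
        warnings.warn("'NoneType' forwarded.")
        return None                        # guard first ...
    elif bare:
        return diag * weights**-1          # ... only then weight the result
    return diag

failed = lambda: None
ok = lambda: np.ones(4)
print(probe_diagonal(failed, np.full(4, 2.0)))   # None, plus a warning
print(probe_diagonal(ok, np.full(4, 2.0)))       # [0.5 0.5 0.5 0.5]
```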
@@ -8193,8 +8217,11 @@ class operator(object):
         if(domain is None):
             domain = self.target
         diag = diagonal_probing(self,function=self.inverse_times,domain=domain,target=target,random=random,ncpu=ncpu,nrun=nrun,nper=nper,var=var,save=save,path=path,prefix=prefix,**kwargs)(loop=loop)
+        if(diag is None):
+            about.warnings.cprint("WARNING: 'NoneType' forwarded.")
+            return None
         ## weight if ...
-        if(not domain.discrete)and(bare):
+        elif(not domain.discrete)and(bare):
             if(isinstance(diag,tuple)): ## diag == (diag,variance)
                 return domain.calc_weight(diag[0],power=-1),domain.calc_weight(diag[1],power=-1)
             else:
...
...
@@ -8290,7 +8317,10 @@ class operator(object):
"""
if
(
domain
is
None
):
domain
=
self
.
domain
return
field
(
domain
,
val
=
self
.
diag
(
bare
=
bare
,
domain
=
domain
,
target
=
target
,
var
=
False
,
**
kwargs
),
target
=
target
)
diag
=
self
.
diag
(
bare
=
bare
,
domain
=
domain
,
target
=
target
,
var
=
False
,
**
kwargs
)
if
(
diag
is
None
):
return
None
return
field
(
domain
,
val
=
diag
,
target
=
target
)
def
inverse_hat
(
self
,
bare
=
False
,
domain
=
None
,
target
=
None
,
**
kwargs
):
"""
...
...
@@ -8354,7 +8384,10 @@ class operator(object):
"""
if
(
domain
is
None
):
domain
=
self
.
target
return
field
(
domain
,
val
=
self
.
inverse_diag
(
bare
=
bare
,
domain
=
domain
,
target
=
target
,
var
=
False
,
**
kwargs
),
target
=
target
)
diag
=
self
.
inverse_diag
(
bare
=
bare
,
domain
=
domain
,
target
=
target
,
var
=
False
,
**
kwargs
)
if
(
diag
is
None
):
return
None
return
field
(
domain
,
val
=
diag
,
target
=
target
)
def
hathat
(
self
,
domain
=
None
,
**
kwargs
):
"""
...
...
@@ -8413,7 +8446,10 @@ class operator(object):
"""
if
(
domain
is
None
):
domain
=
self
.
domain
return
diagonal_operator
(
domain
=
domain
,
diag
=
self
.
diag
(
bare
=
False
,
domain
=
domain
,
var
=
False
,
**
kwargs
),
bare
=
False
)
diag
=
self
.
diag
(
bare
=
False
,
domain
=
domain
,
var
=
False
,
**
kwargs
)
if
(
diag
is
None
):
return
None
return
diagonal_operator
(
domain
=
domain
,
diag
=
diag
,
bare
=
False
)
def
inverse_hathat
(
self
,
domain
=
None
,
**
kwargs
):
"""
...
...
@@ -8473,7 +8509,10 @@ class operator(object):
"""
if
(
domain
is
None
):
domain
=
self
.
target
return
diagonal_operator
(
domain
=
domain
,
diag
=
self
.
inverse_diag
(
bare
=
False
,
domain
=
domain
,
var
=
False
,
**
kwargs
),
bare
=
False
)
diag
=
self
.
inverse_diag
(
bare
=
False
,
domain
=
domain
,
var
=
False
,
**
kwargs
)
if
(
diag
is
None
):
return
None
return
diagonal_operator
(
domain
=
domain
,
diag
=
diag
,
bare
=
False
)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
...
...
@@ -9740,6 +9779,8 @@ class projection_operator(operator):
         if(isinstance(x,operator)):
             ## compute non-bare diagonal of the operator x
             x = x.diag(bare=False,domain=self.domain,target=x.domain,var=False,**kwargs)
+            if(x is None):
+                raise TypeError(about._error.cstring("ERROR: 'NoneType' encountered."))

         elif(isinstance(x,field)):
             ## check domain
...
...
@@ -10573,6 +10614,7 @@ class probing(object):
"""
if
(
len
(
results
)
==
0
):
about
.
warnings
.
cprint
(
"WARNING: probing failed."
)
return
None
elif
(
self
.
var
):
return
np
.
mean
(
np
.
array
(
results
),
axis
=
0
,
dtype
=
None
,
out
=
None
),
np
.
var
(
np
.
array
(
results
),
axis
=
0
,
dtype
=
None
,
out
=
None
,
ddof
=
0
)
/
(
len
(
results
)
-
1
)
...
...
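The `elif(self.var)` branch returns the mean of the probes together with `np.var(..., ddof=0)/(len(results)-1)`, which is exactly the unbiased estimate of the variance of that mean: with `ddof=0` the variance carries a 1/N factor, and dividing by N-1 turns it into s²/N. A quick numerical check with plain NumPy, not the probing class:

```python
import numpy as np

results = np.array([1.0, 2.0, 4.0, 7.0])    # pretend these are probing results
N = len(results)

mean = np.mean(results, axis=0)
var_of_mean = np.var(results, axis=0, ddof=0) / (N - 1)

# Same quantity written the textbook way: unbiased sample variance over N.
assert np.isclose(var_of_mean, np.var(results, ddof=1) / N)
print(mean, var_of_mean)
```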
@@ -11279,6 +11321,7 @@ class diagonal_probing(probing):
"""
num
=
len
(
results
)
if
(
num
==
0
):
about
.
warnings
.
cprint
(
"WARNING: probing failed."
)
return
None
elif
(
self
.
save
is
None
):
if
(
self
.
var
):
...
...
nifty_power.py  (view file @ 74237b2f)
...
...
@@ -401,7 +401,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
     derived, and the implications of a certain choise of the perception
     tuple (delta,epsilon) are discussed.
     The further incorporation of a smoothness prior as detailed in [#]_,
-    where the underlying formula(s), Eq.(27), of this implementation are
+    where the underlying formula(s), Eq.(26), of this implementation are
     derived and discussed in terms of their applicability.

     References
...
...
nifty_tools.py  (view file @ 74237b2f)
...
...
@@ -185,13 +185,14 @@ class invertible_operator(operator):
"""
x_
,
convergence
=
conjugate_gradient
(
self
.
inverse_times
,
x
,
W
=
W
,
spam
=
spam
,
reset
=
reset
,
note
=
note
)(
x0
=
x0
,
tol
=
tol
,
clevel
=
clevel
,
limii
=
limii
)
if
(
not
self
.
imp
):
## continiuos domain/target
x_
.
weight
(
power
=-
1
,
overwrite
=
True
)
## check convergence
if
(
not
convergence
):
if
(
not
force
):
if
(
not
force
)
or
(
x_
is
None
)
:
return
None
about
.
warnings
.
cprint
(
"WARNING: conjugate gradient failed."
)
## weight if ...
if
(
not
self
.
imp
):
## continiuos domain/target
x_
.
weight
(
power
=-
1
,
overwrite
=
True
)
return
x_
def
_inverse_multiply
(
self
,
x
,
force
=
False
,
W
=
None
,
spam
=
None
,
reset
=
None
,
note
=
False
,
x0
=
None
,
tol
=
1E-4
,
clevel
=
1
,
limii
=
None
,
**
kwargs
):
...
...
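The reordering matters: `conjugate_gradient(...)` can hand back `x_ = None` when it dies, and the old code would have called `.weight()` on that result before the convergence check ever ran. A small stand-alone illustration of the corrected control flow (dummy solver and field, all names hypothetical):

```python
import warnings
import numpy as np

def apply_inverse(solve, force=False, imp=False):
    """Run a solver, then weight -- but only once the result is known to be usable."""
    x_, convergence = solve()
    ## check convergence first
    if not convergence:
        if (not force) or (x_ is None):
            return None                      # nothing sensible to return
        warnings.warn("conjugate gradient failed.")
    ## weight if the domain is continuous; safe now, x_ cannot be None here
    if not imp:
        x_ = x_ * 0.5                        # stand-in for x_.weight(power=-1)
    return x_

dead_solver = lambda: (None, False)          # solver that died
ok_solver = lambda: (np.ones(3), True)
print(apply_inverse(dead_solver, force=True))   # None instead of an AttributeError
print(apply_inverse(ok_solver))                 # [0.5 0.5 0.5]
```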
@@ -242,13 +243,14 @@ class invertible_operator(operator):
"""
x_
,
convergence
=
conjugate_gradient
(
self
.
times
,
x
,
W
=
W
,
spam
=
spam
,
reset
=
reset
,
note
=
note
)(
x0
=
x0
,
tol
=
tol
,
clevel
=
clevel
,
limii
=
limii
)
if
(
not
self
.
imp
):
## continiuos domain/target
x_
.
weight
(
power
=
1
,
overwrite
=
True
)
## check convergence
if
(
not
convergence
):
if
(
not
force
):
if
(
not
force
)
or
(
x_
is
None
)
:
return
None
about
.
warnings
.
cprint
(
"WARNING: conjugate gradient failed."
)
## weight if ...
if
(
not
self
.
imp
):
## continiuos domain/target
x_
.
weight
(
power
=
1
,
overwrite
=
True
)
return
x_
##-----------------------------------------------------------------------------
...
...
@@ -578,6 +580,10 @@ class conjugate_gradient(object):
     note : bool, *optional*
         Indicates whether notes are printed or not (default: False).

+    See Also
+    --------
+    scipy.sparse.linalg.cg

     Notes
     -----
     After initialization by `__init__`, the minimizer is started by calling
...
...
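The new See Also entry points at SciPy's conjugate gradient solver, which covers the same use case for explicit matrices. A minimal usage sketch of that reference (standard SciPy API, unrelated to NIFTy's field-based interface):

```python
import numpy as np
from scipy.sparse.linalg import cg

# Small symmetric positive definite system A x = b.
A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])

x, info = cg(A, b)     # info == 0 signals convergence
print(x, info)
```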
@@ -585,10 +591,11 @@ class conjugate_gradient(object):
     if enabled, will state the iteration number, current step widths
     `alpha` and `beta`, the current relative residual `delta` that is
     compared to the tolerance, and the convergence level if changed.
-    The minimizer will exit in two states: QUIT if the maximum number of
-    iterations is reached, or DONE if convergence is achieved. Returned
-    will be the latest `x` and the latest convergence level, which can
-    evaluate ``True`` for all exit states.
+    The minimizer will exit in three states: DEAD if alpha becomes
+    infinite, QUIT if the maximum number of iterations is reached, or DONE
+    if convergence is achieved. Returned will be the latest `x` and the
+    latest convergence level, which can evaluate ``True`` for the exit
+    states QUIT and DONE.

     References
     ----------
...
...
@@ -714,12 +721,17 @@ class conjugate_gradient(object):
     def _calc_without(self,tol=1E-4,clevel=1,limii=None): ## > runs cg without preconditioner
         clevel = int(clevel)
         if(limii is None):
             limii = 10*self.b.domain.dim(split=False)
         else:
             limii = int(limii)

         r = self.b-self.A(self.x)
         d = field(self.b.domain,val=np.copy(r.val),target=self.b.target)
         gamma = r.dot(d)
+        if(gamma==0):
+            return self.x,clevel+1
         delta_ = np.absolute(gamma)**(-0.5)
         convergence = 0
...
...
@@ -727,6 +739,9 @@ class conjugate_gradient(object):
         while(True):
             q = self.A(d)
             alpha = gamma/d.dot(q) ## positive definite
+            if(not np.isfinite(alpha)):
+                self.note.cprint("\niteration : %08u alpha = NAN\n... dead."%ii)
+                return self.x,0
             self.x += alpha*d
             if(ii%self.reset==0)or(np.signbit(np.real(alpha))):
                 r = self.b-self.A(self.x)
...
...
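Both new guards — returning early when the initial residual already vanishes (`gamma == 0`) and aborting with the DEAD state when `alpha` turns non-finite — fit into a bare-bones CG loop. The sketch below is a generic NumPy conjugate gradient with the two guards marked, not NIFTy's field-based `_calc_without`:

```python
import numpy as np

def cg_sketch(A, b, x0, tol=1e-4, limii=100):
    """Plain conjugate gradient with the two guards added in this commit."""
    x = x0.copy()
    r = b - A @ x
    d = r.copy()
    gamma = r @ d
    if gamma == 0:                       # already converged: nothing to do
        return x, True
    for ii in range(1, limii + 1):
        q = A @ d
        alpha = gamma / (d @ q)
        if not np.isfinite(alpha):       # "dead" exit: A not positive definite
            return x, False
        x += alpha * d
        r -= alpha * q
        gamma_new = r @ r
        if np.sqrt(gamma_new) < tol:     # converged ("done")
            return x, True
        d = r + (gamma_new / gamma) * d
        gamma = gamma_new
    return x, False                      # iteration limit reached ("quit")

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(cg_sketch(A, b, np.zeros(2)))
```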
@@ -769,8 +784,11 @@ class conjugate_gradient(object):
     def _calc_with(self,tol=1E-4,clevel=1,limii=None): ## > runs cg with preconditioner
         clevel = int(clevel)
         if(limii is None):
             limii = 10*self.b.domain.dim(split=False)
         else:
             limii = int(limii)

         r = self.b-self.A(self.x)
         d = self.W(r)
...
...
@@ -782,6 +800,9 @@ class conjugate_gradient(object):
         while(True):
             q = self.A(d)
             alpha = gamma/d.dot(q) ## positive definite
+            if(not np.isfinite(alpha)):
+                self.note.cprint("\niteration : %08u alpha = NAN\n... dead."%ii)
+                return self.x,0
             self.x += alpha*d ## update
             if(ii%self.reset==0)or(np.signbit(np.real(alpha))):
                 r = self.b-self.A(self.x)
...
...
@@ -861,6 +882,11 @@ class steepest_descent(object):
     note : bool, *optional*
         Indicates whether notes are printed or not (default: False).

+    See Also
+    --------
+    scipy.optimize.fmin_cg, scipy.optimize.fmin_ncg,
+    scipy.optimize.fmin_l_bfgs_b

     Notes
     -----
     After initialization by `__init__`, the minimizer is started by calling
...
...
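The referenced SciPy minimizers can be exercised on a toy quadratic energy. The sketch below uses `scipy.optimize.fmin_cg` with an explicit gradient, loosely analogous to the energy-and-gradient (`eggs`) callback this class expects, but otherwise unrelated to NIFTy:

```python
import numpy as np
from scipy.optimize import fmin_cg

# Toy energy E(x) = 1/2 x^T A x - b^T x and its gradient A x - b.
A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
b = np.array([1.0, 2.0])

energy = lambda x: 0.5 * x @ A @ x - b @ x
gradient = lambda x: A @ x - b

x_min = fmin_cg(energy, x0=np.zeros(2), fprime=gradient, disp=False)
print(x_min)          # close to the solution of A x = b
```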
@@ -982,6 +1008,10 @@ class steepest_descent(object):
         if(not isinstance(x0,field)):
             raise TypeError(about._errors.cstring("ERROR: invalid input."))
         self.x = x0

         clevel = int(clevel)
         limii = int(limii)

         E,g = self.eggs(self.x) ## energy and gradient
         norm = g.norm() ## gradient norm
...
...
setup.py  (view file @ 74237b2f)
...
...
@@ -23,7 +23,7 @@ from distutils.core import setup
 import os

 setup(name="nifty",
-      version="0.5.0",
+      version="0.5.5",
       description="Numerical Information Field Theory",
       author="Marco Selig",
       author_email="mselig@mpa-garching.mpg.de",
...
...
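This commit bumps the version string in two separate places (`_about._version` in nifty_core.py and `version=` in setup.py). A small release-hygiene sketch to check that such duplicated strings stay in sync; it greps both files rather than importing the package, assumes it is run from the repository root, and the regexes are assumptions about the exact formatting:

```python
import re

def read_version(path, pattern):
    """Extract the first quoted version string matching `pattern` from a file."""
    with open(path) as f:
        match = re.search(pattern, f.read())
    return match.group(1) if match else None

core_version = read_version("nifty_core.py", r'_version\s*=\s*"([^"]+)"')
setup_version = read_version("setup.py", r'version\s*=\s*"([^"]+)"')

assert core_version == setup_version == "0.5.5", (core_version, setup_version)
print("versions agree:", setup_version)
```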