Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
ift
NIFTy
Commits
8fcb0150
Commit
8fcb0150
authored
Jul 08, 2017
by
Theo Steininger
Browse files
Fixed minimizers and their tests.
parent
668c15f7
Pipeline
#14513
passed with stage
in 5 minutes and 53 seconds
Changes
9
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
nifty/minimization/descent_minimizer.py
View file @
8fcb0150
...
@@ -156,24 +156,20 @@ class DescentMinimizer(Loggable, object):
...
@@ -156,24 +156,20 @@ class DescentMinimizer(Loggable, object):
pk
=
descend_direction
,
pk
=
descend_direction
,
f_k_minus_1
=
f_k_minus_1
)
f_k_minus_1
=
f_k_minus_1
)
f_k_minus_1
=
energy
.
value
f_k_minus_1
=
energy
.
value
# check if new energy value is bigger than old energy value
# check if new energy value is bigger than old energy value
if
(
new_energy
.
value
-
energy
.
value
)
>
0
:
if
(
new_energy
.
value
-
energy
.
value
)
>
0
:
self
.
logger
.
info
(
"Line search algorithm was unsuccessful. "
self
.
logger
.
info
(
"Line search algorithm returned a new energy "
"Stopping."
)
"that was larger than the old one. Stopping."
)
new_energy
=
energy
.
at
(
position
=
energy
.
position
+
np
.
random
.
rand
()
/
10
*
(
new_energy
.
position
-
energy
.
position
)
)
energy
=
new_energy
convergence
=
self
.
convergence_level
+
2
break
break
energy
=
new_energy
energy
=
new_energy
# check convergence
# check convergence
delta
=
abs
(
gradient
).
max
()
*
(
step_length
/
gradient_norm
)
delta
=
abs
(
gradient
).
max
()
*
(
step_length
/
gradient_norm
)
self
.
logger
.
debug
(
"Iteration : %08u step_length = %3.1E "
self
.
logger
.
debug
(
"Iteration:%08u step_length=%3.1E "
"delta = %3.1E"
%
"delta=%3.1E energy=%3.1E"
%
(
iteration_number
,
step_length
,
delta
))
(
iteration_number
,
step_length
,
delta
,
energy
.
value
))
if
delta
==
0
:
if
delta
==
0
:
convergence
=
self
.
convergence_level
+
2
convergence
=
self
.
convergence_level
+
2
self
.
logger
.
info
(
"Found minimum according to line-search. "
self
.
logger
.
info
(
"Found minimum according to line-search. "
...
...
nifty/minimization/steepest_descent.py
View file @
8fcb0150
...
@@ -40,8 +40,4 @@ class SteepestDescent(DescentMinimizer):
...
@@ -40,8 +40,4 @@ class SteepestDescent(DescentMinimizer):
"""
"""
descend_direction
=
energy
.
gradient
descend_direction
=
energy
.
gradient
norm
=
descend_direction
.
norm
()
return
descend_direction
*
-
1
if
norm
!=
1
:
return
descend_direction
/
-
norm
else
:
return
descend_direction
*
-
1
nifty/minimization/vl_bfgs.py
View file @
8fcb0150
...
@@ -25,7 +25,7 @@ from .line_searching import LineSearchStrongWolfe
...
@@ -25,7 +25,7 @@ from .line_searching import LineSearchStrongWolfe
class
VL_BFGS
(
DescentMinimizer
):
class
VL_BFGS
(
DescentMinimizer
):
def
__init__
(
self
,
line_searcher
=
LineSearchStrongWolfe
(),
callback
=
None
,
def
__init__
(
self
,
line_searcher
=
LineSearchStrongWolfe
(),
callback
=
None
,
convergence_tolerance
=
1E-4
,
convergence_level
=
3
,
convergence_tolerance
=
1E-4
,
convergence_level
=
3
,
iteration_limit
=
None
,
max_history_length
=
10
):
iteration_limit
=
None
,
max_history_length
=
5
):
super
(
VL_BFGS
,
self
).
__init__
(
super
(
VL_BFGS
,
self
).
__init__
(
line_searcher
=
line_searcher
,
line_searcher
=
line_searcher
,
...
@@ -84,9 +84,6 @@ class VL_BFGS(DescentMinimizer):
...
@@ -84,9 +84,6 @@ class VL_BFGS(DescentMinimizer):
for
i
in
xrange
(
1
,
len
(
delta
)):
for
i
in
xrange
(
1
,
len
(
delta
)):
descend_direction
+=
delta
[
i
]
*
b
[
i
]
descend_direction
+=
delta
[
i
]
*
b
[
i
]
norm
=
descend_direction
.
norm
()
if
norm
!=
1
:
descend_direction
/=
norm
return
descend_direction
return
descend_direction
...
...
test/test_minimization/quadratic_potential.py
0 → 100644
View file @
8fcb0150
# -*- coding: utf-8 -*-
from
nifty
import
Energy
class QuadraticPotential(Energy):
    """Quadratic test energy H(x) = 0.5 * <x, A x>.

    `eigenvalues` is applied as a callable (an operator) to the position,
    so it plays the role of the matrix A.  The minimum lies at x = 0 with
    value 0, which makes this a convenient fixture for minimizer tests.
    """

    def __init__(self, position, eigenvalues):
        # The Energy base class stores the position; we additionally keep
        # the operator that defines the potential.
        super(QuadraticPotential, self).__init__(position)
        self.eigenvalues = eigenvalues

    def at(self, position):
        """Return a new energy of the same potential at `position`."""
        return self.__class__(position,
                              eigenvalues=self.eigenvalues)

    @property
    def value(self):
        """Scalar value H = 0.5 * <x, A x> (real part only)."""
        # .real guards against complex-valued fields.
        H = 0.5 * self.position.vdot(self.eigenvalues(self.position))
        return H.real

    @property
    def gradient(self):
        """Gradient A x.

        NOTE(review): this equals dH/dx only if the operator is
        self-adjoint -- presumably guaranteed by the callers; confirm.
        """
        g = self.eigenvalues(self.position)
        return g

    @property
    def curvature(self):
        """The operator A itself, consumed by curvature-based minimizers."""
        return self.eigenvalues
test/test_minimization/test_conjugate_gradient.py
View file @
8fcb0150
import
unittest
import
unittest
import
numpy
as
np
from
numpy.testing
import
assert_equal
,
assert_almost_equal
from
numpy.testing
import
assert_equal
,
assert_almost_equal
from
nifty
import
*
from
nifty
import
Field
,
DiagonalOperator
,
RGSpace
,
HPSpace
from
nifty
import
ConjugateGradient
from
itertools
import
product
from
test.common
import
expand
from
test.common
import
expand
from
test.common
import
generate_spaces
np
.
random
.
seed
(
42
)
class ConjugateGradient_Tests(unittest.TestCase):
    """Parameterized tests for the ConjugateGradient solver."""

    spaces = generate_spaces()

    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_property(self, space, iteration_limit, convergence_tolerance,
                      convergence_level):
        """Result types: position lives on `space`, convergence is an int."""
        x0 = Field.from_random('normal', domain=space)
        A = DiagonalOperator(space, diagonal=1.)
        b = Field(space, val=0.)
        minimizer = ConjugateGradient(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (position, convergence) = minimizer(A=A, x0=x0, b=b)
        if position.domain[0] != space:
            raise TypeError
        if type(convergence) != int:
            raise TypeError

    # BUG FIX: this method was also named `test_property`, so it shadowed
    # the method above at class-creation time and only one of the two tests
    # was ever collected.  Renamed so both run.
    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_minimization(self, space, iteration_limit,
                          convergence_tolerance, convergence_level):
        """Solving A x = b with A = 1 and b = 1 must yield x = 1."""
        x0 = Field.from_random('normal', domain=space)
        test_x = Field(space, val=1.)
        A = DiagonalOperator(space, diagonal=1.)
        b = Field(space, val=1.)
        minimizer = ConjugateGradient(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (position, convergence) = minimizer(A=A, x0=x0, b=b)
        assert_almost_equal(position.val.get_full_data(),
                            test_x.val.get_full_data(),
                            decimal=3)
        assert_equal(convergence, convergence_level + 1)
spaces
=
[
RGSpace
([
1024
,
1024
],
distances
=
0.123
),
HPSpace
(
32
)]
class Test_ConjugateGradient(unittest.TestCase):
    """Tests for ConjugateGradient: constructor wiring and a real solve."""

    def test_interface(self):
        """Constructor arguments are stored unchanged on the instance."""
        iteration_limit = 100
        convergence_level = 4
        convergence_tolerance = 1E-6
        callback = lambda z: z
        minimizer = ConjugateGradient(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level,
            callback=callback)
        assert_equal(minimizer.iteration_limit, iteration_limit)
        assert_equal(minimizer.convergence_level, convergence_level)
        assert_equal(minimizer.convergence_tolerance, convergence_tolerance)
        # identity, not equality: the very same callable must be kept
        assert(minimizer.callback is callback)

    @expand([[space] for space in spaces])
    def test_minimization(self, space):
        """Solve D x = 1 for a random diagonal D; expect x = 1/diag(D)."""
        np.random.seed(42)
        starting_point = Field.from_random('normal', domain=space)*10
        # shift by 0.5 keeps the diagonal bounded away from zero
        covariance_diagonal = Field.from_random('uniform', domain=space) + 0.5
        covariance = DiagonalOperator(space, diagonal=covariance_diagonal)
        required_result = Field(space, val=1.)
        minimizer = ConjugateGradient()
        (position, convergence) = minimizer(A=covariance,
                                            x0=starting_point,
                                            b=required_result)
        assert_almost_equal(position.val.get_full_data(),
                            1./covariance_diagonal.val.get_full_data(),
                            decimal=3)
test/test_minimization/test_descent_minimizers.py
0 → 100644
View file @
8fcb0150
import
unittest
import
numpy
as
np
from
numpy.testing
import
assert_equal
,
assert_almost_equal
from
nifty
import
Field
,
DiagonalOperator
,
RGSpace
,
HPSpace
from
nifty
import
SteepestDescent
,
RelaxedNewton
,
VL_BFGS
from
itertools
import
product
from
test.common
import
expand
from
quadratic_potential
import
QuadraticPotential
from
nifty
import
logger
minimizers
=
[
SteepestDescent
,
RelaxedNewton
,
VL_BFGS
]
spaces
=
[
RGSpace
([
1024
,
1024
],
distances
=
0.123
),
HPSpace
(
32
)]
class Test_DescentMinimizers(unittest.TestCase):
    """Shared tests for all descent minimizers (SteepestDescent,
    RelaxedNewton, VL_BFGS): constructor wiring and convergence on a
    quadratic potential."""

    @expand([[minimizer] for minimizer in minimizers])
    def test_interface(self, minimizer):
        """Constructor arguments are stored unchanged on the instance."""
        iteration_limit = 100
        convergence_level = 4
        convergence_tolerance = 1E-6
        callback = lambda z: z
        # note: rebinds the `minimizer` parameter (class) to an instance
        minimizer = minimizer(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level,
            callback=callback)
        assert_equal(minimizer.iteration_limit, iteration_limit)
        assert_equal(minimizer.convergence_level, convergence_level)
        assert_equal(minimizer.convergence_tolerance, convergence_tolerance)
        # identity, not equality: the very same callable must be kept
        assert(minimizer.callback is callback)

    @expand(product(minimizers, spaces))
    def test_minimization(self, minimizer_class, space):
        """Each minimizer must drive the quadratic potential to its
        minimum at x = 0 within 30 iterations."""
        np.random.seed(42)
        starting_point = Field.from_random('normal', domain=space)*10
        # shift by 0.5 keeps the diagonal bounded away from zero
        covariance_diagonal = Field.from_random('uniform', domain=space) + 0.5
        covariance = DiagonalOperator(space, diagonal=covariance_diagonal)
        energy = QuadraticPotential(position=starting_point,
                                    eigenvalues=covariance)
        minimizer = minimizer_class(iteration_limit=30)
        (energy, convergence) = minimizer(energy)
        assert_almost_equal(energy.value, 0, decimal=5)
        assert_almost_equal(energy.position.val.get_full_data(),
                            0., decimal=5)
test/test_minimization/test_relaxed_newton.py
deleted
100644 → 0
View file @
668c15f7
import
unittest
from
numpy.testing
import
assert_equal
,
assert_almost_equal
from
nifty
import
*
from
itertools
import
product
from
test.common
import
expand
from
test.common
import
generate_spaces
np
.
random
.
seed
(
42
)
class QuadraticPot(Energy):
    """Quadratic test potential H(x) = 0.5 * <x, N^{-1} x>.

    `N` is an invertible operator; the minimum lies at x = 0 with value 0.
    """

    def __init__(self, position, N):
        super(QuadraticPot, self).__init__(position)
        # operator defining the potential via N.inverse_times
        self.N = N

    def at(self, position):
        """Return a new energy of the same potential at `position`."""
        return self.__class__(position, N=self.N)

    @property
    def value(self):
        """Scalar value H = 0.5 * <x, N^{-1} x> (real part only)."""
        H = 0.5 * self.position.dot(self.N.inverse_times(self.position))
        return H.real

    @property
    def gradient(self):
        """Gradient N^{-1} x, cast to a real-valued field."""
        g = self.N.inverse_times(self.position)
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; it was merely an alias for the builtin `float`, which is
        # the exact drop-in replacement.
        return_g = g.copy_empty(dtype=float)
        return_g.val = g.val.real
        return return_g

    @property
    def curvature(self):
        """Operator handed to curvature-based (Newton-type) minimizers."""
        return self.N
class RelaxedNewton_Tests(unittest.TestCase):
    """Parameterized tests for the RelaxedNewton minimizer."""

    spaces = generate_spaces()

    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_property(self, space, iteration_limit, convergence_tolerance,
                      convergence_level):
        """Result types: position stays on `space`, convergence is an int."""
        x = Field.from_random('normal', domain=space)
        N = DiagonalOperator(space, diagonal=1.)
        energy = QuadraticPot(position=x, N=N)
        minimizer = RelaxedNewton(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (energy, convergence) = minimizer(energy)
        if energy.position.domain[0] != space:
            raise TypeError
        if type(convergence) != int:
            raise TypeError

    # BUG FIX: this method was also named `test_property`, so it shadowed
    # the method above at class-creation time and only one of the two tests
    # was ever collected.  Renamed so both run.
    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_minimization(self, space, iteration_limit,
                          convergence_tolerance, convergence_level):
        """Minimum of the quadratic potential is found at x = 0."""
        x = Field.from_random('normal', domain=space)
        test_x = Field(space, val=0.)
        N = DiagonalOperator(space, diagonal=1.)
        energy = QuadraticPot(position=x, N=N)
        minimizer = RelaxedNewton(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (energy, convergence) = minimizer(energy)
        assert_almost_equal(energy.value, 0, decimal=3)
        assert_almost_equal(energy.position.val.get_full_data(),
                            test_x.val.get_full_data(),
                            decimal=3)
        assert_equal(convergence, convergence_level + 2)
test/test_minimization/test_steepest_descent.py
deleted
100644 → 0
View file @
668c15f7
import
unittest
from
numpy.testing
import
assert_equal
,
assert_almost_equal
from
nifty
import
*
from
itertools
import
product
from
test.common
import
expand
from
test.common
import
generate_spaces
np
.
random
.
seed
(
42
)
class QuadraticPot(Energy):
    """Quadratic test potential H(x) = 0.5 * <x, N^{-1} x>.

    `N` is an invertible operator; the minimum lies at x = 0 with value 0.
    """

    def __init__(self, position, N):
        super(QuadraticPot, self).__init__(position)
        # operator defining the potential via N.inverse_times
        self.N = N

    def at(self, position):
        """Return a new energy of the same potential at `position`."""
        return self.__class__(position, N=self.N)

    @property
    def value(self):
        """Scalar value H = 0.5 * <x, N^{-1} x> (real part only)."""
        H = 0.5 * self.position.dot(self.N.inverse_times(self.position))
        return H.real

    @property
    def gradient(self):
        """Gradient N^{-1} x, cast to a real-valued field."""
        g = self.N.inverse_times(self.position)
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; it was merely an alias for the builtin `float`, which is
        # the exact drop-in replacement.
        return_g = g.copy_empty(dtype=float)
        return_g.val = g.val.real
        return return_g

    @property
    def curvature(self):
        """Operator handed to curvature-based (Newton-type) minimizers."""
        return self.N
class SteepestDescent_Tests(unittest.TestCase):
    """Parameterized tests for the SteepestDescent minimizer."""

    spaces = generate_spaces()

    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_property(self, space, iteration_limit, convergence_tolerance,
                      convergence_level):
        """Result types: position stays on `space`, convergence is an int."""
        x = Field.from_random('normal', domain=space)
        N = DiagonalOperator(space, diagonal=1.)
        energy = QuadraticPot(position=x, N=N)
        minimizer = SteepestDescent(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (energy, convergence) = minimizer(energy)
        if energy.position.domain[0] != space:
            raise TypeError
        if type(convergence) != int:
            raise TypeError

    # BUG FIX: this method was also named `test_property`, so it shadowed
    # the method above at class-creation time and only one of the two tests
    # was ever collected.  Renamed so both run.
    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_minimization(self, space, iteration_limit,
                          convergence_tolerance, convergence_level):
        """Minimum of the quadratic potential is found at x = 0."""
        x = Field.from_random('normal', domain=space)
        test_x = Field(space, val=0.)
        N = DiagonalOperator(space, diagonal=1.)
        energy = QuadraticPot(position=x, N=N)
        minimizer = SteepestDescent(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (energy, convergence) = minimizer(energy)
        assert_almost_equal(energy.value, 0, decimal=3)
        assert_almost_equal(energy.position.val.get_full_data(),
                            test_x.val.get_full_data(),
                            decimal=3)
        assert_equal(convergence, convergence_level + 2)
test/test_minimization/test_vl_bfgs.py
deleted
100644 → 0
View file @
668c15f7
import
unittest
from
numpy.testing
import
assert_equal
,
assert_almost_equal
from
nifty
import
*
from
itertools
import
product
from
test.common
import
expand
from
test.common
import
generate_spaces
np
.
random
.
seed
(
42
)
class QuadraticPot(Energy):
    """Quadratic test potential H(x) = 0.5 * <x, N^{-1} x>.

    `N` is an invertible operator; the minimum lies at x = 0 with value 0.
    """

    def __init__(self, position, N):
        super(QuadraticPot, self).__init__(position)
        # operator defining the potential via N.inverse_times
        self.N = N

    def at(self, position):
        """Return a new energy of the same potential at `position`."""
        return self.__class__(position, N=self.N)

    @property
    def value(self):
        """Scalar value H = 0.5 * <x, N^{-1} x> (real part only)."""
        H = 0.5 * self.position.dot(self.N.inverse_times(self.position))
        return H.real

    @property
    def gradient(self):
        """Gradient N^{-1} x, cast to a real-valued field."""
        g = self.N.inverse_times(self.position)
        # BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in
        # 1.24; it was merely an alias for the builtin `float`, which is
        # the exact drop-in replacement.
        return_g = g.copy_empty(dtype=float)
        return_g.val = g.val.real
        return return_g

    @property
    def curvature(self):
        """Operator handed to curvature-based (Newton-type) minimizers."""
        return self.N
class VL_BFGS_Tests(unittest.TestCase):
    """Parameterized tests for the VL_BFGS minimizer."""

    spaces = generate_spaces()

    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_property(self, space, iteration_limit, convergence_tolerance,
                      convergence_level):
        """Result types: position stays on `space`, convergence is an int."""
        x = Field.from_random('normal', domain=space)
        N = DiagonalOperator(space, diagonal=1.)
        energy = QuadraticPot(position=x, N=N)
        minimizer = VL_BFGS(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (energy, convergence) = minimizer(energy)
        if energy.position.domain[0] != space:
            raise TypeError
        if type(convergence) != int:
            raise TypeError

    # BUG FIX: this method was also named `test_property`, so it shadowed
    # the method above at class-creation time and only one of the two tests
    # was ever collected.  Renamed so both run.
    @expand(product(spaces,
                    [10, 100, 1000],
                    [1E-3, 1E-4, 1E-5],
                    [2, 3, 4]))
    def test_minimization(self, space, iteration_limit,
                          convergence_tolerance, convergence_level):
        """Minimum of the quadratic potential is found at x = 0."""
        x = Field.from_random('normal', domain=space)
        test_x = Field(space, val=0.)
        N = DiagonalOperator(space, diagonal=1.)
        energy = QuadraticPot(position=x, N=N)
        minimizer = VL_BFGS(
            iteration_limit=iteration_limit,
            convergence_tolerance=convergence_tolerance,
            convergence_level=convergence_level)
        (energy, convergence) = minimizer(energy)
        assert_almost_equal(energy.value, 0, decimal=3)
        assert_almost_equal(energy.position.val.get_full_data(),
                            test_x.val.get_full_data(),
                            decimal=3)
        assert_equal(convergence, convergence_level + 2)
Martin Reinecke
@mtr
mentioned in issue
#160 (closed)
·
Jul 08, 2017
mentioned in issue
#160 (closed)
mentioned in issue #160
Toggle commit list
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment