ift / NIFTy

Commit 5bdcc718 authored Aug 02, 2013 by Marco Selig
    several minor adjustments in docstrings and output.
Parent: 3711b36b
Showing 3 changed files, with 37 additions and 13 deletions (+37 -13):

    nifty_core.py    +2  -2
    nifty_tools.py   +34 -10
    setup.py         +1  -1
nifty_core.py  (view file @ 5bdcc718)

@@ -484,7 +484,7 @@ class _about(object): ## nifty support class for global settings
         """
         ## version
-        self._version = "0.5.5"
+        self._version = "0.5.7"

         ## switches and notifications
         self._errors = notification(default=True,ccode=notification._code)

@@ -2911,7 +2911,7 @@ class rg_space(space):
             return np.real(dot)
         elif(self.para[(np.size(self.para)-1)//2]!=2):
             ## check imaginary part
-            if(dot.imag>self.epsilon**2*dot.real):
+            if(np.absolute(dot.imag)>self.epsilon**2*np.absolute(dot.real)):
                 about.warnings.cprint("WARNING: discarding considerable imaginary part.")
             return np.real(dot)
         else:
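The rg_space change above swaps a signed comparison for one on absolute values: the old test, dot.imag > self.epsilon**2 * dot.real, stays silent whenever either component is negative, so a large negative imaginary part was discarded without any warning. A minimal standalone sketch of the new check, with dot and epsilon as stand-ins for the instance attributes (the epsilon default below is illustrative, not NIFTy's):

    import numpy as np

    def discard_imaginary(dot, epsilon=1e-12):
        ## Warn only when the imaginary part is significant relative to the
        ## real part; absolute values keep the test meaningful for negative
        ## components, which the old signed comparison let through unnoticed.
        if np.absolute(dot.imag) > epsilon**2 * np.absolute(dot.real):
            print("WARNING: discarding considerable imaginary part.")
        return np.real(dot)

    ## discard_imaginary(complex(4.0, -3.0)) now warns and returns 4.0;
    ## the old signed test would have stayed silent because -3.0 is below
    ## any positive threshold.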
nifty_tools.py  (view file @ 5bdcc718)
@@ -253,6 +253,11 @@ class invertible_operator(operator):
                 x_.weight(power=1,overwrite=True)
         return x_

+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+    def __repr__(self):
+        return "<nifty.invertible_operator>"
+
 ##-----------------------------------------------------------------------------

 ##-----------------------------------------------------------------------------
@@ -540,6 +545,11 @@ class propagator_operator(operator):
         ## evaluate
         return self._debriefing(x,x_,in_codomain)

+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+    def __repr__(self):
+        return "<nifty.propagator_operator>"
+
 ##-----------------------------------------------------------------------------

 ##=============================================================================
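The __repr__ methods added in this commit (for invertible_operator and propagator_operator here, and for conjugate_gradient and steepest_descent further down) all follow one pattern: return a fixed "<nifty.classname>" tag so instances print readably in interactive sessions. A toy illustration with a stand-in class, not the real operator:

    class propagator_operator_stub(object):
        ## Stand-in for nifty.propagator_operator; only the repr matters here.
        def __repr__(self):
            return "<nifty.propagator_operator>"

    print(repr(propagator_operator_stub()))   ## <nifty.propagator_operator>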
@@ -720,7 +730,6 @@ class conjugate_gradient(object):
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

     def _calc_without(self,tol=1E-4,clevel=1,limii=None): ## > runs cg without preconditioner
         clevel = int(clevel)
         if(limii is None):
             limii = 10*self.b.domain.dim(split=False)
@@ -743,7 +752,10 @@ class conjugate_gradient(object):
                 self.note.cprint("\niteration : %08u alpha = NAN\n... dead."%ii)
                 return self.x,0
             self.x += alpha*d
-            if(ii%self.reset==0)or(np.signbit(np.real(alpha))):
+            if(np.signbit(np.real(alpha))):
+                about.warnings.cprint("WARNING: positive definiteness of A violated.")
+                r = self.b-self.A(self.x)
+            elif(ii%self.reset==0):
                 r = self.b-self.A(self.x)
             else:
                 r -= alpha*q
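The change to _calc_without above is behavioural, not cosmetic: previously a negative step size alpha was folded into the same branch as the periodic ii % self.reset == 0 restart; now it gets its own branch that also prints a warning, because alpha < 0 means d.dot(q) < 0, i.e. A is not positive definite and plain CG loses its convergence guarantee. A self-contained dense-matrix sketch of the surrounding loop, using NumPy arrays in place of NIFTy fields and operators and a simplified stopping rule:

    import numpy as np

    def cg_sketch(A, b, x0, tol=1E-4, reset=10, limii=None):
        ## Dense analogue of conjugate_gradient._calc_without (a sketch under
        ## the assumptions above, not NIFTy's implementation).
        x = np.array(x0, dtype=float)
        if limii is None:
            limii = 10*b.size            ## mirrors 10*self.b.domain.dim(...)
        r = b - A @ x
        d = r.copy()
        gamma = r @ r
        if gamma == 0:
            return x                     ## exact solution already reached
        for ii in range(1, limii+1):
            q = A @ d
            alpha = gamma/(d @ q)
            x += alpha*d
            if np.signbit(np.real(alpha)):
                print("WARNING: positive definiteness of A violated.")
                r = b - A @ x            ## recompute residual from scratch
            elif ii%reset == 0:
                r = b - A @ x            ## periodic restart against round-off drift
            else:
                r -= alpha*q             ## cheap incremental update
            gamma_, gamma = gamma, r @ r
            if np.sqrt(gamma) < tol:
                break
            d = r + (gamma/gamma_)*d     ## next conjugate search direction
        return x

For a symmetric positive definite A, e.g. np.array([[4.,1.],[1.,3.]]), the warning branch is never taken; feed the sketch an indefinite matrix and the signbit test fires on the first negative-curvature direction.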
@@ -757,7 +769,7 @@ class conjugate_gradient(object):
             if(ii==limii):
                 self.note.cprint("\n... quit.")
                 break
-            if(gamma==0):
+            elif(gamma==0):
                 convergence = clevel+1
                 self.note.cprint(" convergence level : INF\n... done.")
                 break
@@ -793,6 +805,8 @@ class conjugate_gradient(object):
         r = self.b-self.A(self.x)
         d = self.W(r)
         gamma = r.dot(d)
+        if(gamma==0):
+            return self.x,clevel+1
         delta_ = np.absolute(gamma)**(-0.5)
         convergence = 0
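The two lines added to _calc_with give the preconditioned variant an early exit: gamma = r.dot(d) with d = self.W(r) is the W-weighted square norm of the initial residual, and if it is exactly zero the starting vector already solves the system, so the method returns immediately with the flag clevel+1. A dense sketch of that variant, assuming W approximates the inverse of A (again NumPy stand-ins, not the NIFTy classes):

    import numpy as np

    def pcg_sketch(A, W, b, x0, tol=1E-4, clevel=1, limii=100):
        ## Dense analogue of conjugate_gradient._calc_with.
        x = np.array(x0, dtype=float)
        r = b - A @ x
        d = W @ r                        ## preconditioned residual
        gamma = r @ d
        if gamma == 0:                   ## the guard added in this hunk
            return x, clevel+1
        for ii in range(1, limii+1):
            q = A @ d
            alpha = gamma/(d @ q)
            x += alpha*d
            r -= alpha*q
            s = W @ r
            gamma_, gamma = gamma, r @ s
            if np.absolute(gamma) < tol**2:
                return x, clevel         ## converged at the requested level
            d = s + (gamma/gamma_)*d
        return x, 0                      ## iteration limit hit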
@@ -804,7 +818,10 @@ class conjugate_gradient(object):
                 self.note.cprint("\niteration : %08u alpha = NAN\n... dead."%ii)
                 return self.x,0
             self.x += alpha*d ## update
-            if(ii%self.reset==0)or(np.signbit(np.real(alpha))):
+            if(np.signbit(np.real(alpha))):
+                about.warnings.cprint("WARNING: positive definiteness of A violated.")
+                r = self.b-self.A(self.x)
+            elif(ii%self.reset==0):
                 r = self.b-self.A(self.x)
             else:
                 r -= alpha*q
@@ -819,8 +836,8 @@ class conjugate_gradient(object):
             if(ii==limii):
                 self.note.cprint("\n... quit.")
                 break
-            if(gamma==0):
-                convergence = clevel
+            elif(gamma==0):
+                convergence = clevel+1
                 self.note.cprint(" convergence level : INF\n... done.")
                 break
             elif(np.absolute(delta)<tol):
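The clevel to clevel+1 change brings _calc_with in line with _calc_without, whose corresponding branch (the -757,7 hunk above) already reports clevel+1: a gamma that vanishes exactly is now flagged one step above the requested convergence level in both variants. A hypothetical caller-side interpretation of the flag, inferred from the diff rather than taken from the repository:

    def convergence_label(flag, clevel=1):
        ## Assumed mapping: 0 means aborted (e.g. alpha = NAN), clevel means
        ## converged at the requested level, clevel+1 means the residual
        ## vanished exactly ("convergence level : INF" in the output).
        if flag == 0:
            return "aborted"
        return "exact" if flag > clevel else "converged"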
@@ -842,6 +859,11 @@ class conjugate_gradient(object):
         return self.x,convergence

+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+    def __repr__(self):
+        return "<nifty.conjugate_gradient>"
+
 ##=============================================================================
@@ -1038,6 +1060,7 @@ class steepest_descent(object):
                 convergence += 1
                 self.note.cflush(" convergence level : %u"%convergence)
                 if(convergence==clevel):
+                    convergence += int(ii==clevel)
                     self.note.cprint("\n... done.")
                     break
             else:
@@ -1056,7 +1079,6 @@
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

     def _get_alpha(self,E,g,norm,alpha): ## > determines the new alpha
         while(True):
             ## Wolfe conditions
             wolfe,x_,E_,g_,a = self._check_wolfe(E,g,norm,alpha)
@@ -1071,7 +1093,6 @@
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

     def _check_wolfe(self,E,g,norm,alpha): ## > checks the Wolfe conditions
         x_ = self._get_x(g,norm,alpha)
         pg = norm
         E_,g_ = self.eggs(x_)
@@ -1085,7 +1106,6 @@
             return True,x_,E_,g_,self.a[2]

 #    def _check_strong_wolfe(self,E,g,norm,alpha): ## > checks the strong Wolfe conditions
 #
 #        x_ = self._get_x(g,norm,alpha)
 #        pg = norm
 #        E_,g_ = self.eggs(x_)
@@ -1101,8 +1121,12 @@
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

     def _get_x(self,g,norm,alpha): ## > updates x
         return self.x-g*(alpha/norm)

+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+    def __repr__(self):
+        return "<nifty.steepest_descent>"
 ##=============================================================================
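For context on the steepest_descent hunks: _get_x steps against the gradient, x_ = self.x - g*(alpha/norm), and _check_wolfe accepts a step size only if the Wolfe conditions hold at the proposed point. A toy reimplementation under stated assumptions (norm is taken to be the gradient norm, energy and gradient are callables standing in for self.eggs, and the c1, c2 constants are conventional line-search defaults, not values read from NIFTy):

    import numpy as np

    def check_wolfe_sketch(energy, gradient, x, g, norm, alpha,
                           c1=1E-4, c2=0.9):
        ## Step direction is -g/norm, so the directional derivative of the
        ## energy at x is -norm (with norm assumed equal to ||g||).
        x_ = x - g*(alpha/norm)                     ## mirrors _get_x
        pg = norm
        E_, g_ = energy(x_), gradient(x_)
        sufficient = E_ <= energy(x) - c1*alpha*pg  ## sufficient decrease
        curvature = (g_ @ g)/norm <= c2*pg          ## curvature condition
        return (sufficient and curvature), x_, E_, g_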
setup.py  (view file @ 5bdcc718)
@@ -23,7 +23,7 @@ from distutils.core import setup
 import os

 setup(name="nifty",
-      version="0.5.5",
+      version="0.6.0",
       description="Numerical Information Field Theory",
       author="Marco Selig",
       author_email="mselig@mpa-garching.mpg.de",