Commit 89b08ffe authored by Martin Reinecke

Merge branch 'logging' into 'NIFTy_4'

Logging

See merge request ift/NIFTy!232
parents 91287fb0 cdfca359
Pipeline #26121 passed with stages in 5 minutes and 42 seconds
import nifty4 as ift
from nifty4.library.nonlinearities import Linear, Exponential, Tanh
import numpy as np
np.random.seed(20)

if __name__ == "__main__":
    noise_level = 0.3
    correlation_length = 0.1
    p_spec = lambda k: (1. / (k*correlation_length + 1) ** 4)

    nonlinearity = Tanh()
    # nonlinearity = Linear()
    # nonlinearity = Exponential()

    # Set up position space
    s_space = ift.RGSpace(1024)
    h_space = s_space.get_default_codomain()

    # Define harmonic transformation and associated harmonic space
    HT = ift.HarmonicTransformOperator(h_space, target=s_space)
    S = ift.ScalingOperator(1., h_space)

    # Drawing a sample sh from the prior distribution in harmonic space
    sh = S.draw_sample()

    # Choosing the measurement instrument
    # Instrument = SmoothingOperator(s_space, sigma=0.01)
    mask = np.ones(s_space.shape)
    mask[600:800] = 0.
    mask = ift.Field.from_global_data(s_space, mask)
    R = ift.GeometryRemover(s_space) * ift.DiagonalOperator(mask)
    d_space = R.target

    power = ift.sqrt(ift.create_power_operator(h_space, p_spec).diagonal)

    # Creating the mock signal and the noiseless data
    true_sky = nonlinearity(HT(power*sh))
    noiseless_data = R(true_sky)
    noise_amplitude = noiseless_data.val.std()*noise_level
    N = ift.ScalingOperator(noise_amplitude**2, d_space)
    n = N.draw_sample()

    # Creating the mock data
    d = noiseless_data + n

    IC1 = ift.GradientNormController(name="IC1", iteration_limit=100,
                                     tol_abs_gradnorm=1e-4)
    LS = ift.LineSearchStrongWolfe(c2=0.02)
    minimizer = ift.RelaxedNewton(IC1, line_searcher=LS)
    ICI = ift.GradientNormController(iteration_limit=2000,
                                     tol_abs_gradnorm=1e-3)
    inverter = ift.ConjugateGradient(controller=ICI)

    # Initial guess
    m = ift.Field.full(h_space, 1e-7)
    map_energy = ift.library.NonlinearWienerFilterEnergy(
        m, d, R, nonlinearity, HT, power, N, S, inverter=inverter)

    # Minimization with chosen minimizer
    map_energy, convergence = minimizer(map_energy)
    m = map_energy.position

    recsky = nonlinearity(HT(power*m))
    data = R.adjoint_times(d)
    lo = np.min([true_sky.min(), recsky.min(), data.min()])
    hi = np.max([true_sky.max(), recsky.max(), data.max()])
    plotdict = {"colormap": "Planck-like", "ymin": lo, "ymax": hi}
    ift.plot(true_sky, name="true_sky.png", **plotdict)
    ift.plot(recsky, name="reconstructed_sky.png", **plotdict)
    ift.plot(data, name="data.png", **plotdict)
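As an aside, the logging facility introduced by this merge request could also be used in the demo above. The snippet below is an illustrative sketch and not part of this commit; it would be appended at the end of the script and assumes only the `ift.logger` object exported further down in this diff and the `Field.norm()` method already used elsewhere in it.

    # Hypothetical addition to the demo: report reconstruction quality through
    # the new NIFTy4 logger instead of printing.
    rel_error = (true_sky - recsky).norm() / true_sky.norm()
    ift.logger.info("relative reconstruction error: {:.3e}".format(rel_error))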
@@ -9,4 +9,4 @@ f = ift.Field.from_random(domain=x, random_type='normal')
diagOp = ift.DiagonalOperator(f)
diag = ift.probe_diagonal(diagOp, 1000)
-ift.dobj.mprint((f - diag).norm())
+ift.logger.info((f - diag).norm())

import numpy as np
import nifty4 as ift

if __name__ == "__main__":
    np.random.seed(43)
    # Set up physical constants
...

@@ -22,6 +22,8 @@ from . import extra
from .utilities import memo
+from .logger import logger

__all__ = ["__version__", "dobj", "DomainTuple"] + \
    domains.__all__ + operators.__all__ + minimization.__all__ + \
    ["DomainTuple", "Field", "sqrt", "exp", "log"]

@@ -31,11 +31,6 @@ def is_numpy():
    return False

-def mprint(*args):
-    if master:
-        print(*args)

def _shareSize(nwork, nshares, myshare):
    return (nwork//nshares) + int(myshare < nwork % nshares)
...

@@ -34,10 +34,6 @@ def is_numpy():
    return True

-def mprint(*args):
-    print(*args)

def from_object(object, dtype, copy, set_locked):
    if dtype is None:
        dtype = object.dtype
...

@@ -20,18 +20,15 @@ try:
    from mpi4py import MPI
    if MPI.COMM_WORLD.Get_size() == 1:
        from .data_objects.numpy_do import *
-        # mprint("MPI found, but only with one task, using numpy_do...")
    else:
        from .data_objects.distributed_do import *
-        # mprint("MPI with multiple tasks found, using distributed_do...")
except ImportError:
    from .data_objects.numpy_do import *
-    # mprint("MPI not found, using numpy_do...")

__all__ = ["ntask", "rank", "master", "local_shape", "data_object", "full",
           "empty", "zeros", "ones", "empty_like", "vdot", "abs", "exp",
           "log", "tanh", "sqrt", "from_object", "from_random",
           "local_data", "ibegin", "ibegin_from_shape", "np_allreduce_sum",
-           "redistribute", "default_distaxis", "mprint", "is_numpy",
+           "redistribute", "default_distaxis", "is_numpy",
           "lock", "locked"]

@@ -31,7 +31,7 @@ class NonlinearWienerFilterEnergy(Energy):
        self.nonlinearity = nonlinearity
        self.ht = ht
        self.power = power
-        m = self.ht(self.power * self.position)
+        m = self.ht(self.power*self.position)
        self.LinearizedResponse = LinearizedSignalResponse(
            Instrument, nonlinearity, ht, power, m)
...
def _logger_init():
    import logging
    from . import dobj
    res = logging.getLogger('NIFTy4')
    res.setLevel(logging.DEBUG)
    if dobj.rank == 0:
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        res.addHandler(ch)
    else:
        res.addHandler(logging.NullHandler())
    return res


logger = _logger_init()
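The new logger module above attaches a StreamHandler only on the master task (dobj.rank == 0) and a NullHandler everywhere else, so each message is printed exactly once even under MPI. A minimal usage sketch, illustrative only, assuming just the standard logging levels and the `logger` object re-exported from the package as shown above:

    import nifty4 as ift

    # All messages funnel through the 'NIFTy4' logging.Logger; only rank 0
    # has a StreamHandler attached, other ranks discard output via NullHandler.
    ift.logger.info("starting minimization")
    ift.logger.warning("iteration limit reached, assuming convergence")
    ift.logger.error("search direction is not a descent direction")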
@@ -18,7 +18,7 @@
from __future__ import division
from .minimizer import Minimizer
-from .. import dobj
+from ..logger import logger

class ConjugateGradient(Minimizer):

@@ -76,12 +76,12 @@ class ConjugateGradient(Minimizer):
            q = energy.curvature(d)
            ddotq = d.vdot(q).real
            if ddotq == 0.:
-                dobj.mprint("Error: ConjugateGradient: ddotq==0.")
+                logger.error("Error: ConjugateGradient: ddotq==0.")
                return energy, controller.ERROR
            alpha = previous_gamma/ddotq
            if alpha < 0:
-                dobj.mprint("Error: ConjugateGradient: alpha<0.")
+                logger.error("Error: ConjugateGradient: alpha<0.")
                return energy, controller.ERROR
            q *= -alpha

@@ -93,7 +93,7 @@ class ConjugateGradient(Minimizer):
            gamma = r.vdot(s).real
            if gamma < 0:
-                dobj.mprint(
+                logger.error(
                    "Positive definiteness of preconditioner violated!")
                return energy, controller.ERROR
            if gamma == 0:
...

@@ -20,7 +20,7 @@ from __future__ import division
import abc
from .minimizer import Minimizer
from .line_search_strong_wolfe import LineSearchStrongWolfe
-from .. import dobj
+from ..logger import logger

class DescentMinimizer(Minimizer):

@@ -92,11 +92,11 @@ class DescentMinimizer(Minimizer):
            f_k_minus_1 = energy.value
            if new_energy.value > energy.value:
-                dobj.mprint("Error: Energy has increased")
+                logger.error("Error: Energy has increased")
                return energy, controller.ERROR
            if new_energy.value == energy.value:
-                dobj.mprint(
+                logger.warning(
                    "Warning: Energy has not changed. Assuming convergence...")
                return new_energy, controller.CONVERGED
...

@@ -17,7 +17,7 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
from .iteration_controller import IterationController
-from .. import dobj
+from ..logger import logger

class GradientNormController(IterationController):

@@ -116,13 +116,13 @@ class GradientNormController(IterationController):
            msg += " energy={:.6E}".format(energy.value)
            msg += " gradnorm={:.2E}".format(energy.gradient_norm)
            msg += " clvl=" + str(self._ccount)
-            dobj.mprint(msg)
+            logger.info(msg)
            # self.logger.info(msg)

        # Are we done?
        if self._iteration_limit is not None:
            if self._itcount >= self._iteration_limit:
-                dobj.mprint(
+                logger.warning(
                    "Warning:Iteration limit reached. Assuming convergence")
                return self.CONVERGED
        if self._ccount >= self._convergence_level:
...

@@ -21,7 +21,7 @@ from builtins import range
import numpy as np
from .descent_minimizer import DescentMinimizer
from .line_search_strong_wolfe import LineSearchStrongWolfe
-from .. import dobj
+from ..logger import logger

class L_BFGS(DescentMinimizer):

@@ -64,7 +64,7 @@ class L_BFGS(DescentMinimizer):
        idx = (k-1) % maxhist
        fact = s[idx].vdot(y[idx]) / y[idx].vdot(y[idx])
        if fact <= 0.:
-            dobj.mprint("L-BFGS curvature not positive definite!")
+            logger.error("L-BFGS curvature not positive definite!")
        p *= fact
        for i in range(k-nhist, k):
            idx = i % maxhist
...

@@ -95,7 +95,7 @@ class LineEnergy(object):
        """
        res = self._energy.gradient.vdot(self._line_direction)
        if abs(res.imag) / max(abs(res.real), 1.) > 1e-12:
-            from ..dobj import mprint
-            mprint("directional derivative has non-negligible "
+            from ..logger import logger
+            logger.warning("directional derivative has non-negligible "
                   "imaginary part:", res)
        return res.real

@@ -21,7 +21,7 @@ from builtins import range
import numpy as np
from .line_search import LineSearch
from .line_energy import LineEnergy
-from .. import dobj
+from ..logger import logger

class LineSearchStrongWolfe(LineSearch):

@@ -100,10 +100,11 @@ class LineSearchStrongWolfe(LineSearch):
        phi_0 = le_0.value
        phiprime_0 = le_0.directional_derivative
        if phiprime_0 == 0:
-            dobj.mprint("Directional derivative is zero; assuming convergence")
+            logger.warning(
+                "Directional derivative is zero; assuming convergence")
            return energy, False
        if phiprime_0 > 0:
-            dobj.mprint("Error: search direction is not a descent direction")
+            logger.error("Error: search direction is not a descent direction")
            return energy, False

        # set alphas

@@ -149,13 +150,13 @@ class LineSearchStrongWolfe(LineSearch):
            # update alphas
            alpha0, alpha1 = alpha1, min(2*alpha1, maxstepsize)
            if alpha1 == maxstepsize:
-                dobj.mprint("max step size reached")
+                logger.warning("max step size reached")
                return le_alpha1.energy, False
            phi_alpha0 = phi_alpha1
            phiprime_alpha0 = phiprime_alpha1

-        dobj.mprint("max iterations reached")
+        logger.warning("max iterations reached")
        return le_alpha1.energy, False

    def _zoom(self, alpha_lo, alpha_hi, phi_0, phiprime_0,

@@ -252,7 +253,8 @@ class LineSearchStrongWolfe(LineSearch):
                               phiprime_alphaj)
            else:
-                dobj.mprint("The line search algorithm (zoom) did not converge.")
+                logger.warning(
+                    "The line search algorithm (zoom) did not converge.")
                return le_alphaj.energy, False

    def _cubicmin(self, a, fa, fpa, b, fb, c, fc):
...

@@ -20,6 +20,7 @@ from __future__ import division
from .minimizer import Minimizer
from ..field import Field
from .. import dobj
+from ..logger import logger

class ScipyMinimizer(Minimizer):

@@ -97,9 +98,9 @@ class ScipyMinimizer(Minimizer):
            status = self._controller.check(hlp._energy)
            return hlp._energy, self._controller.check(hlp._energy)
        if not r.success:
-            dobj.mprint("Problem in Scipy minimization:", r.message)
+            logger.error("Problem in Scipy minimization:", r.message)
        else:
-            dobj.mprint("Problem in Scipy minimization")
+            logger.error("Problem in Scipy minimization")
        return hlp._energy, self._controller.ERROR
...

@@ -18,7 +18,8 @@
from ..minimization.quadratic_energy import QuadraticEnergy
from ..minimization.iteration_controller import IterationController
-from ..field import Field, dobj
+from ..field import Field
+from ..logger import logger
from .linear_operator import LinearOperator

@@ -71,5 +72,5 @@ class InversionEnabler(LinearOperator):
        energy = QuadraticEnergy(A=func, b=x, position=x0)
        r, stat = self._inverter(energy, preconditioner=self._preconditioner)
        if stat != IterationController.CONVERGED:
-            dobj.mprint("Error detected during operator inversion")
+            logger.warning("Error detected during operator inversion")
        return r.position

@@ -23,6 +23,7 @@ from .operators.diagonal_operator import DiagonalOperator
from .operators.power_distributor import PowerDistributor
from .domain_tuple import DomainTuple
from . import dobj, utilities
+from .logger import logger

__all__ = ['PS_field',
           'power_analyze',

@@ -85,7 +86,7 @@ def power_analyze(field, spaces=None, binbounds=None,
    for sp in field.domain:
        if not sp.harmonic and not isinstance(sp, PowerSpace):
-            dobj.mprint("WARNING: Field has a space in `domain` which is "
+            logger.warning("WARNING: Field has a space in `domain` which is "
                        "neither harmonic nor a PowerSpace.")
    spaces = utilities.parse_spaces(spaces, len(field.domain))
...