diff --git a/nifty/__init__.py b/nifty/__init__.py
index b17e137bf0f533c2e3d64018c3e6618676e33749..d541eb8f320c3f0bfaba22a69001790f06d50fb9 100644
--- a/nifty/__init__.py
+++ b/nifty/__init__.py
@@ -26,25 +26,22 @@ mpl.use('Agg')
from .version import __version__
+from .logger import logger
+
import dummys
# it is important to import config before d2o such that NIFTy is able to
# pre-create d2o's configuration object with the corrected path
-from config import about,\
- dependency_injector,\
+from config import dependency_injector,\
nifty_configuration,\
d2o_configuration
from d2o import distributed_data_object, d2o_librarian
-from nifty_cmaps import ncmap
from field import Field
-# this line exists for compatibility reasons
-# TODO: Remove this once the transition to field types is done.
-from spaces.space import Space as point_space
-
from random import Random
+
from nifty_simple_math import *
from nifty_utilities import *
diff --git a/nifty/config/__init__.py b/nifty/config/__init__.py
index aa3b2c34d8e5eac9c30901b5a4183b0f1835c8ee..4e4f2441173f083fc4d57553cc4b70e4bf31f17e 100644
--- a/nifty/config/__init__.py
+++ b/nifty/config/__init__.py
@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
-from nifty_about import *
-
from nifty_config import dependency_injector,\
nifty_configuration
diff --git a/nifty/config/nifty_about.py b/nifty/config/nifty_about.py
deleted file mode 100644
index 94cbe802d5a469187316ee2d3566ce096c724358..0000000000000000000000000000000000000000
--- a/nifty/config/nifty_about.py
+++ /dev/null
@@ -1,505 +0,0 @@
-## NIFTY (Numerical Information Field Theory) has been developed at the
-## Max-Planck-Institute for Astrophysics.
-##
-## Copyright (C) 2013 Max-Planck-Society
-##
-## Author: Marco Selig
-## Project homepage:
-##
-## This program is free software: you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation, either version 3 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-## See the GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program. If not, see .
-
-from sys import stdout as so
-import os
-import inspect
-
-import d2o
-import keepers
-
-from nifty import __version__
-
-
-MPI = d2o.config.dependency_injector[
- keepers.get_Configuration('D2O')['mpi_module']]
-
-comm = MPI.COMM_WORLD
-size = comm.size
-rank = comm.rank
-
-
-class switch(object):
- """
- .. __ __ __
- .. /__/ / /_ / /
- .. _______ __ __ __ / _/ _______ / /___
- .. / _____/ | |/\/ / / / / / / ____/ / _ |
- .. /_____ / | / / / / /_ / /____ / / / /
- .. /_______/ |__/\__/ /__/ \___/ \______/ /__/ /__/ class
-
- NIFTY support class for switches.
-
- Parameters
- ----------
- default : bool
- Default status of the switch (default: False).
-
- See Also
- --------
- notification : A derived class for displaying notifications.
-
- Examples
- --------
- >>> option = switch()
- >>> option.status
- False
- >>> option
- OFF
- >>> print(option)
- OFF
- >>> option.on()
- >>> print(option)
- ON
-
- Attributes
- ----------
- status : bool
- Status of the switch.
-
- """
- def __init__(self,default=False):
- """
- Initilizes the switch and sets the `status`
-
- Parameters
- ----------
- default : bool
- Default status of the switch (default: False).
-
- """
- self.status = bool(default)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def on(self):
- """
- Switches the `status` to True.
-
- """
- self.status = True
-
- def off(self):
- """
- Switches the `status` to False.
-
- """
- self.status = False
-
-
- def toggle(self):
- """
- Switches the `status`.
-
- """
- self.status = not self.status
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __repr__(self):
- if(self.status):
- return "ON"
- else:
- return "OFF"
-
-##-----------------------------------------------------------------------------
-
-##-----------------------------------------------------------------------------
-
-class notification(switch):
- """
- .. __ __ ____ __ __ __
- .. / /_ /__/ / _/ /__/ / /_ /__/
- .. __ ___ ______ / _/ __ / /_ __ _______ ____ __ / _/ __ ______ __ ___
- .. / _ | / _ | / / / / / _/ / / / ____/ / _ / / / / / / _ | / _ |
- .. / / / / / /_/ / / /_ / / / / / / / /____ / /_/ / / /_ / / / /_/ / / / / /
- .. /__/ /__/ \______/ \___/ /__/ /__/ /__/ \______/ \______| \___/ /__/ \______/ /__/ /__/ class
-
- NIFTY support class for notifications.
-
- Parameters
- ----------
- default : bool
- Default status of the switch (default: False).
- ccode : string
- Color code as string (default: "\033[0m"). The surrounding special
- characters are added if missing.
-
- Notes
- -----
- The color code is a special ANSI escape code, for a list of valid codes
- see [#]_. Multiple codes can be combined by seperating them with a
- semicolon ';'.
-
- References
- ----------
- .. [#] Wikipedia, `ANSI escape code `_.
-
- Examples
- --------
- >>> note = notification()
- >>> note.status
- True
- >>> note.cprint("This is noteworthy.")
- This is noteworthy.
- >>> note.cflush("12"); note.cflush('3')
- 123
- >>> note.off()
- >>> note.cprint("This is noteworthy.")
- >>>
-
- Raises
- ------
- TypeError
- If `ccode` is no string.
-
- Attributes
- ----------
- status : bool
- Status of the switch.
- ccode : string
- Color code as string.
-
- """
- _code = "\033[0m" ## "\033[39;49m"
- _ccode_red = "\033[31;1m"
- _ccode_yellow = "\033[33;1m"
- _ccode_green = "\033[32;1m"
- def __init__(self,default=True,ccode="\033[0m"):
- """
- Initializes the notification and sets `status` and `ccode`
-
- Parameters
- ----------
- default : bool
- Default status of the switch (default: False).
- ccode : string
- Color code as string (default: "\033[0m"). The surrounding
- special characters are added if missing.
-
- Raises
- ------
- TypeError
- If `ccode` is no string.
-
- """
- self.status = bool(default)
-
- ## check colour code
- if(not isinstance(ccode,str)):
- raise TypeError(about._errors.cstring("ERROR: invalid input."))
- if(ccode[0]!="\033"):
- ccode = "\033"+ccode
- if(ccode[1]!='['):
- ccode = ccode[0]+'['+ccode[1:]
- if(ccode[-1]!='m'):
- ccode = ccode+'m'
- self.ccode = ccode
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def set_ccode(self,newccode=None):
- """
- Resets the the `ccode` string.
-
- Parameters
- ----------
- newccode : string
- Color code as string (default: "\033[0m"). The surrounding
- characters "\033", '[', and 'm' are added if missing.
-
- Returns
- -------
- None
-
- Raises
- ------
- TypeError
- If `ccode` is no string.
-
- Examples
- --------
- >>> note = notification()
- >>> note.set_ccode("31;1") ## "31;1" corresponds to red and bright
-
- """
- if(newccode is None):
- newccode = self._code
- else:
- ## check colour code
- if(not isinstance(newccode,str)):
- raise TypeError(about._errors.cstring("ERROR: invalid input."))
- if(newccode[0]!="\033"):
- newccode = "\033"+newccode
- if(newccode[1]!='['):
- newccode = newccode[0]+'['+newccode[1:]
- if(newccode[-1]!='m'):
- newccode = newccode+'m'
- self.ccode = newccode
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def _get_caller(self):
- result = ''
- i = 2
- current = inspect.stack()[i][3]
- while current != '':
- result = '.' + current + result
- i += 1
- current = inspect.stack()[i][3]
- return result[1:]
-
- def cstring(self, subject):
- """
- Casts an object to a string and augments that with a colour code.
-
- Parameters
- ----------
- subject : {string, object}
- String to be augmented with a color code. A given object is
- cast to its string representation by :py:func:`str`.
-
- Returns
- -------
- cstring : string
- String augmented with a color code.
-
- """
- if rank == 0:
- return self.ccode + str(self._get_caller()) + ':\n' + \
- str(subject) + self._code + '\n'
-
- def cflush(self, subject):
- """
- Flushes an object in its colour coded sting representation to the
- standard output (*without* line break).
-
- Parameters
- ----------
- subject : {string, object}
- String to be flushed. A given object is
- cast to a string by :py:func:`str`.
-
- Returns
- -------
- None
-
- """
- if self.status and rank == 0:
- so.write(self.cstring(subject))
- so.flush()
-
- def cprint(self, subject):
- """
- Flushes an object in its colour coded sting representation to the
- standard output (*with* line break).
-
- Parameters
- ----------
- subject : {string, object}
- String to be flushed. A given object is
- cast to a string by :py:func:`str`.
-
- Returns
- -------
- None
-
- """
- if self.status and rank == 0:
- so.write(self.cstring(subject)+"\n")
- so.flush()
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __repr__(self):
- if(self.status):
- return self.cstring("ON")
- else:
- return self.cstring("OFF")
-
-##-----------------------------------------------------------------------------
-
-
-
-class _about(object): ## nifty support class for global settings
- """
- NIFTY support class for global settings.
-
- .. warning::
- Turning off the `_error` notification will suppress all NIFTY error
- strings (not recommended).
-
- Examples
- --------
- >>> from nifty import *
- >>> about
- nifty version 0.2.0
- >>> print(about)
- nifty version 0.2.0
- - errors = ON (immutable)
- - warnings = ON
- - infos = OFF
- - multiprocessing = ON
- - hermitianize = ON
- - lm2gl = ON
- >>> about.infos.on()
- >>> about.about.save_config()
-
- >>> from nifty import *
- INFO: nifty version 0.2.0
- >>> print(about)
- nifty version 0.2.0
- - errors = ON (immutable)
- - warnings = ON
- - infos = ON
- - multiprocessing = ON
- - hermitianize = ON
- - lm2gl = ON
-
- Attributes
- ----------
- warnings : notification
- Notification instance controlling whether warings shall be printed.
- infos : notification
- Notification instance controlling whether information shall be
- printed.
- multiprocessing : switch
- Switch instance controlling whether multiprocessing might be
- performed.
- hermitianize : switch
- Switch instance controlling whether hermitian symmetry for certain
- :py:class:`rg_space` instances is inforced.
- lm2gl : switch
- Switch instance controlling whether default target of a
- :py:class:`lm_space` instance is a :py:class:`gl_space` or a
- :py:class:`hp_space` instance.
-
- """
- def __init__(self):
- """
- Initializes the _about and sets the attributes.
-
- """
- ## version
- self._version = str(__version__)
-
- ## switches and notifications
- self._errors = notification(default=True,
- ccode=notification._code)
- self.warnings = notification(default=True,
- ccode=notification._code)
- self.infos = notification(default=False,
- ccode=notification._code)
- self.multiprocessing = switch(default=True)
- self.hermitianize = switch(default=True)
- self.lm2gl = switch(default=True)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def load_config(self,force=True):
- """
- Reads the configuration file "~/.nifty/nifty_config".
-
- Parameters
- ----------
- force : bool
- Whether to cause an error if the file does not exsist or not.
-
- Returns
- -------
- None
-
- Raises
- ------
- ValueError
- If the configuration file is malformed.
- OSError
- If the configuration file does not exist.
-
- """
- nconfig = os.path.expanduser('~')+"/.nifty/nifty_config"
- if(os.path.isfile(nconfig)):
- rawconfig = []
- with open(nconfig,'r') as configfile:
- for ll in configfile:
- if(not ll.startswith('#')):
- rawconfig += ll.split()
- try:
- self._errors = notification(default=True,ccode=rawconfig[0])
- self.warnings = notification(default=int(rawconfig[1]),ccode=rawconfig[2])
- self.infos = notification(default=int(rawconfig[3]),ccode=rawconfig[4])
- self.multiprocessing = switch(default=int(rawconfig[5]))
- self.hermitianize = switch(default=int(rawconfig[6]))
- self.lm2gl = switch(default=int(rawconfig[7]))
- except(IndexError):
- raise ValueError(about._errors.cstring("ERROR: '"+nconfig+"' damaged."))
- elif(force):
- raise OSError(about._errors.cstring("ERROR: '"+nconfig+"' nonexisting."))
-
- def save_config(self):
- """
- Writes to the configuration file "~/.nifty/nifty_config".
-
- Returns
- -------
- None
-
- """
- rawconfig = [self._errors.ccode[2:-1],str(int(self.warnings.status)),self.warnings.ccode[2:-1],str(int(self.infos.status)),self.infos.ccode[2:-1],str(int(self.multiprocessing.status)),str(int(self.hermitianize.status)),str(int(self.lm2gl.status))]
-
- nconfig = os.path.expanduser('~')+"/.nifty/nifty_config"
- if(os.path.isfile(nconfig)):
- rawconfig = [self._errors.ccode[2:-1],str(int(self.warnings.status)),self.warnings.ccode[2:-1],str(int(self.infos.status)),self.infos.ccode[2:-1],str(int(self.multiprocessing.status)),str(int(self.hermitianize.status)),str(int(self.lm2gl.status))]
- nconfig = os.path.expanduser('~')+"/.nifty/nifty_config"
-
- with open(nconfig,'r') as sourcefile:
- with open(nconfig+"_",'w') as targetfile:
- for ll in sourcefile:
- if(ll.startswith('#')):
- targetfile.write(ll)
- else:
- ll = ll.replace(ll.split()[0],rawconfig[0]) ## one(!) per line
- rawconfig = rawconfig[1:]
- targetfile.write(ll)
- os.rename(nconfig+"_",nconfig) ## overwrite old congiguration
- else:
- if(not os.path.exists(os.path.expanduser('~')+"/.nifty")):
- os.makedirs(os.path.expanduser('~')+"/.nifty")
- with open(nconfig,'w') as targetfile:
- for rr in rawconfig:
- targetfile.write(rr+"\n") ## one(!) per line
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __repr__(self):
- return "nifty version "+self._version
-
- def __str__(self):
- return "nifty version "+self._version+"\n- errors = "+self._errors.cstring("ON")+" (immutable)\n- warnings = "+str(self.warnings)+"\n- infos = "+str(self.infos)+"\n- multiprocessing = "+str(self.multiprocessing)+"\n- hermitianize = "+str(self.hermitianize)+"\n- lm2gl = "+str(self.lm2gl)
-
-##-----------------------------------------------------------------------------
-
-## set global instance
-about = _about()
-#about.load_config(force=False)
-#about.infos.cprint("INFO: "+about.__repr__())
-
-
diff --git a/nifty/demos/demo_excaliwir.py b/nifty/demos/demo_excaliwir.py
index 0fdd875ec922d7ceb22ba93597936f1c279e36a4..c8b7ae04ffb99420717834aca2f546a3a65ea591 100644
--- a/nifty/demos/demo_excaliwir.py
+++ b/nifty/demos/demo_excaliwir.py
@@ -258,7 +258,6 @@ class problem(object):
if(__name__=="__main__"):
x = RGSpace((1280), zerocenter=True)
p = problem(x, log = False)
- about.warnings.off()
## pl.close("all")
#
# ## define signal space
diff --git a/nifty/demos/demo_faraday.py b/nifty/demos/demo_faraday.py
index 7ac6024d6ef5519e9bae041e0d856c1f01f62bf4..a910a9aeebe1d9bf95bf0bb99ab4838e4b080d7b 100644
--- a/nifty/demos/demo_faraday.py
+++ b/nifty/demos/demo_faraday.py
@@ -41,9 +41,6 @@ from __future__ import division
from nifty import *
-about.warnings.off()
-about.infos.off()
-
##-----------------------------------------------------------------------------
diff --git a/nifty/demos/demo_tomography.py b/nifty/demos/demo_tomography.py
index 88a0a96fb9a79c7c82db85080ff8292d2441996f..ffd1e40efc5a992c522ccb8a42502322e1908cf0 100644
--- a/nifty/demos/demo_tomography.py
+++ b/nifty/demos/demo_tomography.py
@@ -4,9 +4,6 @@ from nifty import *
if __name__ == "__main__":
-
- about.warnings.off()
-
shape = (256, 256)
x_space = RGSpace(shape)
diff --git a/nifty/demos/demo_wf1.py b/nifty/demos/demo_wf1.py
index 0546ccd0cff5c9b1061b82caf3cde2af875df556..5925c87186bdc97c799ed4a49da3607d0162e85f 100644
--- a/nifty/demos/demo_wf1.py
+++ b/nifty/demos/demo_wf1.py
@@ -43,7 +43,6 @@ import gc
from nifty import * # version 0.8.0
if __name__ == "__main__":
- about.warnings.off()
# some signal space; e.g., a two-dimensional regular grid
shape = [1024, 1024]
diff --git a/nifty/field.py b/nifty/field.py
index 5464f1e346e256ced8227bdeb81a7b0a00c5900f..63e10fcd94cb89fa043d6d2d87da3fdf81c2d16a 100644
--- a/nifty/field.py
+++ b/nifty/field.py
@@ -4,8 +4,7 @@ import numpy as np
from d2o import distributed_data_object,\
STRATEGIES as DISTRIBUTION_STRATEGIES
-from nifty.config import about,\
- nifty_configuration as gc
+from nifty.config import nifty_configuration as gc
from nifty.field_types import FieldType
@@ -15,6 +14,9 @@ from nifty.spaces.power_space import PowerSpace
import nifty.nifty_utilities as utilities
from nifty.random import Random
+import logging
+logger = logging.getLogger('NIFTy.Field')
+
class Field(object):
# ---Initialization methods---
@@ -58,9 +60,9 @@ class Field(object):
for d in domain:
if not isinstance(d, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: Given domain contains something that is not a "
- "nifty.space."))
+ raise TypeError(
+ "Given domain contains something that is not a "
+ "nifty.space.")
return domain
def _parse_field_type(self, field_type, val=None):
@@ -75,8 +77,8 @@ class Field(object):
field_type = tuple(field_type)
for ft in field_type:
if not isinstance(ft, FieldType):
- raise TypeError(about._errors.cstring(
- "ERROR: Given object is not a nifty.FieldType."))
+ raise TypeError(
+ "Given object is not a nifty.FieldType.")
return field_type
def _get_axes_tuple(self, things_with_shape, start=0):
@@ -114,12 +116,12 @@ class Field(object):
elif isinstance(val, Field):
distribution_strategy = val.distribution_strategy
else:
- about.warnings.cprint("WARNING: Datamodel set to default!")
+            logger.warning("Datamodel set to default!")
distribution_strategy = gc['default_distribution_strategy']
elif distribution_strategy not in DISTRIBUTION_STRATEGIES['global']:
- raise ValueError(about._errors.cstring(
- "ERROR: distribution_strategy must be a global-type "
- "strategy."))
+ raise ValueError(
+ "distribution_strategy must be a global-type "
+ "strategy.")
return distribution_strategy
# ---Factory methods---
@@ -165,12 +167,9 @@ class Field(object):
random_arguments = {'low': low,
'high': high}
-# elif random_type == 'syn':
-# pass
-
else:
- raise KeyError(about._errors.cstring(
- "ERROR: unsupported random key '" + str(random_type) + "'."))
+ raise KeyError(
+ "unsupported random key '" + str(random_type) + "'.")
return random_arguments
@@ -183,7 +182,7 @@ class Field(object):
for sp in self.domain:
if not sp.harmonic and not isinstance(sp, PowerSpace):
raise AttributeError(
- "ERROR: Field has a space in `domain` which is neither "
+ "Field has a space in `domain` which is neither "
"harmonic nor a PowerSpace.")
# check if the `spaces` input is valid
@@ -192,22 +191,22 @@ class Field(object):
if len(self.domain) == 1:
spaces = (0,)
else:
- raise ValueError(about._errors.cstring(
- "ERROR: Field has multiple spaces as domain "
- "but `spaces` is None."))
+ raise ValueError(
+ "Field has multiple spaces as domain "
+ "but `spaces` is None.")
if len(spaces) == 0:
- raise ValueError(about._errors.cstring(
- "ERROR: No space for analysis specified."))
+ raise ValueError(
+ "No space for analysis specified.")
elif len(spaces) > 1:
- raise ValueError(about._errors.cstring(
- "ERROR: Conversion of only one space at a time is allowed."))
+ raise ValueError(
+ "Conversion of only one space at a time is allowed.")
space_index = spaces[0]
if not self.domain[space_index].harmonic:
- raise ValueError(about._errors.cstring(
- "ERROR: The analyzed space must be harmonic."))
+ raise ValueError(
+ "The analyzed space must be harmonic.")
# Create the target PowerSpace instance:
# If the associated signal-space field was real, we extract the
@@ -289,14 +288,14 @@ class Field(object):
def _shape_up_pindex(self, pindex, target_shape, target_strategy, axes):
if pindex.distribution_strategy not in \
DISTRIBUTION_STRATEGIES['global']:
- raise ValueError("ERROR: pindex's distribution strategy must be "
+ raise ValueError("pindex's distribution strategy must be "
"global-type")
if pindex.distribution_strategy in DISTRIBUTION_STRATEGIES['slicing']:
if ((0 not in axes) or
(target_strategy is not pindex.distribution_strategy)):
raise ValueError(
- "ERROR: A slicing distributor shall not be reshaped to "
+ "A slicing distributor shall not be reshaped to "
"something non-sliced.")
semiscaled_shape = [1, ] * len(target_shape)
@@ -316,7 +315,7 @@ class Field(object):
for sp in self.domain:
if not sp.harmonic and not isinstance(sp, PowerSpace):
raise AttributeError(
- "ERROR: Field has a space in `domain` which is neither "
+ "Field has a space in `domain` which is neither "
"harmonic nor a PowerSpace.")
# check if the `spaces` input is valid
@@ -325,22 +324,22 @@ class Field(object):
if len(self.domain) == 1:
spaces = (0,)
else:
- raise ValueError(about._errors.cstring(
- "ERROR: Field has multiple spaces as domain "
- "but `spaces` is None."))
+ raise ValueError(
+ "Field has multiple spaces as domain "
+ "but `spaces` is None.")
if len(spaces) == 0:
- raise ValueError(about._errors.cstring(
- "ERROR: No space for synthesis specified."))
+ raise ValueError(
+ "No space for synthesis specified.")
elif len(spaces) > 1:
- raise ValueError(about._errors.cstring(
- "ERROR: Conversion of only one space at a time is allowed."))
+ raise ValueError(
+ "Conversion of only one space at a time is allowed.")
power_space_index = spaces[0]
power_domain = self.domain[power_space_index]
if not isinstance(power_domain, PowerSpace):
- raise ValueError(about._errors.cstring(
- "ERROR: A PowerSpace is needed for field synthetization."))
+ raise ValueError(
+ "A PowerSpace is needed for field synthetization.")
# create the result domain
result_domain = list(self.domain)
@@ -385,8 +384,8 @@ class Field(object):
result_list[0].domain_axes[power_space_index])
if pindex.distribution_strategy is not local_distribution_strategy:
- about.warnings.cprint(
- "WARNING: The distribution_stragey of pindex does not fit the "
+            logger.warning(
+                "The distribution_strategy of pindex does not fit the "
"slice_local distribution strategy of the synthesized field.")
# Now use numpy advanced indexing in order to put the entries of the
@@ -623,8 +622,8 @@ class Field(object):
for index in xrange(len(self.field_type)):
assert x.field_type[index] == self.field_type[index]
except AssertionError:
- raise ValueError(about._errors.cstring(
- "ERROR: domains are incompatible."))
+ raise ValueError(
+ "domains are incompatible.")
# extract the data from x and try to dot with this
x = x.get_val(copy=False)
@@ -788,8 +787,8 @@ class Field(object):
for index in xrange(len(self.field_type)):
assert other.field_type[index] == self.field_type[index]
except AssertionError:
- raise ValueError(about._errors.cstring(
- "ERROR: domains are incompatible."))
+ raise ValueError(
+ "domains are incompatible.")
other = other.get_val(copy=False)
self_val = self.get_val(copy=False)
diff --git a/nifty/logger.py b/nifty/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..b379750b2693ba40558f77d411c153ac671adda2
--- /dev/null
+++ b/nifty/logger.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+
+from keepers import MPILogger
+
+logger = MPILogger()
diff --git a/nifty/minimization/conjugate_gradient.py b/nifty/minimization/conjugate_gradient.py
index ee1f2f58cd62ec9c302712c5885e9d51b7c086b7..630c05999c0e2d22c7d8d61d32a92d213ae908af 100644
--- a/nifty/minimization/conjugate_gradient.py
+++ b/nifty/minimization/conjugate_gradient.py
@@ -1,319 +1,5 @@
# -*- coding: utf-8 -*-
-import numpy as np
-from nifty.config import notification, about
class ConjugateGradient(object):
- """
- .. _______ ____ __
- .. / _____/ / _ /
- .. / /____ __ / /_/ / __
- .. \______//__/ \____ //__/ class
- .. /______/
-
- NIFTY tool class for conjugate gradient
-
- This tool minimizes :math:`A x = b` with respect to `x` given `A` and
- `b` using a conjugate gradient; i.e., a step-by-step minimization
- relying on conjugated gradient directions. Further, `A` is assumed to
- be a positive definite and self-adjoint operator. The use of a
- preconditioner `W` that is roughly the inverse of `A` is optional.
- For details on the methodology refer to [#]_, for details on usage and
- output, see the notes below.
-
- Parameters
- ----------
- A : {operator, function}
- Operator `A` applicable to a field.
- b : field
- Resulting field of the operation `A(x)`.
- W : {operator, function}, *optional*
- Operator `W` that is a preconditioner on `A` and is applicable to a
- field (default: None).
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- reset : integer, *optional*
- Number of iterations after which to restart; i.e., forget previous
- conjugated directions (default: sqrt(b.dim)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- See Also
- --------
- scipy.sparse.linalg.cg
-
- Notes
- -----
- After initialization by `__init__`, the minimizer is started by calling
- it using `__call__`, which takes additional parameters. Notifications,
- if enabled, will state the iteration number, current step widths
- `alpha` and `beta`, the current relative residual `delta` that is
- compared to the tolerance, and the convergence level if changed.
- The minimizer will exit in three states: DEAD if alpha becomes
- infinite, QUIT if the maximum number of iterations is reached, or DONE
- if convergence is achieved. Returned will be the latest `x` and the
- latest convergence level, which can evaluate ``True`` for the exit
- states QUIT and DONE.
-
- References
- ----------
- .. [#] J. R. Shewchuk, 1994, `"An Introduction to the Conjugate
- Gradient Method Without the Agonizing Pain"
- `_
-
- Examples
- --------
- >>> b = field(point_space(2), val=[1, 9])
- >>> A = diagonal_operator(b.domain, diag=[4, 3])
- >>> x,convergence = conjugate_gradient(A, b, note=True)(tol=1E-4, clevel=3)
- iteration : 00000001 alpha = 3.3E-01 beta = 1.3E-03 delta = 3.6E-02
- iteration : 00000002 alpha = 2.5E-01 beta = 7.6E-04 delta = 1.0E-03
- iteration : 00000003 alpha = 3.3E-01 beta = 2.5E-04 delta = 1.6E-05 convergence level : 1
- iteration : 00000004 alpha = 2.5E-01 beta = 1.8E-06 delta = 2.1E-08 convergence level : 2
- iteration : 00000005 alpha = 2.5E-01 beta = 2.2E-03 delta = 1.0E-09 convergence level : 3
- ... done.
- >>> bool(convergence)
- True
- >>> x.val # yields 1/4 and 9/3
- array([ 0.25, 3. ])
-
- Attributes
- ----------
- A : {operator, function}
- Operator `A` applicable to a field.
- x : field
- Current field.
- b : field
- Resulting field of the operation `A(x)`.
- W : {operator, function}
- Operator `W` that is a preconditioner on `A` and is applicable to a
- field; can be ``None``.
- spam : function
- Callback function which is given the current `x` and iteration
- counter each iteration; can be ``None``.
- reset : integer
- Number of iterations after which to restart; i.e., forget previous
- conjugated directions (default: sqrt(b.dim)).
- note : notification
- Notification instance.
-
- """
- def __init__(self):
- pass
-
- def __call__(self, A, b, x0=None, W=None, spam=None, reset=None,
- note=False, **kwargs):
- """
- Runs the conjugate gradient minimization.
-
- Parameters
- ----------
- x0 : field, *optional*
- Starting guess for the minimization.
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by current relative
- residual (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 1).
- limii : integer, *optional*
- Maximum number of iterations performed (default: 10 * b.dim).
-
- Returns
- -------
- x : field
- Latest `x` of the minimization.
- convergence : integer
- Latest convergence level indicating whether the minimization
- has converged or not.
-
- """
- if hasattr(A, "__call__"):
- self.A = A
- else:
- raise AttributeError(about._errors.cstring(
- "ERROR: A must be callable!"))
-
- self.b = b
-
- if W is None or hasattr(W, "__call__"):
- self.W = W
- else:
- raise AttributeError(about._errors.cstring(
- "ERROR: W must be None or callable!"))
-
- self.spam = spam
-
- if reset is None:
- self.reset = max(2, int(np.sqrt(b.domain.dim)))
- else:
- self.reset = max(2, int(reset))
-
- self.note = notification(default=bool(note))
-
- self.x = self.b.copy_empty()
- self.x.set_val(new_val=0)
- self.x.set_val(new_val=x0)
-
- if self.W is None:
- return self._calc_without(**kwargs)
- else:
- return self._calc_with(**kwargs)
-
- def _calc_without(self, tol=1E-4, clevel=1, limii=None):
- clevel = int(clevel)
- if limii is None:
- limii = 10*self.b.domain.dim
- else:
- limii = int(limii)
-
- r = self.b-self.A(self.x)
- print ('r', r.val)
- d = self.b.copy_empty()
- d.set_val(new_val = r.get_val())
- gamma = r.dot(d)
- if gamma==0:
- return self.x, clevel+1
- delta_ = np.absolute(gamma)**(-0.5)
-
-
- convergence = 0
- ii = 1
- while(True):
-
- # print ('gamma', gamma)
- q = self.A(d)
- # print ('q', q.val)
- alpha = gamma/d.dot(q) ## positive definite
- if np.isfinite(alpha) == False:
- self.note.cprint(
- "\niteration : %08u alpha = NAN\n... dead."%ii)
- return self.x, 0
- self.x += d * alpha
- # print ('x', self.x.val)
- if np.signbit(np.real(alpha)) == True:
- about.warnings.cprint(
- "WARNING: positive definiteness of A violated.")
- r = self.b-self.A(self.x)
- elif (ii%self.reset) == 0:
- r = self.b-self.A(self.x)
- else:
- r -= q * alpha
- # print ('r', r.val)
- gamma_ = gamma
- gamma = r.dot(r)
- # print ('gamma', gamma)
- beta = max(0, gamma/gamma_) ## positive definite
- # print ('d*beta', beta, (d*beta).val)
- d = r + d*beta
- # print ('d', d.val)
- delta = delta_*np.absolute(gamma)**0.5
- self.note.cflush(
- "\niteration : %08u alpha = %3.1E beta = %3.1E delta = %3.1E"\
- %(ii,np.real(alpha),np.real(beta),np.real(delta)))
- if gamma == 0:
- convergence = clevel+1
- self.note.cprint(" convergence level : INF\n... done.")
- break
- elif np.absolute(delta) runs cg with preconditioner
-
- clevel = int(clevel)
- if(limii is None):
- limii = 10*self.b.domain.dim
- else:
- limii = int(limii)
- r = self.b-self.A(self.x)
-
- d = self.W(r)
- gamma = r.dot(d)
- if gamma==0:
- return self.x, clevel+1
- delta_ = np.absolute(gamma)**(-0.5)
-
- convergence = 0
- ii = 1
- while(True):
- q = self.A(d)
- alpha = gamma/d.dot(q) ## positive definite
- if np.isfinite(alpha) == False:
- self.note.cprint(
- "\niteration : %08u alpha = NAN\n... dead."%ii)
- return self.x, 0
- self.x += d * alpha ## update
- if np.signbit(np.real(alpha)) == True:
- about.warnings.cprint(
- "WARNING: positive definiteness of A violated.")
- r = self.b-self.A(self.x)
- elif (ii%self.reset) == 0:
- r = self.b-self.A(self.x)
- else:
- r -= q * alpha
- s = self.W(r)
- gamma_ = gamma
- gamma = r.dot(s)
- if np.signbit(np.real(gamma)) == True:
- about.warnings.cprint(
- "WARNING: positive definiteness of W violated.")
- beta = max(0, gamma/gamma_) ## positive definite
- d = s + d*beta ## conjugated gradient
-
- delta = delta_*np.absolute(gamma)**0.5
- self.note.cflush(
- "\niteration : %08u alpha = %3.1E beta = %3.1E delta = %3.1E"\
- %(ii,np.real(alpha),np.real(beta),np.real(delta)))
- if gamma==0:
- convergence = clevel+1
- self.note.cprint(" convergence level : INF\n... done.")
- break
- elif np.absolute(delta)"
-
-##=============================================================================
-
+ pass
diff --git a/nifty/nifty_cmaps.py b/nifty/nifty_cmaps.py
deleted file mode 100644
index 014367951b74e051b11024bf6a1acbc3dbba51ae..0000000000000000000000000000000000000000
--- a/nifty/nifty_cmaps.py
+++ /dev/null
@@ -1,279 +0,0 @@
-## NIFTY (Numerical Information Field Theory) has been developed at the
-## Max-Planck-Institute for Astrophysics.
-##
-## Copyright (C) 2013 Max-Planck-Society
-##
-## Author: Marco Selig
-## Project homepage:
-##
-## This program is free software: you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation, either version 3 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-## See the GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program. If not, see .
-
-"""
- .. __ ____ __
- .. /__/ / _/ / /_
- .. __ ___ __ / /_ / _/ __ __
- .. / _ | / / / _/ / / / / / /
- .. / / / / / / / / / /_ / /_/ /
- .. /__/ /__/ /__/ /__/ \___/ \___ / cmaps
- .. /______/
-
- This module provides the `ncmap` class whose static methods return color
- maps.
-
- The visualization of fields is useful for obvious reasons, and therefore
- some nice color maps are here to be found. Those are segmented color maps
- that can be used in many settings, including the native plotting method for
- fields. (Some of the color maps offered here are results from IFT
- publications, cf. references below.)
-
- Examples
- --------
- >>> from nifty.nifty_cmaps import *
- >>> f = field(rg_space([42, 42]), random="uni", vmin=-1)
- >>> f[21:] = f.smooth(sigma=1/42)[21:]
- >>> [f.plot(cmap=cc, vmin=-0.8, vmax=0.8) for cc in [None, ncmap.pm()]]
- ## two 2D plots open
-
-"""
-from __future__ import division
-from matplotlib.colors import LinearSegmentedColormap as cm
-
-
-##-----------------------------------------------------------------------------
-
-class ncmap(object):
- """
- .. __ ___ _______ __ ___ ____ ____ __ ______
- .. / _ | / ____/ / _ _ | / _ / / _ |
- .. / / / / / /____ / / / / / / / /_/ / / /_/ /
- .. /__/ /__/ \______/ /__/ /__/ /__/ \______| / ____/ class
- .. /__/
-
- NIFTY support class for color maps.
-
- This class provides several *nifty* color maps that are returned by
- its static methods. The `ncmap` class is not meant to be initialised.
-
- See Also
- --------
- matplotlib.colors.LinearSegmentedColormap
-
- Examples
- --------
- >>> f = field(rg_space([42, 42]), random="uni", vmin=-1)
- >>> f.plot(cmap=ncmap.pm(), vmin=-1, vmax=1)
- ## 2D plot opens
-
- """
- __init__ = None
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def he(ncolors=256):
- """
- Returns a color map often used in High Energy Astronomy.
-
- Parameters
- ----------
- ncolors : int, *optional*
- Number of color segments (default: 256).
-
- Returns
- -------
- cmap : matplotlib.colors.LinearSegmentedColormap instance
- Linear segmented color map.
-
- """
- segmentdata = {"red": [(0.000, 0.0, 0.0), (0.167, 0.0, 0.0),
- (0.333, 0.5, 0.5), (0.500, 1.0, 1.0),
- (0.667, 1.0, 1.0), (0.833, 1.0, 1.0),
- (1.000, 1.0, 1.0)],
- "green": [(0.000, 0.0, 0.0), (0.167, 0.0, 0.0),
- (0.333, 0.0, 0.0), (0.500, 0.0, 0.0),
- (0.667, 0.5, 0.5), (0.833, 1.0, 1.0),
- (1.000, 1.0, 1.0)],
- "blue": [(0.000, 0.0, 0.0), (0.167, 1.0, 1.0),
- (0.333, 0.5, 0.5), (0.500, 0.0, 0.0),
- (0.667, 0.0, 0.0), (0.833, 0.0, 0.0),
- (1.000, 1.0, 1.0)]}
-
- return cm("High Energy", segmentdata, N=int(ncolors), gamma=1.0)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def fm(ncolors=256):
- """
- Returns a color map used in reconstruction of the "Faraday Map".
-
- Parameters
- ----------
- ncolors : int, *optional*
- Number of color segments (default: 256).
-
- Returns
- -------
- cmap : matplotlib.colors.LinearSegmentedColormap instance
- Linear segmented color map.
-
- References
- ----------
- .. [#] N. Opermann et. al.,
- "An improved map of the Galactic Faraday sky",
- Astronomy & Astrophysics, Volume 542, id.A93, 06/2012;
- `arXiv:1111.6186 `_
-
- """
- segmentdata = {"red": [(0.000, 0.35, 0.35), (0.100, 0.40, 0.40),
- (0.200, 0.25, 0.25), (0.410, 0.47, 0.47),
- (0.500, 0.80, 0.80), (0.560, 0.96, 0.96),
- (0.590, 1.00, 1.00), (0.740, 0.80, 0.80),
- (0.800, 0.80, 0.80), (0.900, 0.50, 0.50),
- (1.000, 0.40, 0.40)],
- "green": [(0.000, 0.00, 0.00), (0.200, 0.00, 0.00),
- (0.362, 0.88, 0.88), (0.500, 1.00, 1.00),
- (0.638, 0.88, 0.88), (0.800, 0.25, 0.25),
- (0.900, 0.30, 0.30), (1.000, 0.20, 0.20)],
- "blue": [(0.000, 0.35, 0.35), (0.100, 0.40, 0.40),
- (0.200, 0.80, 0.80), (0.260, 0.80, 0.80),
- (0.410, 1.00, 1.00), (0.440, 0.96, 0.96),
- (0.500, 0.80, 0.80), (0.590, 0.47, 0.47),
- (0.800, 0.00, 0.00), (1.000, 0.00, 0.00)]}
-
- return cm("Faraday Map", segmentdata, N=int(ncolors), gamma=1.0)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def fu(ncolors=256):
- """
- Returns a color map used for the "Faraday Map Uncertainty".
-
- Parameters
- ----------
- ncolors : int, *optional*
- Number of color segments (default: 256).
-
- Returns
- -------
- cmap : matplotlib.colors.LinearSegmentedColormap instance
- Linear segmented color map.
-
- References
- ----------
- .. [#] N. Opermann et. al.,
- "An improved map of the Galactic Faraday sky",
- Astronomy & Astrophysics, Volume 542, id.A93, 06/2012;
- `arXiv:1111.6186 `_
-
- """
- segmentdata = {"red": [(0.000, 1.00, 1.00), (0.100, 0.80, 0.80),
- (0.200, 0.65, 0.65), (0.410, 0.60, 0.60),
- (0.500, 0.70, 0.70), (0.560, 0.96, 0.96),
- (0.590, 1.00, 1.00), (0.740, 0.80, 0.80),
- (0.800, 0.80, 0.80), (0.900, 0.50, 0.50),
- (1.000, 0.40, 0.40)],
- "green": [(0.000, 0.90, 0.90), (0.200, 0.65, 0.65),
- (0.362, 0.95, 0.95), (0.500, 1.00, 1.00),
- (0.638, 0.88, 0.88), (0.800, 0.25, 0.25),
- (0.900, 0.30, 0.30), (1.000, 0.20, 0.20)],
- "blue": [(0.000, 1.00, 1.00), (0.100, 0.80, 0.80),
- (0.200, 1.00, 1.00), (0.410, 1.00, 1.00),
- (0.440, 0.96, 0.96), (0.500, 0.70, 0.70),
- (0.590, 0.42, 0.42), (0.800, 0.00, 0.00),
- (1.000, 0.00, 0.00)]}
-
- return cm("Faraday Uncertainty", segmentdata, N=int(ncolors),
- gamma=1.0)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def pm(ncolors=256):
- """
- Returns a color map useful for a zero-centerd range of values.
-
- Parameters
- ----------
- ncolors : int, *optional*
- Number of color segments (default: 256).
-
- Returns
- -------
- cmap : matplotlib.colors.LinearSegmentedColormap instance
- Linear segmented color map.
-
- """
- segmentdata = {"red": [(0.0, 1.00, 1.00), (0.1, 0.96, 0.96),
- (0.2, 0.84, 0.84), (0.3, 0.64, 0.64),
- (0.4, 0.36, 0.36), (0.5, 0.00, 0.00),
- (0.6, 0.00, 0.00), (0.7, 0.00, 0.00),
- (0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
- (1.0, 0.00, 0.00)],
- "green": [(0.0, 0.50, 0.50), (0.1, 0.32, 0.32),
- (0.2, 0.18, 0.18), (0.3, 0.08, 0.08),
- (0.4, 0.02, 0.02), (0.5, 0.00, 0.00),
- (0.6, 0.02, 0.02), (0.7, 0.08, 0.08),
- (0.8, 0.18, 0.18), (0.9, 0.32, 0.32),
- (1.0, 0.50, 0.50)],
- "blue": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
- (0.2, 0.00, 0.00), (0.3, 0.00, 0.00),
- (0.4, 0.00, 0.00), (0.5, 0.00, 0.00),
- (0.6, 0.36, 0.36), (0.7, 0.64, 0.64),
- (0.8, 0.84, 0.84), (0.9, 0.96, 0.96),
- (1.0, 1.00, 1.00)]}
-
- return cm("Plus Minus", segmentdata, N=int(ncolors), gamma=1.0)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def planck(ncolors=256):
- """
- Returns a color map similar to the one used for the "Planck CMB Map".
-
- Parameters
- ----------
- ncolors : int, *optional*
- Number of color segments (default: 256).
-
- Returns
- -------
- cmap : matplotlib.colors.LinearSegmentedColormap instance
- Linear segmented color map.
-
- """
- segmentdata = {"red": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
- (0.2, 0.00, 0.00), (0.3, 0.00, 0.00),
- (0.4, 0.00, 0.00), (0.5, 1.00, 1.00),
- (0.6, 1.00, 1.00), (0.7, 1.00, 1.00),
- (0.8, 0.83, 0.83), (0.9, 0.67, 0.67),
- (1.0, 0.50, 0.50)],
- "green": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
- (0.2, 0.00, 0.00), (0.3, 0.30, 0.30),
- (0.4, 0.70, 0.70), (0.5, 1.00, 1.00),
- (0.6, 0.70, 0.70), (0.7, 0.30, 0.30),
- (0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
- (1.0, 0.00, 0.00)],
- "blue": [(0.0, 0.50, 0.50), (0.1, 0.67, 0.67),
- (0.2, 0.83, 0.83), (0.3, 1.00, 1.00),
- (0.4, 1.00, 1.00), (0.5, 1.00, 1.00),
- (0.6, 0.00, 0.00), (0.7, 0.00, 0.00),
- (0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
- (1.0, 0.00, 0.00)]}
-
- return cm("Planck-like", segmentdata, N=int(ncolors), gamma=1.0)
-
-##-----------------------------------------------------------------------------
diff --git a/nifty/nifty_random.py b/nifty/nifty_random.py
deleted file mode 100644
index 3b6885067940cbf1f8f2e4b366d721ee990efefe..0000000000000000000000000000000000000000
--- a/nifty/nifty_random.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# NIFTY (Numerical Information Field Theory) has been developed at the
-# Max-Planck-Institute for Astrophysics.
-##
-# Copyright (C) 2013 Max-Planck-Society
-##
-# Author: Marco Selig
-# Project homepage:
-##
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-##
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-##
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-import numpy as np
-from nifty.config import about
-
-
-# -----------------------------------------------------------------------------
-
-
-class random(object):
- """
- .. __
- .. / /
- .. _____ ____ __ __ ___ ____/ / ______ __ ____ ___
- .. / __/ / _ / / _ | / _ / / _ | / _ _ |
- .. / / / /_/ / / / / / / /_/ / / /_/ / / / / / / /
- .. /__/ \______| /__/ /__/ \______| \______/ /__/ /__/ /__/ class
-
- NIFTY (static) class for pseudo random number generators.
-
- """
- __init__ = None
-
- # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def parse_arguments(domain, **kwargs):
- """
- Analyses the keyword arguments for supported or necessary ones.
-
- Parameters
- ----------
- domain : space
- Space wherein the random field values live.
- random : string, *optional*
- Specifies a certain distribution to be drawn from using a
- pseudo random number generator. Supported distributions are:
-
- - "pm1" (uniform distribution over {+1,-1} or {+1,+i,-1,-i}
- - "gau" (normal distribution with zero-mean and a given
- standard deviation or variance)
- - "syn" (synthesizes from a given power spectrum)
- - "uni" (uniform distribution over [vmin,vmax[)
-
- dev : {scalar, list, ndarray, field}, *optional*
- Standard deviation of the normal distribution if
- ``random == "gau"`` (default: None).
- var : {scalar, list, ndarray, field}, *optional*
- Variance of the normal distribution (outranks the standard
- deviation) if ``random == "gau"`` (default: None).
- spec : {scalar, list, array, field, function}, *optional*
- Power spectrum for ``random == "syn"`` (default: 1).
- size : integer, *optional*
- Number of irreducible bands for ``random == "syn"``
- (default: None).
- pindex : numpy.ndarray, *optional*
- Indexing array giving the power spectrum index of each band
- (default: None).
- kindex : numpy.ndarray, *optional*
- Scale of each irreducible band (default: None).
- vmax : {scalar, list, ndarray, field}, *optional*
- Upper limit of the uniform distribution if ``random == "uni"``
- (default: 1).
-
- Returns
- -------
- arg : list
- Ordered list of arguments (to be processed in
- ``get_random_values`` of the domain).
-
- Other Parameters
- ----------------
- codomain : nifty.space, *optional*
- A compatible codomain for power indexing (default: None).
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on
- logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to
- ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- Raises
- ------
- KeyError
- If the `random` key is not supporrted.
-
- """
- if "random" in kwargs:
- key = kwargs.get("random")
- else:
- return None
-
- if key == "pm1":
- return {'random': key}
-
- elif key == "gau":
- mean = kwargs.get('mean', None)
- std = kwargs.get('std', None)
- return {'random': key,
- 'mean': mean,
- 'std': std}
-
- elif key == "syn":
- pindex = kwargs.get('pindex', None)
- kindex = kwargs.get('kindex', None)
- size = kwargs.get('size', None)
- log = kwargs.get('log', 'default')
- nbin = kwargs.get('nbin', 'default')
- binbounds = kwargs.get('binbounds', 'default')
- spec = kwargs.get('spec', 1)
- codomain = kwargs.get('codomain', None)
-
- # check which domain should be taken for powerindexing
- if domain.check_codomain(codomain) and codomain.harmonic:
- harmonic_domain = codomain
- elif domain.harmonic:
- harmonic_domain = domain
- else:
- harmonic_domain = domain.get_codomain()
-
- # building kpack
- if pindex is not None and kindex is not None:
- pindex = domain.cast(pindex, dtype=np.dtype('int'))
- kpack = [pindex, kindex]
- else:
- kpack = None
-
- # simply put size and kindex into enforce_power
- # if one or both are None, enforce power will fix that
- spec = harmonic_domain.enforce_power(spec,
- size=size,
- kindex=kindex)
-
- return {'random': key,
- 'spec': spec,
- 'kpack': kpack,
- 'harmonic_domain': harmonic_domain,
- 'log': log,
- 'nbin': nbin,
- 'binbounds': binbounds}
-
- elif key == "uni":
- vmin = domain.dtype.type(kwargs.get('vmin', 0))
- vmax = domain.dtype.type(kwargs.get('vmax', 1))
- return {'random': key,
- 'vmin': vmin,
- 'vmax': vmax}
-
- else:
- raise KeyError(about._errors.cstring(
- "ERROR: unsupported random key '" + str(key) + "'."))
-
- # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def pm1(dtype=np.dtype('int'), shape=1):
- """
- Generates random field values according to an uniform distribution
- over {+1,-1} or {+1,+i,-1,-i}, respectively.
-
- Parameters
- ----------
- dtype : type, *optional*
- Data type of the field values (default: int).
- shape : {integer, tuple, list, ndarray}, *optional*
- Split up dimension of the space (default: 1).
-
- Returns
- -------
- x : ndarray
- Random field values (with correct dtype and shape).
-
- """
- size = reduce(lambda x, y: x * y, shape)
-
- if issubclass(dtype.type, np.complexfloating):
- x = np.array([1 + 0j, 0 + 1j, -1 + 0j, 0 - 1j],
- dtype=dtype)[np.random.randint(4,
- high=None,
- size=size)]
- else:
- x = 2 * np.random.randint(2, high=None, size=size) - 1
-
- return x.astype(dtype).reshape(shape)
-
- # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def gau(dtype=np.dtype('float64'), shape=(1,), mean=None, std=None):
- """
- Generates random field values according to a normal distribution.
-
- Parameters
- ----------
- dtype : type, *optional*
- Data type of the field values (default: float64).
- shape : {integer, tuple, list, ndarray}, *optional*
- Split up dimension of the space (default: 1).
- mean : {scalar, ndarray}, *optional*
- Mean of the normal distribution (default: 0).
- dev : {scalar, ndarray}, *optional*
- Standard deviation of the normal distribution (default: 1).
- var : {scalar, ndarray}, *optional*
- Variance of the normal distribution (outranks the standard
- deviation) (default: None).
-
- Returns
- -------
- x : ndarray
- Random field values (with correct dtype and shape).
-
- Raises
- ------
- ValueError
- If the array dimension of `mean`, `dev` or `var` mismatch with
- `shape`.
-
- """
- size = reduce(lambda x, y: x * y, shape)
-
- if issubclass(dtype.type, np.complexfloating):
- x = np.empty(size, dtype=dtype)
- x.real = np.random.normal(loc=0, scale=np.sqrt(0.5), size=size)
- x.imag = np.random.normal(loc=0, scale=np.sqrt(0.5), size=size)
- else:
- x = np.random.normal(loc=0, scale=1, size=size)
-
- if std is not None:
- if np.size(std) == 1:
- x *= np.abs(std)
- elif np.size(std) == size:
- x *= np.absolute(std).flatten()
- else:
- raise ValueError(about._errors.cstring(
- "ERROR: dimension mismatch ( " + str(np.size(std)) +
- " <> " + str(size) + " )."))
-
- if mean is not None:
- if np.size(mean) == 1:
- x += mean
- elif np.size(mean) == size:
- x += np.array(mean).flatten(order='C')
- else:
- raise ValueError(about._errors.cstring(
- "ERROR: dimension mismatch ( " + str(np.size(mean)) +
- " <> " + str(size) + " )."))
-
- return x.astype(dtype).reshape(shape)
-
- # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- @staticmethod
- def uni(dtype=np.dtype('float64'), shape=1, vmin=0, vmax=1):
- """
- Generates random field values according to an uniform distribution
- over [vmin,vmax[.
-
- Parameters
- ----------
- dtype : type, *optional*
- Data type of the field values (default: float64).
- shape : {integer, tuple, list, ndarray}, *optional*
- Split up dimension of the space (default: 1).
-
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution (default: 0).
- vmax : {scalar, list, ndarray, field}, *optional*
- Upper limit of the uniform distribution (default: 1).
-
- Returns
- -------
- x : ndarray
- Random field values (with correct dtype and shape).
-
- """
- size = reduce(lambda x, y: x * y, shape)
- if(np.size(vmin) > 1):
- vmin = np.array(vmin).flatten(order='C')
- if(np.size(vmax) > 1):
- vmax = np.array(vmax).flatten(order='C')
-
- if(dtype in [np.dtype('complex64'), np.dtype('complex128')]):
- x = np.empty(size, dtype=dtype, order='C')
- x.real = (vmax - vmin) * np.random.random(size=size) + vmin
- x.imag = (vmax - vmin) * np.random.random(size=size) + vmin
- elif(dtype in [np.dtype('int8'), np.dtype('int16'), np.dtype('int32'),
- np.dtype('int64')]):
- x = np.random.random_integers(
- min(vmin, vmax), high=max(vmin, vmax), size=size)
- else:
- x = (vmax - vmin) * np.random.random(size=size) + vmin
-
- return x.astype(dtype).reshape(shape, order='C')
-
- # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __repr__(self):
- return ""
-
-# -----------------------------------------------------------------------------
diff --git a/nifty/nifty_simple_math.py b/nifty/nifty_simple_math.py
index 785fcfb36ecfb4126bd4d326556a20dd134a0b06..3b3af9486001e4518ee061c84ed22f939269776a 100644
--- a/nifty/nifty_simple_math.py
+++ b/nifty/nifty_simple_math.py
@@ -22,8 +22,6 @@
##-----------------------------------------------------------------------------
import numpy as np
-#from nifty.field import Field
-from nifty.config import about
def vdot(x, y):
@@ -490,7 +488,7 @@ def log(x, base=None):
if np.all(base > 0):
return _math_helper(x, np.log)/np.log(base)
else:
- raise ValueError(about._errors.cstring("ERROR: invalid input basis."))
+ raise ValueError("invalid input basis.")
def conjugate(x):
diff --git a/nifty/nifty_utilities.py b/nifty/nifty_utilities.py
index ad30cebe678887110bb77f2662b3be009fb14e37..23a2f739cec3d31f67efe0d54c728337bb21a56e 100644
--- a/nifty/nifty_utilities.py
+++ b/nifty/nifty_utilities.py
@@ -3,8 +3,6 @@
import numpy as np
from itertools import product
-from nifty.config import about
-
def get_slice_list(shape, axes):
"""
@@ -32,14 +30,11 @@ def get_slice_list(shape, axes):
"""
if not shape:
- raise ValueError(about._errors.cstring("ERROR: shape cannot be None."))
+ raise ValueError("shape cannot be None.")
if axes:
if not all(axis < len(shape) for axis in axes):
- raise ValueError(
- about._errors.cstring("ERROR: \
- axes(axis) does not match shape.")
- )
+ raise ValueError("axes(axis) does not match shape.")
axes_select = [0 if x in axes else 1 for x, y in enumerate(shape)]
axes_iterables = \
[range(y) for x, y in enumerate(shape) if x not in axes]
@@ -234,7 +229,7 @@ def cast_axis_to_tuple(axis, length):
axis = (int(axis),)
else:
raise TypeError(
- "ERROR: Could not convert axis-input to tuple of ints")
+ "Could not convert axis-input to tuple of ints")
# shift negative indices to positive ones
axis = tuple(item if (item >= 0) else (item + length) for item in axis)
@@ -281,4 +276,4 @@ def get_default_codomain(domain):
# TODO: get the preferred transformation path from config
return LMGLTransformation.get_codomain(domain)
else:
- raise TypeError(about._errors.cstring('ERROR: unknown domain'))
+ raise TypeError('ERROR: unknown domain')
diff --git a/nifty/operators/__init__.py b/nifty/operators/__init__.py
index 1026adea32c0d15be477b0660efbfc0e518e8efb..2c981a319d9b58b0cb1fcba533909476301261a7 100644
--- a/nifty/operators/__init__.py
+++ b/nifty/operators/__init__.py
@@ -32,24 +32,3 @@ from smoothing_operator import SmoothingOperator
from fft_operator import *
from propagator_operator import PropagatorOperator
-
-from nifty_operators import operator,\
- diagonal_operator,\
- power_operator,\
- projection_operator,\
- vecvec_operator,\
- response_operator,\
- invertible_operator,\
- propagator_operator,\
- identity,\
- identity_operator
-
-
-from nifty_probing import prober,\
- trace_prober,\
- inverse_trace_prober,\
- diagonal_prober,\
- inverse_diagonal_prober
-
-from nifty_minimization import conjugate_gradient,\
- steepest_descent
diff --git a/nifty/operators/diagonal_operator/diagonal_operator.py b/nifty/operators/diagonal_operator/diagonal_operator.py
index 0b980fb57307ca94ef84da79fc86ca2254ac34b3..b282e9cb42365b688c9cd574317c3d8123a2e0ab 100644
--- a/nifty/operators/diagonal_operator/diagonal_operator.py
+++ b/nifty/operators/diagonal_operator/diagonal_operator.py
@@ -5,11 +5,13 @@ import numpy as np
from d2o import distributed_data_object,\
STRATEGIES as DISTRIBUTION_STRATEGIES
-from nifty.config import about,\
- nifty_configuration as gc
+from nifty.config import nifty_configuration as gc
from nifty.field import Field
from nifty.operators.endomorphic_operator import EndomorphicOperator
+import logging
+logger = logging.getLogger('NIFTy.DiagonalOperator')
+
class DiagonalOperator(EndomorphicOperator):
@@ -117,11 +119,11 @@ class DiagonalOperator(EndomorphicOperator):
elif isinstance(val, Field):
distribution_strategy = val.distribution_strategy
else:
- about.warnings.cprint("WARNING: Datamodel set to default!")
+ logger.info("Datamodel set to default!")
distribution_strategy = gc['default_distribution_strategy']
elif distribution_strategy not in DISTRIBUTION_STRATEGIES['all']:
- raise ValueError(about._errors.cstring(
- "ERROR: Invalid distribution_strategy!"))
+ raise ValueError(
+ "Invalid distribution_strategy!")
return distribution_strategy
def set_diagonal(self, diagonal, bare=False, copy=True):
diff --git a/nifty/operators/fft_operator/fft_operator.py b/nifty/operators/fft_operator/fft_operator.py
index 578a5895efbc937f5bbb3ba2acef9613ffe4b223..1de50271c18ea2c11945b7a75865b3949053c512 100644
--- a/nifty/operators/fft_operator/fft_operator.py
+++ b/nifty/operators/fft_operator/fft_operator.py
@@ -1,4 +1,4 @@
-from nifty.config import about
+
import nifty.nifty_utilities as utilities
from nifty.spaces import RGSpace,\
GLSpace,\
@@ -40,15 +40,15 @@ class FFTOperator(LinearOperator):
# Initialize domain and target
if len(self.domain) != 1:
- raise ValueError(about._errors.cstring(
+ raise ValueError(
'ERROR: TransformationOperator accepts only exactly one '
- 'space as input domain.'))
+ 'space as input domain.')
if self.field_type != ():
- raise ValueError(about._errors.cstring(
+ raise ValueError(
'ERROR: TransformationOperator field-type must be an '
'empty tuple.'
- ))
+ )
if target is None:
target = (self.get_default_codomain(self.domain[0]), )
@@ -59,16 +59,16 @@ class FFTOperator(LinearOperator):
forward_class = self.transformation_dictionary[
(self.domain[0].__class__, self.target[0].__class__)]
except KeyError:
- raise TypeError(about._errors.cstring(
- "ERROR: No forward transformation for domain-target pair "
- "found."))
+ raise TypeError(
+ "No forward transformation for domain-target pair "
+ "found.")
try:
backward_class = self.transformation_dictionary[
(self.target[0].__class__, self.domain[0].__class__)]
except KeyError:
- raise TypeError(about._errors.cstring(
- "ERROR: No backward transformation for domain-target pair "
- "found."))
+ raise TypeError(
+ "No backward transformation for domain-target pair "
+ "found.")
self._forward_transformation = TransformationCache.create(
forward_class, self.domain[0], self.target[0], module=module)
@@ -156,13 +156,13 @@ class FFTOperator(LinearOperator):
try:
codomain_class = cls.default_codomain_dictionary[domain_class]
except KeyError:
- raise TypeError(about._errors.cstring("ERROR: unknown domain"))
+ raise TypeError("unknown domain")
try:
transform_class = cls.transformation_dictionary[(domain_class,
codomain_class)]
except KeyError:
- raise TypeError(about._errors.cstring(
- "ERROR: No transformation for domain-codomain pair found."))
+ raise TypeError(
+ "No transformation for domain-codomain pair found.")
return transform_class.get_codomain(domain)
diff --git a/nifty/operators/fft_operator/transformations/gllmtransformation.py b/nifty/operators/fft_operator/transformations/gllmtransformation.py
index 934ca364d63247304d08960c1ae43024895a8b53..f08794ca24f23a277b748f51713edd96c4ac777c 100644
--- a/nifty/operators/fft_operator/transformations/gllmtransformation.py
+++ b/nifty/operators/fft_operator/transformations/gllmtransformation.py
@@ -1,10 +1,13 @@
import numpy as np
-from nifty.config import dependency_injector as gdi,\
- about
+
+from nifty.config import dependency_injector as gdi
from nifty import GLSpace, LMSpace
from slicing_transformation import SlicingTransformation
import lm_transformation_factory as ltf
+import logging
+logger = logging.getLogger('NIFTy.GLLMTransformation')
+
libsharp = gdi.get('libsharp_wrapper_gl')
@@ -14,8 +17,8 @@ class GLLMTransformation(SlicingTransformation):
def __init__(self, domain, codomain=None, module=None):
if 'libsharp_wrapper_gl' not in gdi:
- raise ImportError(about._errors.cstring(
- "The module libsharp is needed but not available."))
+ raise ImportError(
+ "The module libsharp is needed but not available.")
super(GLLMTransformation, self).__init__(domain, codomain, module)
@@ -39,8 +42,8 @@ class GLLMTransformation(SlicingTransformation):
"""
if not isinstance(domain, GLSpace):
- raise TypeError(about._errors.cstring(
- "ERROR: domain needs to be a GLSpace"))
+ raise TypeError(
+ "domain needs to be a GLSpace")
nlat = domain.nlat
lmax = nlat - 1
@@ -57,12 +60,12 @@ class GLLMTransformation(SlicingTransformation):
@staticmethod
def check_codomain(domain, codomain):
if not isinstance(domain, GLSpace):
- raise TypeError(about._errors.cstring(
- "ERROR: domain is not a GLSpace"))
+ raise TypeError(
+ "domain is not a GLSpace")
if not isinstance(codomain, LMSpace):
- raise TypeError(about._errors.cstring(
- "ERROR: codomain must be a LMSpace."))
+ raise TypeError(
+ "codomain must be a LMSpace.")
nlat = domain.nlat
nlon = domain.nlon
@@ -70,16 +73,16 @@ class GLLMTransformation(SlicingTransformation):
mmax = codomain.mmax
if lmax != mmax:
- raise ValueError(about._errors.cstring(
- 'ERROR: codomain has lmax != mmax.'))
+ raise ValueError(
+ 'ERROR: codomain has lmax != mmax.')
if lmax != nlat - 1:
- raise ValueError(about._errors.cstring(
- 'ERROR: codomain has lmax != nlat - 1.'))
+ raise ValueError(
+ 'ERROR: codomain has lmax != nlat - 1.')
if nlon != 2 * nlat - 1:
- raise ValueError(about._errors.cstring(
- 'ERROR: domain has nlon != 2 * nlat - 1.'))
+ raise ValueError(
+ 'ERROR: domain has nlon != 2 * nlat - 1.')
return None
@@ -118,8 +121,8 @@ class GLLMTransformation(SlicingTransformation):
elif inp.dtype == np.dtype('float64'):
return libsharp.map2alm(inp, **kwargs)
else:
- about.warnings.cprint("WARNING: performing dtype conversion for "
- "libsharp compatibility.")
+ logger.debug("performing dtype conversion for libsharp "
+ "compatibility.")
casted_inp = inp.astype(np.dtype('float64'), copy=False)
result = libsharp.map2alm(casted_inp, **kwargs)
- return result
\ No newline at end of file
+ return result
diff --git a/nifty/operators/fft_operator/transformations/hplmtransformation.py b/nifty/operators/fft_operator/transformations/hplmtransformation.py
index 9e972ed6b8804b0fc3cd5f3142df760f6fda7280..59acee863dd29c01ac3cf2268552fb044ba3ca5c 100644
--- a/nifty/operators/fft_operator/transformations/hplmtransformation.py
+++ b/nifty/operators/fft_operator/transformations/hplmtransformation.py
@@ -1,6 +1,6 @@
import numpy as np
-from nifty.config import dependency_injector as gdi,\
- about
+
+from nifty.config import dependency_injector as gdi
from nifty import HPSpace, LMSpace
from slicing_transformation import SlicingTransformation
@@ -15,8 +15,8 @@ class HPLMTransformation(SlicingTransformation):
def __init__(self, domain, codomain=None, module=None):
if 'healpy' not in gdi:
- raise ImportError(about._errors.cstring(
- "The module healpy is needed but not available"))
+ raise ImportError(
+ "The module healpy is needed but not available")
super(HPLMTransformation, self).__init__(domain, codomain, module)
@@ -40,8 +40,8 @@ class HPLMTransformation(SlicingTransformation):
"""
if not isinstance(domain, HPSpace):
- raise TypeError(about._errors.cstring(
- "ERROR: domain needs to be a HPSpace"))
+ raise TypeError(
+ "domain needs to be a HPSpace")
lmax = 3 * domain.nside - 1
@@ -52,19 +52,19 @@ class HPLMTransformation(SlicingTransformation):
@staticmethod
def check_codomain(domain, codomain):
if not isinstance(domain, HPSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: domain is not a HPSpace'))
+ raise TypeError(
+ 'ERROR: domain is not a HPSpace')
if not isinstance(codomain, LMSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: codomain must be a LMSpace.'))
+ raise TypeError(
+ 'ERROR: codomain must be a LMSpace.')
nside = domain.nside
lmax = codomain.lmax
if 3 * nside - 1 != lmax:
- raise ValueError(about._errors.cstring(
- 'ERROR: codomain has 3*nside-1 != lmax.'))
+ raise ValueError(
+ 'ERROR: codomain has 3*nside-1 != lmax.')
return None
diff --git a/nifty/operators/fft_operator/transformations/lmgltransformation.py b/nifty/operators/fft_operator/transformations/lmgltransformation.py
index 4fe809f65f4b2a18d52a9168e35b2a39e9591649..06950054c6974621268f6c34730dc5c847bae0a1 100644
--- a/nifty/operators/fft_operator/transformations/lmgltransformation.py
+++ b/nifty/operators/fft_operator/transformations/lmgltransformation.py
@@ -1,11 +1,13 @@
import numpy as np
-from nifty.config import dependency_injector as gdi,\
- about
+from nifty.config import dependency_injector as gdi
from nifty import GLSpace, LMSpace
from slicing_transformation import SlicingTransformation
import lm_transformation_factory as ltf
+import logging
+logger = logging.getLogger('NIFTy.LMGLTransformation')
+
libsharp = gdi.get('libsharp_wrapper_gl')
@@ -15,8 +17,8 @@ class LMGLTransformation(SlicingTransformation):
def __init__(self, domain, codomain=None, module=None):
if 'libsharp_wrapper_gl' not in gdi:
- raise ImportError(about._errors.cstring(
- "The module libsharp is needed but not available."))
+ raise ImportError(
+ "The module libsharp is needed but not available.")
super(LMGLTransformation, self).__init__(domain, codomain, module)
@@ -46,8 +48,8 @@ class LMGLTransformation(SlicingTransformation):
`arXiv:1303.4945 `_
"""
if not isinstance(domain, LMSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: domain needs to be a LMSpace'))
+ raise TypeError(
+ 'ERROR: domain needs to be a LMSpace')
if domain.dtype is np.dtype('float32'):
new_dtype = np.float32
@@ -64,12 +66,12 @@ class LMGLTransformation(SlicingTransformation):
@staticmethod
def check_codomain(domain, codomain):
if not isinstance(domain, LMSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: domain is not a LMSpace'))
+ raise TypeError(
+ 'ERROR: domain is not a LMSpace')
if not isinstance(codomain, GLSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: codomain must be a GLSpace.'))
+ raise TypeError(
+ 'ERROR: codomain must be a GLSpace.')
nlat = codomain.nlat
nlon = codomain.nlon
@@ -77,16 +79,16 @@ class LMGLTransformation(SlicingTransformation):
mmax = domain.mmax
if lmax != mmax:
- raise ValueError(about._errors.cstring(
- 'ERROR: domain has lmax != mmax.'))
+ raise ValueError(
+ 'ERROR: domain has lmax != mmax.')
if nlat != lmax + 1:
- raise ValueError(about._errors.cstring(
- 'ERROR: codomain has nlat != lmax + 1.'))
+ raise ValueError(
+ 'ERROR: codomain has nlat != lmax + 1.')
if nlon != 2 * lmax + 1:
- raise ValueError(about._errors.cstring(
- 'ERROR: domain has nlon != 2 * lmax + 1.'))
+ raise ValueError(
+ 'ERROR: domain has nlon != 2 * lmax + 1.')
return None
@@ -126,8 +128,8 @@ class LMGLTransformation(SlicingTransformation):
elif inp.dtype == np.dtype('complex128'):
return libsharp.alm2map(inp, **kwargs)
else:
- about.warnings.cprint("WARNING: performing dtype conversion for "
- "libsharp compatibility.")
+ logger.debug("performing dtype conversion for libsharp "
+ "compatibility.")
casted_inp = inp.astype(np.dtype('complex128'), copy=False)
result = libsharp.alm2map(casted_inp, **kwargs)
return result
diff --git a/nifty/operators/fft_operator/transformations/lmhptransformation.py b/nifty/operators/fft_operator/transformations/lmhptransformation.py
index 9302249d8fb969b4c7d8ff1bb0651530391651bc..d1a88db4d09541c71ede65edf7afeadbc848e761 100644
--- a/nifty/operators/fft_operator/transformations/lmhptransformation.py
+++ b/nifty/operators/fft_operator/transformations/lmhptransformation.py
@@ -1,6 +1,5 @@
import numpy as np
-from nifty.config import dependency_injector as gdi,\
- about
+from nifty.config import dependency_injector as gdi
from nifty import HPSpace, LMSpace
from slicing_transformation import SlicingTransformation
import lm_transformation_factory as ltf
@@ -14,8 +13,8 @@ class LMHPTransformation(SlicingTransformation):
def __init__(self, domain, codomain=None, module=None):
if gdi.get('healpy') is None:
- raise ImportError(about._errors.cstring(
- "The module libsharp is needed but not available."))
+ raise ImportError(
+                "The module healpy is needed but not available.")
super(LMHPTransformation, self).__init__(domain, codomain, module)
@@ -44,8 +43,8 @@ class LMHPTransformation(SlicingTransformation):
Distributed on the Sphere", *ApJ* 622..759G.
"""
if not isinstance(domain, LMSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: domain needs to be a LMSpace'))
+ raise TypeError(
+ 'ERROR: domain needs to be a LMSpace')
nside = (domain.lmax + 1) // 3
result = HPSpace(nside=nside)
@@ -55,19 +54,19 @@ class LMHPTransformation(SlicingTransformation):
@staticmethod
def check_codomain(domain, codomain):
if not isinstance(domain, LMSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: domain is not a LMSpace'))
+ raise TypeError(
+ 'ERROR: domain is not a LMSpace')
if not isinstance(codomain, HPSpace):
- raise TypeError(about._errors.cstring(
- 'ERROR: codomain must be a HPSpace.'))
+ raise TypeError(
+ 'ERROR: codomain must be a HPSpace.')
nside = codomain.nside
lmax = domain.lmax
if 3*nside - 1 != lmax:
- raise ValueError(about._errors.cstring(
- 'ERROR: codomain has 3*nside -1 != lmax.'))
+ raise ValueError(
+ 'ERROR: codomain has 3*nside -1 != lmax.')
return None
diff --git a/nifty/operators/fft_operator/transformations/rg_transforms.py b/nifty/operators/fft_operator/transformations/rg_transforms.py
index b136e212247ca336d2243a07261f1940f90c6956..9944d6c49c72f67ad3bf45b4b1a62df1263ad9a0 100644
--- a/nifty/operators/fft_operator/transformations/rg_transforms.py
+++ b/nifty/operators/fft_operator/transformations/rg_transforms.py
@@ -2,9 +2,11 @@ import warnings
import numpy as np
from d2o import distributed_data_object, STRATEGIES
-from nifty.config import about, dependency_injector as gdi
+from nifty.config import dependency_injector as gdi
import nifty.nifty_utilities as utilities
-from nifty import nifty_configuration
+
+import logging
+logger = logging.getLogger('NIFTy.RGTransforms')
pyfftw = gdi.get('pyfftw')
@@ -131,7 +133,7 @@ class FFTW(Transform):
offset.reshape(offset.shape +
(1,) *
(np.array(
- args).ndim - 1)),
+ args).ndim - 1)),
1)),
(2,) * to_center.size)
# Cast the core to the smallest integers we can get
@@ -295,8 +297,7 @@ class FFTW(Transform):
def _repack_to_fftw_and_transform(self, val, axes, **kwargs):
temp_val = val.copy_empty(distribution_strategy='fftw')
- about.warnings.cprint('WARNING: Repacking d2o to fftw \
- distribution strategy')
+ logger.info("Repacking d2o to fftw distribution strategy")
temp_val.set_full_data(val, copy=False)
# Recursive call to transform
@@ -409,7 +410,7 @@ class FFTW(Transform):
# Check if the axes provided are valid given the shape
if axes is not None and \
not all(axis in range(len(val.shape)) for axis in axes):
- raise ValueError("ERROR: Provided axes does not match array shape")
+ raise ValueError("Provided axes does not match array shape")
# If the input is a numpy array we transform it locally
if not isinstance(val, distributed_data_object):
@@ -507,11 +508,6 @@ class FFTWLocalTransformInfo(FFTWTransformInfo):
def fftw_interface(self):
return self._fftw_interface
- @fftw_interface.setter
- def fftw_interface(self, interface):
- about.warnings.cprint('WARNING: FFTWLocalTransformInfo fftw_interface \
- cannot be modified')
-
class FFTWMPITransfromInfo(FFTWTransformInfo):
def __init__(self, domain, codomain, local_shape,
@@ -535,11 +531,6 @@ class FFTWMPITransfromInfo(FFTWTransformInfo):
def plan(self):
return self._plan
- @plan.setter
- def plan(self, plan):
- about.warnings.cprint('WARNING: FFTWMPITransfromInfo plan \
- cannot be modified')
-
class GFFT(Transform):
"""
@@ -584,7 +575,7 @@ class GFFT(Transform):
# Check if the axes provided are valid given the shape
if axes is not None and \
not all(axis in range(len(val.shape)) for axis in axes):
- raise ValueError("ERROR: Provided axes does not match array shape")
+ raise ValueError("Provided axes does not match array shape")
# GFFT doesn't accept d2o objects as input. Consolidate data from
# all nodes into numpy.ndarray before proceeding.
diff --git a/nifty/operators/fft_operator/transformations/rgrgtransformation.py b/nifty/operators/fft_operator/transformations/rgrgtransformation.py
index 0cbcaa8686fa954aa97645f0bff9c246169303fd..fe91bdb4131f8e2d95b46736628d91eba02146b0 100644
--- a/nifty/operators/fft_operator/transformations/rgrgtransformation.py
+++ b/nifty/operators/fft_operator/transformations/rgrgtransformation.py
@@ -1,9 +1,12 @@
import numpy as np
from transformation import Transformation
from rg_transforms import FFTW, GFFT
-from nifty.config import about, dependency_injector as gdi
+from nifty.config import dependency_injector as gdi
from nifty import RGSpace, nifty_configuration
+import logging
+logger = logging.getLogger('NIFTy.RGRGTransformation')
+
class RGRGTransformation(Transformation):
def __init__(self, domain, codomain=None, module=None):
@@ -101,8 +104,7 @@ class RGRGTransformation(Transformation):
if codomain.harmonic and not issubclass(codomain.dtype.type,
np.complexfloating):
- about.warnings.cprint(
- "WARNING: codomain is harmonic but dtype is real.")
+            logger.warning("codomain is harmonic but dtype is real.")
# Check if the distances match, i.e. dist' = 1 / (num * dist)
if not np.all(
diff --git a/nifty/operators/linear_operator/linear_operator.py b/nifty/operators/linear_operator/linear_operator.py
index 25a128498ac4e3ee0b4474e628ac47f438692935..07a7eeddc8653d7f19e29aa634566c1a7f2e75e4 100644
--- a/nifty/operators/linear_operator/linear_operator.py
+++ b/nifty/operators/linear_operator/linear_operator.py
@@ -2,7 +2,6 @@
import abc
-from nifty.config import about
from nifty.field import Field
from nifty.spaces import Space
from nifty.field_types import FieldType
@@ -25,9 +24,9 @@ class LinearOperator(object):
for d in domain:
if not isinstance(d, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: Given object contains something that is not a "
- "nifty.space."))
+ raise TypeError(
+ "Given object contains something that is not a "
+ "nifty.space.")
return domain
def _parse_field_type(self, field_type):
@@ -40,8 +39,8 @@ class LinearOperator(object):
for ft in field_type:
if not isinstance(ft, FieldType):
- raise TypeError(about._errors.cstring(
- "ERROR: Given object is not a nifty.FieldType."))
+ raise TypeError(
+ "Given object is not a nifty.FieldType.")
return field_type
@abc.abstractproperty
@@ -124,29 +123,29 @@ class LinearOperator(object):
return y
def _times(self, x, spaces, types):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'times'."))
+ raise NotImplementedError(
+ "no generic instance method 'times'.")
def _adjoint_times(self, x, spaces, types):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'adjoint_times'."))
+ raise NotImplementedError(
+ "no generic instance method 'adjoint_times'.")
def _inverse_times(self, x, spaces, types):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'inverse_times'."))
+ raise NotImplementedError(
+ "no generic instance method 'inverse_times'.")
def _adjoint_inverse_times(self, x, spaces, types):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'adjoint_inverse_times'."))
+ raise NotImplementedError(
+ "no generic instance method 'adjoint_inverse_times'.")
def _inverse_adjoint_times(self, x, spaces, types):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'inverse_adjoint_times'."))
+ raise NotImplementedError(
+ "no generic instance method 'inverse_adjoint_times'.")
def _check_input_compatibility(self, x, spaces, types, inverse=False):
if not isinstance(x, Field):
- raise ValueError(about._errors.cstring(
- "ERROR: supplied object is not a `nifty.Field`."))
+ raise ValueError(
+ "supplied object is not a `nifty.Field`.")
# sanitize the `spaces` and `types` input
spaces = utilities.cast_axis_to_tuple(spaces, len(x.domain))
@@ -169,27 +168,27 @@ class LinearOperator(object):
if spaces is None:
if self_domain != () and self_domain != x.domain:
- raise ValueError(about._errors.cstring(
- "ERROR: The operator's and and field's domains don't "
- "match."))
+ raise ValueError(
+ "The operator's and and field's domains don't "
+ "match.")
else:
for i, space_index in enumerate(spaces):
if x.domain[space_index] != self_domain[i]:
- raise ValueError(about._errors.cstring(
- "ERROR: The operator's and and field's domains don't "
- "match."))
+ raise ValueError(
+ "The operator's and and field's domains don't "
+ "match.")
if types is None:
if self_field_type != () and self_field_type != x.field_type:
- raise ValueError(about._errors.cstring(
- "ERROR: The operator's and and field's field_types don't "
- "match."))
+ raise ValueError(
+ "The operator's and and field's field_types don't "
+ "match.")
else:
for i, field_type_index in enumerate(types):
if x.field_type[field_type_index] != self_field_type[i]:
- raise ValueError(about._errors.cstring(
- "ERROR: The operator's and and field's field_type "
- "don't match."))
+ raise ValueError(
+ "The operator's and and field's field_type "
+ "don't match.")
return (spaces, types)
diff --git a/nifty/operators/nifty_minimization.py b/nifty/operators/nifty_minimization.py
deleted file mode 100644
index c2394f2e33c72851356d94deac419d050fdf9a99..0000000000000000000000000000000000000000
--- a/nifty/operators/nifty_minimization.py
+++ /dev/null
@@ -1,1285 +0,0 @@
-## NIFTY (Numerical Information Field Theory) has been developed at the
-## Max-Planck-Institute for Astrophysics.
-##
-## Copyright (C) 2013 Max-Planck-Society
-##
-## Author: Marco Selig
-## Project homepage:
-##
-## This program is free software: you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation, either version 3 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-## See the GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program. If not, see .
-
-"""
- .. __ ____ __
- .. /__/ / _/ / /_
- .. __ ___ __ / /_ / _/ __ __
- .. / _ | / / / _/ / / / / / /
- .. / / / / / / / / / /_ / /_/ /
- .. /__/ /__/ /__/ /__/ \___/ \___ / tools
- .. /______/
-
- This module extends NIFTY with a nifty set of tools including further
- operators, namely the :py:class:`invertible_operator` and the
- :py:class:`propagator_operator`, and minimization schemes, namely
- :py:class:`steepest_descent` and :py:class:`conjugate_gradient`. Those
- tools are supposed to support the user in solving information field
- theoretical problems (almost) without numerical pain.
-
-"""
-from __future__ import division
-#from nifty_core import *
-import numpy as np
-from nifty.config import notification, about
-from nifty.field import Field
-from nifty.nifty_simple_math import vdot
-
-
-#from nifty_core import space, \
-# field
-#from operators import operator, \
-# diagonal_operator
-
-
-
-##=============================================================================
-
-class conjugate_gradient(object):
- """
- .. _______ ____ __
- .. / _____/ / _ /
- .. / /____ __ / /_/ / __
- .. \______//__/ \____ //__/ class
- .. /______/
-
- NIFTY tool class for conjugate gradient
-
- This tool minimizes :math:`A x = b` with respect to `x` given `A` and
- `b` using a conjugate gradient; i.e., a step-by-step minimization
- relying on conjugated gradient directions. Further, `A` is assumed to
- be a positive definite and self-adjoint operator. The use of a
- preconditioner `W` that is roughly the inverse of `A` is optional.
- For details on the methodology refer to [#]_, for details on usage and
- output, see the notes below.
-
- Parameters
- ----------
- A : {operator, function}
- Operator `A` applicable to a field.
- b : field
- Resulting field of the operation `A(x)`.
- W : {operator, function}, *optional*
- Operator `W` that is a preconditioner on `A` and is applicable to a
- field (default: None).
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- reset : integer, *optional*
- Number of iterations after which to restart; i.e., forget previous
- conjugated directions (default: sqrt(b.dim)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- See Also
- --------
- scipy.sparse.linalg.cg
-
- Notes
- -----
- After initialization by `__init__`, the minimizer is started by calling
- it using `__call__`, which takes additional parameters. Notifications,
- if enabled, will state the iteration number, current step widths
- `alpha` and `beta`, the current relative residual `delta` that is
- compared to the tolerance, and the convergence level if changed.
- The minimizer will exit in three states: DEAD if alpha becomes
- infinite, QUIT if the maximum number of iterations is reached, or DONE
- if convergence is achieved. Returned will be the latest `x` and the
- latest convergence level, which can evaluate ``True`` for the exit
- states QUIT and DONE.
-
- References
- ----------
- .. [#] J. R. Shewchuk, 1994, `"An Introduction to the Conjugate
- Gradient Method Without the Agonizing Pain"
- `_
-
- Examples
- --------
- >>> b = field(point_space(2), val=[1, 9])
- >>> A = diagonal_operator(b.domain, diag=[4, 3])
- >>> x,convergence = conjugate_gradient(A, b, note=True)(tol=1E-4, clevel=3)
- iteration : 00000001 alpha = 3.3E-01 beta = 1.3E-03 delta = 3.6E-02
- iteration : 00000002 alpha = 2.5E-01 beta = 7.6E-04 delta = 1.0E-03
- iteration : 00000003 alpha = 3.3E-01 beta = 2.5E-04 delta = 1.6E-05 convergence level : 1
- iteration : 00000004 alpha = 2.5E-01 beta = 1.8E-06 delta = 2.1E-08 convergence level : 2
- iteration : 00000005 alpha = 2.5E-01 beta = 2.2E-03 delta = 1.0E-09 convergence level : 3
- ... done.
- >>> bool(convergence)
- True
- >>> x.val # yields 1/4 and 9/3
- array([ 0.25, 3. ])
-
- Attributes
- ----------
- A : {operator, function}
- Operator `A` applicable to a field.
- x : field
- Current field.
- b : field
- Resulting field of the operation `A(x)`.
- W : {operator, function}
- Operator `W` that is a preconditioner on `A` and is applicable to a
- field; can be ``None``.
- spam : function
- Callback function which is given the current `x` and iteration
- counter each iteration; can be ``None``.
- reset : integer
- Number of iterations after which to restart; i.e., forget previous
- conjugated directions (default: sqrt(b.dim)).
- note : notification
- Notification instance.
-
- """
- def __init__(self, A, b, W=None, spam=None, reset=None, note=False):
- """
- Initializes the conjugate_gradient and sets the attributes (except
- for `x`).
-
- Parameters
- ----------
- A : {operator, function}
- Operator `A` applicable to a field.
- b : field
- Resulting field of the operation `A(x)`.
- W : {operator, function}, *optional*
- Operator `W` that is a preconditioner on `A` and is applicable to a
- field (default: None).
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- reset : integer, *optional*
- Number of iterations after which to restart; i.e., forget previous
- conjugated directions (default: sqrt(b.dim)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- """
- if hasattr(A,"__call__") == True:
- self.A = A ## applies A
- else:
- raise AttributeError(about._errors.cstring(
- "ERROR: A must be callable!"))
-
- self.b = b
-
- if (W is None) or (hasattr(W,"__call__")==True):
- self.W = W ## applies W ~ A_inverse
- else:
- raise AttributeError(about._errors.cstring(
- "ERROR: W must be None or callable!"))
-
- self.spam = spam ## serves as callback given x and iteration number
-
- if reset is None: ## 2 < reset ~ sqrt(dim)
- self.reset = max(2,
- int(np.sqrt(b.domain.dim)))
- else:
- self.reset = max(2,
- int(reset))
-
- self.note = notification(default=bool(note))
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __call__(self, x0=None, **kwargs): ## > runs cg with/without preconditioner
- """
- Runs the conjugate gradient minimization.
-
- Parameters
- ----------
- x0 : field, *optional*
- Starting guess for the minimization.
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by current relative
- residual (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 1).
- limii : integer, *optional*
- Maximum number of iterations performed (default: 10 * b.dim).
-
- Returns
- -------
- x : field
- Latest `x` of the minimization.
- convergence : integer
- Latest convergence level indicating whether the minimization
- has converged or not.
-
- """
- self.x = self.b.copy_empty()
- self.x.set_val(new_val = 0)
- self.x.set_val(new_val = x0)
-
- if self.W is None:
- return self._calc_without(**kwargs)
- else:
- return self._calc_with(**kwargs)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
- def _calc_without(self, tol=1E-4, clevel=1, limii=None): ## > runs cg without preconditioner
- clevel = int(clevel)
- if limii is None:
- limii = 10*self.b.domain.dim
- else:
- limii = int(limii)
-
- r = self.b-self.A(self.x)
- print ('r', r.val)
- d = self.b.copy_empty()
- d.set_val(new_val = r.get_val())
- gamma = r.dot(d)
- if gamma==0:
- return self.x, clevel+1
- delta_ = np.absolute(gamma)**(-0.5)
-
-
- convergence = 0
- ii = 1
- while(True):
-
- # print ('gamma', gamma)
- q = self.A(d)
- # print ('q', q.val)
- alpha = gamma/d.dot(q) ## positive definite
- if np.isfinite(alpha) == False:
- self.note.cprint(
- "\niteration : %08u alpha = NAN\n... dead."%ii)
- return self.x, 0
- self.x += d * alpha
- # print ('x', self.x.val)
- if np.signbit(np.real(alpha)) == True:
- about.warnings.cprint(
- "WARNING: positive definiteness of A violated.")
- r = self.b-self.A(self.x)
- elif (ii%self.reset) == 0:
- r = self.b-self.A(self.x)
- else:
- r -= q * alpha
- # print ('r', r.val)
- gamma_ = gamma
- gamma = r.dot(r)
- # print ('gamma', gamma)
- beta = max(0, gamma/gamma_) ## positive definite
- # print ('d*beta', beta, (d*beta).val)
- d = r + d*beta
- # print ('d', d.val)
- delta = delta_*np.absolute(gamma)**0.5
- self.note.cflush(
- "\niteration : %08u alpha = %3.1E beta = %3.1E delta = %3.1E"\
- %(ii,np.real(alpha),np.real(beta),np.real(delta)))
- if gamma == 0:
- convergence = clevel+1
- self.note.cprint(" convergence level : INF\n... done.")
- break
- elif np.absolute(delta) runs cg with preconditioner
-
- clevel = int(clevel)
- if(limii is None):
- limii = 10*self.b.domain.dim
- else:
- limii = int(limii)
- r = self.b-self.A(self.x)
-
- d = self.W(r)
- gamma = r.dot(d)
- if gamma==0:
- return self.x, clevel+1
- delta_ = np.absolute(gamma)**(-0.5)
-
- convergence = 0
- ii = 1
- while(True):
- q = self.A(d)
- alpha = gamma/d.dot(q) ## positive definite
- if np.isfinite(alpha) == False:
- self.note.cprint(
- "\niteration : %08u alpha = NAN\n... dead."%ii)
- return self.x, 0
- self.x += d * alpha ## update
- if np.signbit(np.real(alpha)) == True:
- about.warnings.cprint(
- "WARNING: positive definiteness of A violated.")
- r = self.b-self.A(self.x)
- elif (ii%self.reset) == 0:
- r = self.b-self.A(self.x)
- else:
- r -= q * alpha
- s = self.W(r)
- gamma_ = gamma
- gamma = r.dot(s)
- if np.signbit(np.real(gamma)) == True:
- about.warnings.cprint(
- "WARNING: positive definiteness of W violated.")
- beta = max(0, gamma/gamma_) ## positive definite
- d = s + d*beta ## conjugated gradient
-
- delta = delta_*np.absolute(gamma)**0.5
- self.note.cflush(
- "\niteration : %08u alpha = %3.1E beta = %3.1E delta = %3.1E"\
- %(ii,np.real(alpha),np.real(beta),np.real(delta)))
- if gamma==0:
- convergence = clevel+1
- self.note.cprint(" convergence level : INF\n... done.")
- break
- elif np.absolute(delta)"
-
-##=============================================================================
-
-
-
-
-
-##=============================================================================
-
-class steepest_descent(object):
- """
- .. __
- .. / /
- .. _______ ____/ /
- .. / _____/ / _ /
- .. /_____ / __ / /_/ / __
- .. /_______//__/ \______|/__/ class
-
- NIFTY tool class for steepest descent minimization
-
- This tool minimizes a scalar energy-function by steepest descent using
- the functions gradient. Steps and step widths are choosen according to
- the Wolfe conditions [#]_. For details on usage and output, see the
- notes below.
-
- Parameters
- ----------
- eggs : function
- Given the current `x` it returns the tuple of energy and gradient.
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- a : {4-tuple}, *optional*
- Numbers obeying 0 < a1 ~ a2 < 1 ~ a3 < a4 that modify the step
- widths (default: (0.2,0.5,1,2)).
- c : {2-tuple}, *optional*
- Numbers obeying 0 < c1 < c2 < 1 that specify the Wolfe-conditions
- (default: (1E-4,0.9)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- See Also
- --------
- scipy.optimize.fmin_cg, scipy.optimize.fmin_ncg,
- scipy.optimize.fmin_l_bfgs_b
-
- Notes
- -----
- After initialization by `__init__`, the minimizer is started by calling
- it using `__call__`, which takes additional parameters. Notifications,
- if enabled, will state the iteration number, current step width `alpha`,
- current maximal change `delta` that is compared to the tolerance, and
- the convergence level if changed. The minimizer will exit in three
- states: DEAD if no step width above 1E-13 is accepted, QUIT if the
- maximum number of iterations is reached, or DONE if convergence is
- achieved. Returned will be the latest `x` and the latest convergence
- level, which can evaluate ``True`` for all exit states.
-
- References
- ----------
- .. [#] J. Nocedal and S. J. Wright, Springer 2006, "Numerical
- Optimization", ISBN: 978-0-387-30303-1 (print) / 978-0-387-40065-5
- `(online) `_
-
- Examples
- --------
- >>> def egg(x):
- ... E = 0.5*x.dot(x) # energy E(x) -- a two-dimensional parabola
- ... g = x # gradient
- ... return E,g
- >>> x = field(point_space(2), val=[1, 3])
- >>> x,convergence = steepest_descent(egg, note=True)(x0=x, tol=1E-4, clevel=3)
- iteration : 00000001 alpha = 1.0E+00 delta = 6.5E-01
- iteration : 00000002 alpha = 2.0E+00 delta = 1.4E-01
- iteration : 00000003 alpha = 1.6E-01 delta = 2.1E-03
- iteration : 00000004 alpha = 2.6E-03 delta = 3.0E-04
- iteration : 00000005 alpha = 2.0E-04 delta = 5.3E-05 convergence level : 1
- iteration : 00000006 alpha = 8.2E-05 delta = 4.4E-06 convergence level : 2
- iteration : 00000007 alpha = 6.6E-06 delta = 3.1E-06 convergence level : 3
- ... done.
- >>> bool(convergence)
- True
- >>> x.val # approximately zero
- array([ -6.87299426e-07 -2.06189828e-06])
-
- Attributes
- ----------
- x : field
- Current field.
- eggs : function
- Given the current `x` it returns the tuple of energy and gradient.
- spam : function
- Callback function which is given the current `x` and iteration
- counter each iteration; can be ``None``.
- a : {4-tuple}
- Numbers obeying 0 < a1 ~ a2 < 1 ~ a3 < a4 that modify the step
- widths (default: (0.2,0.5,1,2)).
- c : {2-tuple}
- Numbers obeying 0 < c1 < c2 < 1 that specify the Wolfe-conditions
- (default: (1E-4,0.9)).
- note : notification
- Notification instance.
-
- """
- def __init__(self,eggs,spam=None,a=(0.2,0.5,1,2),c=(1E-4,0.9),note=False):
- """
- Initializes the steepest_descent and sets the attributes (except
- for `x`).
-
- Parameters
- ----------
- eggs : function
- Given the current `x` it returns the tuple of energy and gradient.
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- a : {4-tuple}, *optional*
- Numbers obeying 0 < a1 ~ a2 < 1 ~ a3 < a4 that modify the step
- widths (default: (0.2,0.5,1,2)).
- c : {2-tuple}, *optional*
- Numbers obeying 0 < c1 < c2 < 1 that specify the Wolfe-conditions
- (default: (1E-4,0.9)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- """
- self.eggs = eggs ## returns energy and gradient
-
- self.spam = spam ## serves as callback given x and iteration number
- self.a = a ## 0 < a1 ~ a2 < 1 ~ a3 < a4
- self.c = c ## 0 < c1 < c2 < 1
- self.note = notification(default=bool(note))
-
- self._alpha = None ## last alpha
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __call__(self,x0,alpha=1,tol=1E-4,clevel=8,limii=100000):
- """
- Runs the steepest descent minimization.
-
- Parameters
- ----------
- x0 : field
- Starting guess for the minimization.
- alpha : scalar, *optional*
- Starting step width to be multiplied with normalized gradient
- (default: 1).
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by maximal change in
- `x` (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 8).
- limii : integer, *optional*
- Maximum number of iterations performed (default: 100,000).
-
- Returns
- -------
- x : field
- Latest `x` of the minimization.
- convergence : integer
- Latest convergence level indicating whether the minimization
- has converged or not.
-
- """
- if(not isinstance(x0,Field)):
- raise TypeError(about._errors.cstring("ERROR: invalid input."))
- self.x = x0
-
- ## check for exsisting alpha
- if(alpha is None):
- if(self._alpha is not None):
- alpha = self._alpha
- else:
- alpha = 1
-
- clevel = max(1,int(clevel))
- limii = int(limii)
-
- E,g = self.eggs(self.x) ## energy and gradient
- norm = g.norm() ## gradient norm
- if(norm==0):
- self.note.cprint("\niteration : 00000000 alpha = 0.0E+00 delta = 0.0E+00\n... done.")
- return self.x,clevel+2
-
- convergence = 0
- ii = 1
- while(True):
- x_,E,g,alpha,a = self._get_alpha(E,g,norm,alpha) ## "news",alpha,a
-
- if(alpha is None):
- self.note.cprint("\niteration : %08u alpha < 1.0E-13\n... dead."%ii)
- break
- else:
- delta = abs(g).max()*(alpha/norm)
- #delta = np.absolute(g.val).max()*(alpha/norm)
- self.note.cflush("\niteration : %08u alpha = %3.1E delta = %3.1E"%(ii,alpha,delta))
- ## update
- self.x = x_
-
- alpha *= a
-
- norm = g.norm() ## gradient norm
- if(delta==0):
- convergence = clevel+2
- self.note.cprint(" convergence level : %u\n... done."%convergence)
- break
- elif(delta determines the new alpha
- while(True):
- ## Wolfe conditions
- wolfe,x_,E_,g_,a = self._check_wolfe(E,g,norm,alpha)
- # wolfe,x_,E_,g_,a = self._check_strong_wolfe(E,g,norm,alpha)
- if(wolfe):
- return x_,E_,g_,alpha,a
- else:
- alpha *= a
- if(alpha<1E-13):
- return None,None,None,None,None
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def _check_wolfe(self,E,g,norm,alpha): ## > checks the Wolfe conditions
- x_ = self._get_x(g,norm,alpha)
- pg = norm
- E_,g_ = self.eggs(x_)
- if(E_>E+self.c[0]*alpha*pg):
- if(E_ checks the strong Wolfe conditions
-# x_ = self._get_x(g,norm,alpha)
-# pg = norm
-# E_,g_ = self.eggs(x_)
-# if(E_>E+self.c[0]*alpha*pg):
-# if(E_self.c[1]*np.absolute(pg)):
-# return True,x_,E_,g_,self.a[3]
-# return True,x_,E_,g_,self.a[2]
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def _get_x(self,g,norm,alpha): ## > updates x
- return self.x-g*(alpha/norm)
-
- ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
- def __repr__(self):
- return ""
-
-##=============================================================================
-
-
-##=============================================================================
-
-class quasi_newton_minimizer(object):
- """
- .. __
- .. / /
- .. _______ ____/ /
- .. / _____/ / _ /
- .. /_____ / __ / /_/ / __
- .. /_______//__/ \______|/__/ class
-
- NIFTY tool class for steepest descent minimization
-
- This tool minimizes a scalar energy-function by steepest descent using
- the functions gradient. Steps and step widths are choosen according to
- the Wolfe conditions [#]_. For details on usage and output, see the
- notes below.
-
- Parameters
- ----------
- eggs : function
- Given the current `x` it returns the tuple of energy and gradient.
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- a : {4-tuple}, *optional*
- Numbers obeying 0 < a1 ~ a2 < 1 ~ a3 < a4 that modify the step
- widths (default: (0.2,0.5,1,2)).
- c : {2-tuple}, *optional*
- Numbers obeying 0 < c1 < c2 < 1 that specify the Wolfe-conditions
- (default: (1E-4,0.9)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- See Also
- --------
- scipy.optimize.fmin_cg, scipy.optimize.fmin_ncg,
- scipy.optimize.fmin_l_bfgs_b
-
- Notes
- -----
- After initialization by `__init__`, the minimizer is started by calling
- it using `__call__`, which takes additional parameters. Notifications,
- if enabled, will state the iteration number, current step width `alpha`,
- current maximal change `delta` that is compared to the tolerance, and
- the convergence level if changed. The minimizer will exit in three
- states: DEAD if no step width above 1E-13 is accepted, QUIT if the
- maximum number of iterations is reached, or DONE if convergence is
- achieved. Returned will be the latest `x` and the latest convergence
- level, which can evaluate ``True`` for all exit states.
-
- References
- ----------
- .. [#] J. Nocedal and S. J. Wright, Springer 2006, "Numerical
- Optimization", ISBN: 978-0-387-30303-1 (print) / 978-0-387-40065-5
- `(online) `_
-
- Examples
- --------
- >>> def egg(x):
- ... E = 0.5*x.dot(x) # energy E(x) -- a two-dimensional parabola
- ... g = x # gradient
- ... return E,g
- >>> x = field(point_space(2), val=[1, 3])
- >>> x,convergence = steepest_descent(egg, note=True)(x0=x, tol=1E-4, clevel=3)
- iteration : 00000001 alpha = 1.0E+00 delta = 6.5E-01
- iteration : 00000002 alpha = 2.0E+00 delta = 1.4E-01
- iteration : 00000003 alpha = 1.6E-01 delta = 2.1E-03
- iteration : 00000004 alpha = 2.6E-03 delta = 3.0E-04
- iteration : 00000005 alpha = 2.0E-04 delta = 5.3E-05 convergence level : 1
- iteration : 00000006 alpha = 8.2E-05 delta = 4.4E-06 convergence level : 2
- iteration : 00000007 alpha = 6.6E-06 delta = 3.1E-06 convergence level : 3
- ... done.
- >>> bool(convergence)
- True
- >>> x.val # approximately zero
- array([ -6.87299426e-07 -2.06189828e-06])
-
- Attributes
- ----------
- x : field
- Current field.
- eggs : function
- Given the current `x` it returns the tuple of energy and gradient.
- spam : function
- Callback function which is given the current `x` and iteration
- counter each iteration; can be ``None``.
- a : {4-tuple}
- Numbers obeying 0 < a1 ~ a2 < 1 ~ a3 < a4 that modify the step
- widths (default: (0.2,0.5,1,2)).
- c : {2-tuple}
- Numbers obeying 0 < c1 < c2 < 1 that specify the Wolfe-conditions
- (default: (1E-4,0.9)).
- note : notification
- Notification instance.
-
- """
- def __init__(self, f, fprime, f_args=(), callback=None,
- c1=1E-4, c2=0.9, note=False):
- """
- Initializes the steepest_descent and sets the attributes (except
- for `x`).
-
- Parameters
- ----------
- eggs : function
- Given the current `x` it returns the tuple of energy and gradient.
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- a : {4-tuple}, *optional*
- Numbers obeying 0 < a1 ~ a2 < 1 ~ a3 < a4 that modify the step
- widths (default: (0.2,0.5,1,2)).
- c : {2-tuple}, *optional*
- Numbers obeying 0 < c1 < c2 < 1 that specify the Wolfe-conditions
- (default: (1E-4,0.9)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
-
- """
- assert(callable(f))
- assert(callable(fprime))
- self.f = f
- self.fprime = fprime
-
- self.callback = callback
-
- self.line_searcher = line_search_strong_wolfe(f=self.f,
- fprime=self.fprime,
- f_args=f_args,
- c1=c1, c2=c2)
-
- self.note = notification(default=bool(note))
-
- self.memory = {}
-
- def __call__(self, x0, tol=1E-4, clevel=8, limii=100000):
- """
- Runs the steepest descent minimization.
-
- Parameters
- ----------
- x0 : field
- Starting guess for the minimization.
- alpha : scalar, *optional*
- Starting step width to be multiplied with normalized gradient
- (default: 1).
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by maximal change in
- `x` (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 8).
- limii : integer, *optional*
- Maximum number of iterations performed (default: 100,000).
-
- Returns
- -------
- x : field
- Latest `x` of the minimization.
- convergence : integer
- Latest convergence level indicating whether the minimization
- has converged or not.
-
- """
- if not isinstance(x0, Field):
- raise TypeError(about._errors.cstring("ERROR: invalid input."))
- self.x = x0
-
- clevel = max(1, int(clevel))
- limii = int(limii)
-
- convergence = 0
- f_k_minus_1 = None
- f_k = self.f(self.x)
- step_length = 0
- for i in xrange(limii):
- if self.callback is not None:
- try:
- self.callback(self.x, f_k, i)
- except StopIteration:
- self.note.cprint("\nCallback function stopped minization.")
- break
-
- # compute the the gradient for the current x
- gradient = self.fprime(self.x)
- gradient_norm = gradient.norm()
-
- # check if x is at a flat point
- if gradient_norm == 0:
- self.note.cprint(
- "\niteration : 00000000 step_length = 0.0E+00 " +
- "delta = 0.0E+00\n... done.")
- convergence = clevel+2
- break
-
- descend_direction = self._get_descend_direction(gradient,
- gradient_norm)
-
- # compute the step length, which minimizes f_k along the
- # search direction = the gradient
- self.line_searcher.set_coordinates(xk=self.x,
- pk=descend_direction,
- f_k=f_k,
- fprime_k=gradient,
- f_k_minus_1=f_k_minus_1)
- f_k_minus_1 = f_k
- step_length, f_k = self.line_searcher.perform_line_search()
-
- if step_length < 1E-13:
- self.note.cprint(
- "\niteration : %08u step_length < 1.0E-13\n... dead." % i)
- break
-
- # update x
- self.x += descend_direction*step_length
-
- # check convergence
- delta = abs(gradient).max() * (step_length/gradient_norm)
- self.note.cflush(
- "\niteration : %08u step_length = %3.1E delta = %3.1E"
- % (i, step_length, delta))
- if delta == 0:
- convergence = clevel + 2
- self.note.cprint(" convergence level : %u\n... done." %
- convergence)
- break
- elif delta < tol:
- convergence += 1
- self.note.cflush(" convergence level : %u" % convergence)
- if convergence == clevel:
- convergence += int(i == clevel)
- self.note.cprint("\n... done.")
- break
- else:
- convergence = max(0, convergence-1)
-
- else:
- self.note.cprint("\n... quit.")
-
- return self.x, convergence
-
- def _get_descend_direction(self, gradient, gradient_norm):
- raise NotImplementedError
-
- def __repr__(self):
- return ""
-
-
-class steepest_descent_new(quasi_newton_minimizer):
- def _get_descend_direction(self, gradient, gradient_norm):
- return gradient/(-gradient_norm)
-
-class lbfgs(quasi_newton_minimizer):
- def __init__(self, *args, **kwargs):
- super(lbfgs, self).__init__(*args, **kwargs)
- # self.memory...
-
-
- def _get_descend_direction(self, gradient, gradient_norm):
- pass
-
-
-
-# -----------------------------------------------------------------------------
-# Pure-Python Wolfe line and scalar searches
-# Based on scipy implementation
-# -----------------------------------------------------------------------------
-
-class line_search_strong_wolfe(object):
- """
- Class for finding a step size that satisfies the strong Wolfe conditions.
- """
-
- def __init__(self, f, fprime, f_args=(), c1=1e-4, c2=0.9,
- max_step_size=50):
-
- """
- Parameters
- ----------
-
- f : callable f(x, *args)
- Objective function.
-
- fprime : callable f'(x, *args)
- Objective functions gradient.
-
- f_args : tuple (optional)
- Additional arguments passed to objective function and its
- derivation.
-
- c1 : float (optional)
- Parameter for Armijo condition rule.
-
- c2 : float (optional)
- Parameter for curvature condition rule.
-
- max_step_size : float (optional)
- Maximum step size
- """
-
- assert(callable(f))
- assert(callable(fprime))
- self.c1 = np.float(c1)
- self.c2 = np.float(c2)
- self.max_step_size = max_step_size
- self.max_zoom_iterations = 100
-
- self.f = f
- self.fprime = fprime
- self.f_args = f_args
-
- self.xk = None
- self.pk = None
-
- self.f_k = None
- self.f_k_minus_1 = None
- self.fprime_k = None
-
- self._last_alpha_star = 1.
-
- def set_coordinates(self, xk, pk, f_k=None, fprime_k=None,
- f_k_minus_1=None):
- """
- Set the coordinates for a new line search.
-
- Parameters
- ----------
- xk : ndarray, d2o, field
- Starting point.
-
- pk : ndarray, d2o, field
- Unit vector in search direction.
-
- f_k : float (optional)
- Function value f(x_k).
-
- fprime_k : ndarray, d2o, field (optional)
- Function value fprime(xk).
-
- f_k_minus_1 : None, float (optional)
- Function value f(x_k-1).
-
- """
-
- self.xk = xk
- self.pk = pk
-
- if f_k is None:
- self.f_k = self.f(xk)
- else:
- self.f_k = f_k
-
- if fprime_k is None:
- self.fprime_k = self.fprime(xk)
- else:
- self.fprime_k = fprime_k
-
- self.f_k_minus_1 = f_k_minus_1
-
- def _phi(self, alpha):
- if alpha == 0:
- value = self.f_k
- else:
- value = self.f(self.xk + self.pk*alpha, *self.f_args)
- return value
-
- def _phiprime(self, alpha):
- if alpha == 0:
- gradient = self.fprime_k
- else:
- gradient = self.fprime(self.xk + self.pk*alpha, *self.f_args)
- return vdot(gradient, self.pk)
-
- def perform_line_search(self, c1=None, c2=None, max_step_size=None,
- max_iterations=10):
- if c1 is None:
- c1 = self.c1
- if c2 is None:
- c2 = self.c2
- if max_step_size is None:
- max_step_size = self.max_step_size
-
- # initialize the zero phis
- old_phi_0 = self.f_k_minus_1
- phi_0 = self._phi(0.)
- phiprime_0 = self._phiprime(0.)
-
- if phiprime_0 == 0:
- about.warnings.cprint(
- "WARNING: Flat gradient in search direction.")
- return 0., 0.
-
- # set alphas
- alpha0 = 0.
- if old_phi_0 is not None and phiprime_0 != 0:
- alpha1 = min(1.0, 1.01*2*(phi_0 - old_phi_0)/phiprime_0)
- if alpha1 < 0:
- alpha1 = 1.0
- else:
- alpha1 = 1.0
-
-# alpha1 = 1.
-# alpha1 = 1.01*self._last_alpha_star
-
-
- # give the alpha0 phis the right init value
- phi_alpha0 = phi_0
- phiprime_alpha0 = phiprime_0
-
- # start the minimization loop
- for i in xrange(max_iterations):
- phi_alpha1 = self._phi(alpha1)
-
- if alpha1 == 0:
- about.warnings.cprint("WARNING: Increment size became 0.")
- alpha_star = 0.
- phi_star = phi_0
- break
-
- if (phi_alpha1 > phi_0 + c1*alpha1*phiprime_0) or \
- ((phi_alpha1 >= phi_alpha0) and (i > 1)):
- (alpha_star, phi_star) = self._zoom(alpha0, alpha1,
- phi_0, phiprime_0,
- phi_alpha0, phiprime_alpha0,
- phi_alpha1,
- c1, c2)
- break
-
- phiprime_alpha1 = self._phiprime(alpha1)
- if abs(phiprime_alpha1) <= -c2*phiprime_0:
- alpha_star = alpha1
- phi_star = phi_alpha1
- break
-
- if phiprime_alpha1 >= 0:
- (alpha_star, phi_star) = self._zoom(alpha1, alpha0,
- phi_0, phiprime_0,
- phi_alpha1, phiprime_alpha1,
- phi_alpha0,
- c1, c2)
- break
-
- # update alphas
- alpha0, alpha1 = alpha1, min(2*alpha1, max_step_size)
- phi_alpha0 = phi_alpha1
- phiprime_alpha0 = phiprime_alpha1
- # phi_alpha1 = self._phi(alpha1)
-
- else:
- # max_iterations was reached
- alpha_star = alpha1
- phi_star = phi_alpha1
- about.warnings.cprint(
- "WARNING: The line search algorithm did not converge.")
-
- self._last_alpha_star = alpha_star
- return alpha_star, phi_star
-
- def _zoom(self, alpha_lo, alpha_hi, phi_0, phiprime_0,
- phi_lo, phiprime_lo, phi_hi, c1, c2):
-
- max_iterations = self.max_zoom_iterations
- # define the cubic and quadratic interpolant checks
- cubic_delta = 0.2 # cubic
- quad_delta = 0.1 # quadratic
-
- # initialize the most recent versions (j-1) of phi and alpha
- alpha_recent = 0
- phi_recent = phi_0
-
- for i in xrange(max_iterations):
- delta_alpha = alpha_hi - alpha_lo
- if delta_alpha < 0:
- a, b = alpha_hi, alpha_lo
- else:
- a, b = alpha_lo, alpha_hi
-
- # Try cubic interpolation
- if i > 0:
- cubic_check = cubic_delta * delta_alpha
- alpha_j = self._cubicmin(alpha_lo, phi_lo, phiprime_lo,
- alpha_hi, phi_hi,
- alpha_recent, phi_recent)
- # If cubic was not successful or not available, try quadratic
- if (i == 0) or (alpha_j is None) or (alpha_j > b - cubic_check) or\
- (alpha_j < a + cubic_check):
- quad_check = quad_delta * delta_alpha
- alpha_j = self._quadmin(alpha_lo, phi_lo, phiprime_lo,
- alpha_hi, phi_hi)
- # If quadratic was not successfull, try bisection
- if (alpha_j is None) or (alpha_j > b - quad_check) or \
- (alpha_j < a + quad_check):
- alpha_j = alpha_lo + 0.5*delta_alpha
-
- # Check if the current value of alpha_j is already sufficient
- phi_alphaj = self._phi(alpha_j)
- # If the first Wolfe condition is not met replace alpha_hi
- # by alpha_j
- if (phi_alphaj > phi_0 + c1*alpha_j*phiprime_0) or\
- (phi_alphaj >= phi_lo):
- alpha_recent, phi_recent = alpha_hi, phi_hi
- alpha_hi, phi_hi = alpha_j, phi_alphaj
- else:
- phiprime_alphaj = self._phiprime(alpha_j)
- # If the second Wolfe condition is met, return the result
- if abs(phiprime_alphaj) <= -c2*phiprime_0:
- alpha_star = alpha_j
- phi_star = phi_alphaj
- break
- # If not, check the sign of the slope
- if phiprime_alphaj*delta_alpha >= 0:
- alpha_recent, phi_recent = alpha_hi, phi_hi
- alpha_hi, phi_hi = alpha_lo, phi_lo
- else:
- alpha_recent, phi_recent = alpha_lo, phi_lo
- # Replace alpha_lo by alpha_j
- (alpha_lo, phi_lo, phiprime_lo) = (alpha_j, phi_alphaj,
- phiprime_alphaj)
-
- else:
- alpha_star, phi_star = alpha_j, phi_alphaj
- about.warnings.cprint(
- "WARNING: The line search algorithm (zoom) did not converge.")
-
- return alpha_star, phi_star
-
- def _cubicmin(self, a, fa, fpa, b, fb, c, fc):
- """
- Finds the minimizer for a cubic polynomial that goes through the
- points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
- If no minimizer can be found return None
- """
- # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D
-
- with np.errstate(divide='raise', over='raise', invalid='raise'):
- try:
- C = fpa
- db = b - a
- dc = c - a
- denom = (db * dc) ** 2 * (db - dc)
- d1 = np.empty((2, 2))
- d1[0, 0] = dc ** 2
- d1[0, 1] = -db ** 2
- d1[1, 0] = -dc ** 3
- d1[1, 1] = db ** 3
- [A, B] = np.dot(d1, np.asarray([fb - fa - C * db,
- fc - fa - C * dc]).flatten())
- A /= denom
- B /= denom
- radical = B * B - 3 * A * C
- xmin = a + (-B + np.sqrt(radical)) / (3 * A)
- except ArithmeticError:
- return None
- if not np.isfinite(xmin):
- return None
- return xmin
-
- def _quadmin(self, a, fa, fpa, b, fb):
- """
- Finds the minimizer for a quadratic polynomial that goes through
- the points (a,fa), (b,fb) with derivative at a of fpa,
- """
- # f(x) = B*(x-a)^2 + C*(x-a) + D
- with np.errstate(divide='raise', over='raise', invalid='raise'):
- try:
- D = fa
- C = fpa
- db = b - a * 1.0
- B = (fb - D - C * db) / (db * db)
- xmin = a - C / (2.0 * B)
- except ArithmeticError:
- return None
- if not np.isfinite(xmin):
- return None
- return xmin
-
-
-class cyclic_storage(object):
- pass
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/nifty/operators/nifty_operators.py b/nifty/operators/nifty_operators.py
deleted file mode 100644
index a89e38cad4774aed7a41c069c59c1734a3a8bba4..0000000000000000000000000000000000000000
--- a/nifty/operators/nifty_operators.py
+++ /dev/null
@@ -1,3713 +0,0 @@
-# NIFTY (Numerical Information Field Theory) has been developed at the
-# Max-Planck-Institute for Astrophysics.
-#
-# Copyright (C) 2015 Max-Planck-Society
-#
-# Author: Marco Selig
-# Project homepage:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-from __future__ import division
-import numpy as np
-from nifty.config import about
-from nifty.field import Field
-from nifty.spaces.space import Space
-
-from nifty_minimization import conjugate_gradient
-from nifty_probing import trace_prober,\
- inverse_trace_prober,\
- diagonal_prober,\
- inverse_diagonal_prober
-import nifty.nifty_utilities as utilities
-import nifty.nifty_simple_math as nifty_simple_math
-
-
-# =============================================================================
-
-class operator(object):
- """
- .. __
- .. / /_
- .. ______ ______ _______ _____ ____ __ / _/ ______ _____
- .. / _ | / _ | / __ / / __/ / _ / / / / _ | / __/
- .. / /_/ / / /_/ / / /____/ / / / /_/ / / /_ / /_/ / / /
- .. \______/ / ____/ \______/ /__/ \______| \___/ \______/ /__/ class
- .. /__/
-
- NIFTY base class for (linear) operators
-
- The base NIFTY operator class is an abstract class from which other
- specific operator subclasses, including those preimplemented in NIFTY
- (e.g. the diagonal operator class) must be derived.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- sym : bool, *optional*
- Indicates whether the operator is self-adjoint or not
- (default: False)
- uni : bool, *optional*
- Indicates whether the operator is unitary or not
- (default: False)
- imp : bool, *optional*
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not (default: False)
- target : space, *optional*
- The space wherein the operator output lives (default: domain)
- para : {single object, list of objects}, *optional*
- This is a freeform list of parameters that derivatives of the
- operator class can use. Not used in the base operators.
- (default: None)
-
- See Also
- --------
- diagonal_operator : An operator class for handling purely diagonal
- operators.
- power_operator : Similar to diagonal_operator but with handy features
- for dealing with diagonal operators whose diagonal
- consists of a power spectrum.
- vecvec_operator : Operators constructed from the outer product of two
- fields
- response_operator : Implements a modeled instrument response which
- translates a signal into data space.
- projection_operator : An operator that projects out one or more
- components in a basis, e.g. a spectral band
- of Fourier components.
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- sym : bool
- Indicates whether the operator is self-adjoint or not
- uni : bool
- Indicates whether the operator is unitary or not
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not
- target : space
- The space wherein the operator output lives
- para : {single object, list of objects}
- This is a freeform list of parameters that derivatives of the
- operator class can use. Not used in the base operators.
- """
-
- def __init__(self, domain, codomain=None, sym=False, uni=False,
- imp=False, target=None, cotarget=None, bare=False):
- """
- Sets the attributes for an operator class instance.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- sym : bool, *optional*
- Indicates whether the operator is self-adjoint or not
- (default: False)
- uni : bool, *optional*
- Indicates whether the operator is unitary or not
- (default: False)
- imp : bool, *optional*
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not (default: False)
- target : space, *optional*
- The space wherein the operator output lives (default: domain)
- para : {object, list of objects}, *optional*
- This is a freeform list of parameters that derivatives of the
- operator class can use. Not used in the base operators.
- (default: None)
-
- Returns
- -------
- None
- """
- # Check if the domain is realy a space
- if not isinstance(domain, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: invalid input. domain is not a space."))
- self.domain = domain
- # Parse codomain
- if self.domain.check_codomain(codomain) == True:
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- # Cast the symmetric and unitary input
- self.sym = bool(sym)
- self.uni = bool(uni)
- self.bare = bool(bare)
-
- # If no target is supplied, we assume that the operator is square
- # If the operator is symmetric or unitary, we know that the operator
- # must be square
-
- if self.sym or self.uni:
- target = self.domain
- cotarget = self.codomain
- if target is not None:
- about.warnings.cprint("WARNING: Ignoring target.")
-
- elif target is None:
- target = self.domain
- cotarget = self.codomain
-
- elif isinstance(target, Space):
- self.target = target
- # Parse cotarget
- if self.target.check_codomain(cotarget) == True:
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
- else:
- raise TypeError(about._errors.cstring(
- "ERROR: invalid input. Target is not a space."))
-
- self.imp = bool(imp)
-
- def set_val(self, new_val):
- """
- Resets the field values.
-
- Parameters
- ----------
- new_val : {scalar, ndarray}
- New field values either as a constant or an arbitrary array.
-
- """
- self.val = new_val
- return self.val
-
- def get_val(self):
- return self.val
-
- def _multiply(self, x, **kwargs):
- # > applies the operator to a given field
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'multiply'."))
-
- def _adjoint_multiply(self, x, **kwargs):
- # > applies the adjoint operator to a given field
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'adjoint_multiply'."))
-
- def _inverse_multiply(self, x, **kwargs):
- # > applies the inverse operator to a given field
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'inverse_multiply'."))
-
- def _adjoint_inverse_multiply(self, x, **kwargs):
- # > applies the inverse adjoint operator to a given field
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'adjoint_inverse_multiply'."))
-
- def _inverse_adjoint_multiply(self, x, **kwargs):
- # > applies the adjoint inverse operator to a given field
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'inverse_adjoint_multiply'."))
-
- def _briefing(self, x, domain, codomain, inverse):
- # make sure, that the result_field of the briefing lives in the
- # given domain and codomain
- result_field = Field(domain=domain, val=x, codomain=codomain,
- copy=False)
-
- # weight if necessary
- if (not self.imp) and (not inverse):
- result_field = result_field.weight(power=1)
- return result_field
-
- def _debriefing(self, x, y, target, cotarget, inverse):
- # The debriefing takes care that the result field lives in the same
- # fourier-type domain as the input field
- assert(isinstance(y, Field))
-
- # weight if necessary
- if (not self.imp) and inverse:
- y = y.weight(power=-1)
-
- # if the operators domain as well as the target have the harmonic
- # attribute, try to match the result_field to the input_field
- if hasattr(self.domain, 'harmonic') and \
- hasattr(self.target, 'harmonic'):
- if x.domain.harmonic != y.domain.harmonic:
- y = y.transform()
-
- return y
-
- def times(self, x, **kwargs):
- """
- Applies the operator to a given object
-
- Parameters
- ----------
- x : {scalar, ndarray, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the domain of the operator.
-
- Returns
- -------
- Ox : field
- Mapped field on the target domain of the operator.
- """
- # prepare
- y = self._briefing(x, self.domain, self.codomain, inverse=False)
- # apply operator
- y = self._multiply(y, **kwargs)
- # evaluate
- return self._debriefing(x, y, self.target, self.cotarget,
- inverse=False)
-
- def __call__(self, x, **kwargs):
- return self.times(x, **kwargs)
-
- def adjoint_times(self, x, **kwargs):
- """
- Applies the adjoint operator to a given object.
-
- Parameters
- ----------
- x : {scalar, ndarray, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the target space of the operator.
-
- Returns
- -------
- OAx : field
- Mapped field on the domain of the operator.
-
- """
- # check whether self-adjoint
- if self.sym:
- return self.times(x, **kwargs)
- # check whether unitary
- if self.uni:
- return self.inverse_times(x, **kwargs)
-
- # prepare
- y = self._briefing(x, self.target, self.cotarget, inverse=False)
- # apply operator
- y = self._adjoint_multiply(y, **kwargs)
- # evaluate
- return self._debriefing(x, y, self.domain, self.codomain,
- inverse=False)
-
- def inverse_times(self, x, **kwargs):
- """
- Applies the inverse operator to a given object.
-
- Parameters
- ----------
- x : {scalar, ndarray, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the domain space of the operator.
-
- Returns
- -------
- OIx : field
- Mapped field on the target space of the operator.
- """
- # check whether self-inverse
- if self.sym and self.uni:
- return self.times(x, **kwargs)
-
- # prepare
- y = self._briefing(x, self.target, self.cotarget, inverse=True)
- # apply operator
- y = self._inverse_multiply(y, **kwargs)
- # evaluate
- return self._debriefing(x, y, self.domain, self.codomain,
- inverse=True)
-
- def adjoint_inverse_times(self, x, **kwargs):
- """
- Applies the inverse adjoint operator to a given object.
-
- Parameters
- ----------
- x : {scalar, ndarray, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the target space of the operator.
-
- Returns
- -------
- OAIx : field
- Mapped field on the domain of the operator.
-
- """
- # check whether self-adjoint
- if self.sym:
- return self.inverse_times(x, **kwargs)
- # check whether unitary
- if self.uni:
- return self.times(x, **kwargs)
-
- # prepare
- y = self._briefing(x, self.domain, self.codomain, inverse=True)
- # apply operator
- y = self._adjoint_inverse_multiply(y, **kwargs)
- # evaluate
- return self._debriefing(x, y, self.target, self.cotarget,
- inverse=True)
-
- def inverse_adjoint_times(self, x, **kwargs):
- """
- Applies the adjoint inverse operator to a given object.
-
- Parameters
- ----------
- x : {scalar, ndarray, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the target space of the operator.
-
- Returns
- -------
- OIAx : field
- Mapped field on the domain of the operator.
-
- """
- # check whether self-adjoint
- if self.sym:
- return self.inverse_times(x, **kwargs)
- # check whether unitary
- if self.uni:
- return self.times(x, **kwargs)
-
- # prepare
- y = self._briefing(x, self.domain, self.codomain, inverse=True)
- # apply operator
- y = self._inverse_adjoint_multiply(y, **kwargs)
- # evaluate
- return self._debriefing(x, y, self.target, self.cotarget,
- inverse=True)
-
- def tr(self, domain=None, codomain=None, random="pm1", nrun=8,
- varQ=False, **kwargs):
- """
- Computes the trace of the operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- tr : float
- Trace of the operator
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- See Also
- --------
- probing : The class used to perform probing operations
- """
-
- return trace_prober(operator=self,
- domain=domain,
- codomain=codomain,
- random=random,
- nrun=nrun,
- varQ=varQ,
- **kwargs
- )()
-
- def inverse_tr(self, domain=None, codomain=None, random="pm1", nrun=8,
- varQ=False, **kwargs):
- """
- Computes the trace of the inverse operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- nrun : int, *optional*
- total number of probes (default: 8)
- varQ : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
-
-
- Returns
- -------
- tr : float
- Trace of the inverse operator
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- See Also
- --------
- probing : The class used to perform probing operations
- """
- return inverse_trace_prober(operator=self,
- domain=domain,
- codomain=codomain,
- random=random,
- nrun=nrun,
- varQ=varQ,
- **kwargs
- )()
-
- def diag(self, domain=None, codomain=None, random="pm1", nrun=8,
- varQ=False, bare=False, **kwargs):
- """
- Computes the diagonal of the operator via probing.
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- diag : ndarray
- The matrix diagonal
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
-
- diag = diagonal_prober(operator=self,
- domain=domain,
- codomain=codomain,
- random=random,
- nrun=nrun,
- varQ=varQ,
- **kwargs
- )()
- if diag is None:
- about.warnings.cprint("WARNING: forwarding 'NoneType'.")
- return None
-
- if domain is None:
- domain = diag.domain
- # weight if ...
- if bare:
- if(isinstance(diag, tuple)): # diag == (diag,variance)
- return (diag[0].weight(power=-1),
- diag[1].weight(power=-1))
- else:
- return diag.weight(power=-1)
- else:
- return diag
-
- def inverse_diag(self, domain=None, codomain=None, random="pm1",
- nrun=8, varQ=False, bare=False, **kwargs):
- """
- Computes the diagonal of the inverse operator via probing.
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- diag : ndarray
- The diagonal of the inverse matrix
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- if(domain is None):
- domain = self.target
- diag = inverse_diagonal_prober(operator=self,
- domain=domain,
- codomain=codomain,
- random=random,
- nrun=nrun,
- varQ=varQ,
- **kwargs
- )()
- if(diag is None):
- about.infos.cprint("INFO: forwarding 'NoneType'.")
- return None
-
- if domain is None:
- domain = diag.codomain
- # weight if ...
- if bare:
- if(isinstance(diag, tuple)): # diag == (diag,variance)
- return (diag[0].weight(power=-1),
- diag[1].weight(power=-1))
- else:
- return diag.weight(power=-1)
- else:
- return diag
-
- def det(self):
- """
- Computes the determinant of the operator.
-
- Returns
- -------
- det : float
- The determinant
-
- """
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'det'."))
-
- def inverse_det(self):
- """
- Computes the determinant of the inverse operator.
-
- Returns
- -------
- det : float
- The determinant
-
- """
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'inverse_det'."))
-
- def log_det(self):
- """
- Computes the logarithm of the determinant of the operator
- (if applicable).
-
- Returns
- -------
- logdet : float
- The logarithm of the determinant
-
- """
- raise NotImplementedError(about._errors.cstring(
- "ERROR: no generic instance method 'log_det'."))
-
- def tr_log(self):
- """
- Computes the trace of the logarithm of the operator
- (if applicable).
-
- Returns
- -------
- logdet : float
- The trace of the logarithm
-
- """
- return self.log_det()
-
- def hat(self, bare=False, domain=None, codomain=None, **kwargs):
- """
- Translates the operator's diagonal into a field
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- x : field
- The matrix diagonal as a field living on the operator
- domain space
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- if domain is None:
- domain = self.domain
- if codomain is None:
- codomain = self.codomain
-
- diag = self.diag(bare=bare, domain=domain, codomain=codomain,
- var=False, **kwargs)
- if diag is None:
- about.infos.cprint("WARNING: forwarding 'NoneType'.")
- return None
- return diag
-
- def inverse_hat(self, bare=False, domain=None, codomain=None, **kwargs):
- """
- Translates the inverse operator's diagonal into a field
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- x : field
- The matrix diagonal as a field living on the operator
- domain space
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- if domain is None:
- domain = self.target
- if codomain is None:
- codomain = self.cotarget
- diag = self.inverse_diag(bare=bare, domain=domain, codomain=codomain,
- var=False, **kwargs)
- if diag is None:
- about.infos.cprint("WARNING: forwarding 'NoneType'.")
- return None
- return diag
-
- def hathat(self, domain=None, codomain=None, **kwargs):
- """
- Translates the operator's diagonal into a diagonal operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- D : diagonal_operator
- The matrix diagonal as an operator
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- if domain is None:
- domain = self.domain
- if codomain is None:
- codomain = self.codomain
-
- diag = self.diag(bare=False, domain=domain, codomain=codomain,
- var=False, **kwargs)
- if diag is None:
- about.infos.cprint("WARNING: forwarding 'NoneType'.")
- return None
- return diagonal_operator(domain=domain, codomain=codomain,
- diag=diag, bare=False)
-
- def inverse_hathat(self, domain=None, codomain=None, **kwargs):
- """
- Translates the inverse operator's diagonal into a diagonal
- operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- D : diagonal_operator
- The diagonal of the inverse matrix as an operator
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- if domain is None:
- domain = self.target
- if codomain is None:
- codomain = self.cotarget
-
- diag = self.inverse_diag(bare=False, domain=domain, codomain=codomain,
- var=False, **kwargs)
- if diag is None:
- about.infos.cprint("WARNING: forwarding 'NoneType'.")
- return None
- return diagonal_operator(domain=domain, codomain=codomain,
- diag=diag, bare=False)
-
- def __repr__(self):
- return ""
-
-# =============================================================================
-
-
-class diagonal_operator(operator):
- """
- .. __ __ __
- .. / / /__/ / /
- .. ____/ / __ ____ __ ____ __ ______ __ ___ ____ __ / /
- .. / _ / / / / _ / / _ / / _ | / _ | / _ / / /
- .. / /_/ / / / / /_/ / / /_/ / / /_/ / / / / / / /_/ / / /_
- .. \______| /__/ \______| \___ / \______/ /__/ /__/ \______| \___/ operator class
- .. /______/
-
- NIFTY subclass for diagonal operators
-
- Parameters
- ----------
- domain : space, *optional*
- The space wherein valid arguments live. If no domain is given
- then the diag parameter *must* be a field and the domain
- of that field is assumed. (default: None)
- diag : {scalar, ndarray, field}
- The diagonal entries of the operator. For a scalar, a constant
- diagonal is defined having the value provided. If no domain
- is given, diag must be a field. (default: 1)
- bare : bool, *optional*
- whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- The inverse applications of the diagonal operator feature a ``pseudo``
- flag indicating if zero divison shall be ignored and return zero
- instead of causing an error.
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- val : ndarray
- A field containing the diagonal entries of the matrix.
- sym : bool
- Indicates whether the operator is self-adjoint or not
- uni : bool
- Indicates whether the operator is unitary or not
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not
- target : space
- The space wherein the operator output lives
- """
-
- def __init__(self, domain=None, codomain=None, diag=1, bare=False):
- """
- Sets the standard operator properties and `values`.
-
- Parameters
- ----------
- domain : space, *optional*
- The space wherein valid arguments live. If no domain is given
- then the diag parameter *must* be a field and the domain
- of that field is assumed. (default: None)
- diag : {scalar, ndarray, field}, *optional*
- The diagonal entries of the operator. For a scalar, a constant
- diagonal is defined having the value provided. If no domain
- is given, diag must be a field. (default: 1)
- bare : bool, *optional*
- whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
-
- Returns
- -------
- None
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- # Set the domain
- if domain is None:
- try:
- self.domain = diag.domain
- except(AttributeError):
- raise TypeError(about._errors.cstring(
- "ERROR: Explicit or implicit, i.e. via diag domain " +
- "inupt needed!"))
-
- else:
- self.domain = domain
-
- if self.domain.check_codomain(codomain):
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- self.target = self.domain
- self.cotarget = self.codomain
- self.imp = True
- self.set_diag(new_diag=diag, bare=bare)
-
- def set_diag(self, new_diag, bare=False):
- """
- Sets the diagonal of the diagonal operator
-
- Parameters
- ----------
- new_diag : {scalar, ndarray, field}
- The new diagonal entries of the operator. For a scalar, a
- constant diagonal is defined having the value provided. If
- no domain is given, diag must be a field.
-
- bare : bool, *optional*
- whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
-
- Returns
- -------
- None
- """
-
- # Set the diag-val
- self.val = self.domain.cast(new_diag)
-
- # Set the bare-val #TODO Check with Theo
- self.bare = bare
-
- # Weight if necessary
- if bare:
- self.val = self.domain.calc_weight(self.val, power=1)
-
- # Check complexity attributes
- if self.domain.calc_real_Q(self.val) == True:
- self.sym = True
- else:
- self.sym = False
-
- # Check if unitary, i.e. identity
- if (self.val == 1).all() == True:
- self.uni = True
- else:
- self.uni = False
-
- def _multiply(self, x, **kwargs):
- # applies the operator to a given field
- y = x.copy(domain=self.target, codomain=self.cotarget)
- y *= self.get_val()
- return y
-
- def _adjoint_multiply(self, x, **kwargs):
- # applies the adjoint operator to a given field
- y = x.copy(domain=self.domain, codomain=self.codomain)
- y *= self.get_val().conjugate()
- return y
-
- def _inverse_multiply(self, x, pseudo=False, **kwargs):
- # applies the inverse operator to a given field
- y = x.copy(domain=self.domain, codomain=self.codomain)
- if (self.get_val() == 0).any():
- if not pseudo:
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
- else:
-# raise NotImplementedError(
-# "ERROR: function not yet implemented!")
- y /= self.get_val()
- # TODO: implement this
- # the following code does not work. np.isnan is needed,
- # but on a level of fields
-# y[y == np.nan] = 0
-# y[y == np.inf] = 0
- else:
- y /= self.get_val()
- return y
-
- def _adjoint_inverse_multiply(self, x, pseudo=False, **kwargs):
- # > applies the inverse adjoint operator to a given field
- y = x.copy(domain=self.target, codomain=self.cotarget)
- if (self.get_val() == 0).any():
- if not pseudo:
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
- else:
- raise NotImplementedError(
- "ERROR: function not yet implemented!")
- # TODO: implement this
- # the following code does not work. np.isnan is needed,
- # but on a level of fields
- y /= self.get_val().conjugate()
- y[y == np.nan] = 0
- y[y == np.inf] = 0
- else:
- y /= self.get_val().conjugate()
- return y
-
- def _inverse_adjoint_multiply(self, x, pseudo=False, **kwargs):
- # > applies the adjoint inverse operator to a given field
- return self._adjoint_inverse_multiply(x, pseudo=pseudo, **kwargs)
-
- def tr(self, varQ=False, **kwargs):
- """
- Computes the trace of the operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- tr : float
- Trace of the operator
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- """
-
- tr = self.domain.unary_operation(self.val, 'sum')
-
- if varQ:
- return (tr, 1)
- else:
- return tr
-
- def inverse_tr(self, varQ=False, **kwargs):
- """
- Computes the trace of the inverse operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- tr : float
- Trace of the inverse operator
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- """
- if (self.get_val() == 0).any():
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
- inverse_tr = self.domain.unary_operation(
- self.domain.binary_operation(self.val, 1, 'rdiv', cast=0),
- 'sum')
-
- if varQ:
- return (inverse_tr, 1)
- else:
- return inverse_tr
-
- def diag(self, bare=False, domain=None, codomain=None,
- varQ=False, **kwargs):
- """
- Computes the diagonal of the operator.
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- diag : ndarray
- The matrix diagonal
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
-
- if (domain is None) or (domain == self.domain):
- if bare:
- diag_val = self.domain.calc_weight(self.val, power=-1)
- else:
- diag_val = self.val
- diag = Field(self.domain, codomain=self.codomain, val=diag_val)
- else:
- diag = super(diagonal_operator, self).diag(bare=bare,
- domain=domain,
- codomain=codomain,
- nrun=1,
- random='pm1',
- varQ=False,
- **kwargs)
- if varQ:
- return (diag, diag.domain.cast(1))
- else:
- return diag
-
- def inverse_diag(self, bare=False, domain=None, codomain=None,
- varQ=False, **kwargs):
- """
- Computes the diagonal of the inverse operator.
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- diag : ndarray
- The diagonal of the inverse matrix
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- See Also
- --------
- probing : The class used to perform probing operations
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
-
- if (domain is None) or (domain == self.domain):
- inverse_val = 1. / self.val
- if bare:
- inverse_diag_val = self.domain.calc_weight(inverse_val,
- power=-1)
- else:
- inverse_diag_val = inverse_val
- inverse_diag = Field(self.domain, codomain=self.codomain,
- val=inverse_diag_val)
-
- else:
- inverse_diag = super(diagonal_operator,
- self).inverse_diag(bare=bare,
- domain=domain,
- nrun=1,
- random='pm1',
- varQ=False,
- **kwargs)
- if varQ:
- return (inverse_diag, inverse_diag.domain.cast(1))
- else:
- return inverse_diag
-
- def det(self):
- """
- Computes the determinant of the matrix.
-
- Returns
- -------
- det : float
- The determinant
-
- """
- if self.uni: # identity
- return 1.
- else:
- return self.domain.unary_operation(self.val, op='prod')
-
- def inverse_det(self):
- """
- Computes the determinant of the inverse operator.
-
- Returns
- -------
- det : float
- The determinant
-
- """
- if self.uni: # identity
- return 1.
-
- det = self.det()
-
- if det != 0:
- return 1. / det
- else:
- raise ValueError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def log_det(self):
- """
- Computes the logarithm of the determinant of the operator.
-
- Returns
- -------
- logdet : float
- The logarithm of the determinant
-
- """
- if self.uni: # identity
- return 0
- else:
- return self.domain.unary_operation(
- nifty_simple_math.log(self.val), op='sum')
-
- def get_random_field(self, domain=None, codomain=None):
- """
- Generates a Gaussian random field with variance equal to the
- diagonal.
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the field lives (default: None, indicates
- to use self.domain)
- target : space, *optional*
- space wherein the transform of the field lives
- (default: None, indicates to use target of domain)
-
- Returns
- -------
- x : field
- Random field.
-
- """
- temp_field = Field(domain=self.domain,
- codomain=self.codomain,
- random='gau',
- std=nifty_simple_math.sqrt(
- self.diag(bare=True).get_val()))
- if domain is None:
- domain = self.domain
- if domain.check_codomain(codomain):
- codomain = codomain
- elif domain.check_codomain(self.codomain):
- codomain = self.codomain
- else:
- codomain = domain.get_codomain()
-
- return Field(domain=domain, val=temp_field, codomain=codomain)
-
-# if domain.harmonic != self.domain.harmonic:
-# temp_field = temp_field.transform(new_domain=domain)
-#
-# if self.domain == domain and self.codomain == codomain:
-# return temp_field
-# else:
-# return temp_field.copy(domain=domain,
-# codomain=codomain)
-
- def __repr__(self):
- return ""
-
-
-class identity_operator(diagonal_operator):
- def __init__(self, domain, codomain=None, bare=False):
- super(identity_operator, self).__init__(domain=domain,
- codomain=codomain,
- diag=1,
- bare=bare)
-
-
-def identity(domain, codomain=None):
- """
- Returns an identity operator.
-
- The identity operator is represented by a `diagonal_operator` instance,
- which is applicable to a field-like object; i.e., a scalar, list,
- array or field. (The identity operator is unrelated to PYTHON's
- built-in function :py:func:`id`.)
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
-
- Returns
- -------
- id : diagonal_operator
- The identity operator as a `diagonal_operator` instance.
-
- See Also
- --------
- diagonal_operator
-
- Examples
- --------
- >>> I = identity(rg_space(8,dist=0.2))
- >>> I.diag()
- array([ 1., 1., 1., 1., 1., 1., 1., 1.])
- >>> I.diag(bare=True)
- array([ 5., 5., 5., 5., 5., 5., 5., 5.])
- >>> I.tr()
- 8.0
- >>> I(3)
-
- >>> I(3).val
- array([ 3., 3., 3., 3., 3., 3., 3., 3.])
- >>> I(np.arange(8))[:]
- array([ 0., 1., 2., 3., 4., 5., 6., 7.])
- >>> f = I.get_random_field()
- >>> print(I(f) - f)
- nifty.field instance
- - domain =
- - val = [...]
- - min.,max. = [0.0, 0.0]
- - med.,mean = [0.0, 0.0]
- - target =
- >>> I.times(f) ## equal to I(f)
-
- >>> I.inverse_times(f)
-
-
- """
- about.warnings.cprint('WARNING: The identity function is deprecated. ' +
- 'Use the identity_operator class.')
- return diagonal_operator(domain=domain,
- codomain=codomain,
- diag=1,
- bare=False)
-
-
-class power_operator(diagonal_operator):
- """
- .. ______ ______ __ __ _______ _____
- .. / _ | / _ | | |/\/ / / __ / / __/
- .. / /_/ / / /_/ / | / / /____/ / /
- .. / ____/ \______/ |__/\__/ \______/ /__/ operator class
- .. /__/
-
- NIFTY subclass for (signal-covariance-type) diagonal operators
- containing a power spectrum
-
- Parameters
- ----------
- domain : space, *optional*
- The space wherein valid arguments live. If no domain is given
- then the diag parameter *must* be a field and the domain
- of that field is assumed. (default: None)
- spec : {scalar, list, array, field, function}
- The power spectrum. For a scalar, a constant power
- spectrum is defined having the value provided. If no domain
- is given, diag must be a field. (default: 1)
- bare : bool, *optional*
- whether the entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: True)
- pindex : ndarray, *optional*
- indexing array, obtainable from domain.get_power_indices
- (default: None)
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None). vmin : {scalar, list, ndarray, field},
- *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- val : ndarray
- A field containing the diagonal entries of the matrix.
- sym : bool
- Indicates whether the operator is self-adjoint or not
- uni : bool
- Indicates whether the operator is unitary or not
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not
- target : space
- The space wherein the operator output lives
-
- """
-
- def __init__(self, domain, codomain=None, spec=1, bare=True, **kwargs):
- """
- Sets the diagonal operator's standard properties
-
- Parameters
- ----------
- domain : space, *optional*
- The space wherein valid arguments live. If no domain is given
- then the diag parameter *must* be a field and the domain
- of that field is assumed. (default: None)
- spec : {scalar, list, array, field, function}
- The power spectrum. For a scalar, a constant power
- spectrum is defined having the value provided. If no domain
- is given, diag must be a field. (default: 1)
- bare : bool, *optional*
- whether the entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: True)
- pindex : ndarray, *optional*
- indexing array, obtainable from domain.get_power_indices
- (default: None)
-
- Returns
- -------
- None
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on
- logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to
- ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- """
- # Set the domain
- if not isinstance(domain, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: The given domain is not a nifty space."))
- self.domain = domain
-
- if self.domain.check_codomain(codomain):
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- # Set the target
- self.target = self.domain
- # Set the cotarget
- self.cotarget = self.codomain
-
- # Set imp
- self.imp = True
- # Save the kwargs
- self.kwargs = kwargs
- # Set the diag
- self.set_power(new_spec=spec, bare=bare, **kwargs)
-
- self.sym = True
-
- # check whether identity
- if(np.all(spec == 1)):
- self.uni = True
- else:
- self.uni = False
-
- # The domain is used for calculations of the power-spectrum, not for
- # actual field values. Therefore the casting of self.val must be switched
- # off.
- def set_val(self, new_val):
- """
- Resets the field values.
-
- Parameters
- ----------
- new_val : {scalar, ndarray}
- New field values either as a constant or an arbitrary array.
-
- """
- self.val = new_val
- return self.val
-
- def get_val(self):
- return self.val
-
- def set_power(self, new_spec, bare=True, pindex=None, **kwargs):
- """
- Sets the power spectrum of the diagonal operator
-
- Parameters
- ----------
- newspec : {scalar, list, array, field, function}
- The entries of the operator. For a scalar, a constant
- diagonal is defined having the value provided. If no domain
- is given, diag must be a field. (default: 1)
- bare : bool
- whether the entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- pindex : ndarray, *optional*
- indexing array, obtainable from domain.get_power_indices
- (default: None)
-
- Returns
- -------
- None
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on
- logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to
- ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
-
- """
-
- # Cast the pontentially given pindex. If no pindex was given,
- # extract it from self.domain using the supplied kwargs.
- pindex = self._cast_pindex(pindex, **kwargs)
-
- # Cast the new powerspectrum function
- temp_spec = self.domain.enforce_power(new_spec)
-
- # Calculate the diagonal
- try:
- diag = pindex.apply_scalar_function(lambda x: temp_spec[x],
- dtype=temp_spec.dtype.type)
- diag.hermitian = True
- except(AttributeError):
- diag = temp_spec[pindex]
-
- # Weight if necessary
- if bare:
- self.val = self.domain.calc_weight(diag, power=1)
- else:
- self.val = diag
-
- # check whether identity
- if (self.val == 1).all() == True:
- self.uni = True
- else:
- self.uni = False
-
- return self.val
-
- def _cast_pindex(self, pindex=None, **kwargs):
- # Update the internal kwargs dict with the given one:
- temp_kwargs = self.kwargs
- temp_kwargs.update(kwargs)
-
- # Case 1: no pindex given
- if pindex is None:
- pindex = self.domain.power_indices.get_index_dict(
- **temp_kwargs)['pindex']
- # Case 2: explicit pindex given
- else:
- # TODO: Pindex casting could be done here. No must-have.
- assert(np.all(np.array(pindex.shape) ==
- np.array(self.domain.shape)))
- return pindex
-
- def get_power(self, bare=True, **kwargs):
- """
- Computes the power spectrum
-
- Parameters
- ----------
- bare : bool, *optional*
- whether the entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: True)
- pundex : ndarray, *optional*
- unindexing array, obtainable from domain.get_power_indices
- (default: None)
- pindex : ndarray, *optional*
- indexing array, obtainable from domain.get_power_indices
- (default: None)
-
- Returns
- -------
- spec : ndarray
- The power spectrum
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on
- logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to
- ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- """
-
- temp_kwargs = self.kwargs
- temp_kwargs.update(kwargs)
-
- # Weight the diagonal values if necessary
- if bare:
- diag = self.domain.calc_weight(self.val, power=-1)
- else:
- diag = self.val
-
- # Use the calc_power routine of the domain in order to to stay
- # independent of the implementation
- diag = diag**(0.5)
-
- power = self.domain.calc_power(diag, **temp_kwargs)
-
- return power
-
- def get_projection_operator(self, pindex=None, **kwargs):
- """
- Generates a spectral projection operator
-
- Parameters
- ----------
- pindex : ndarray
- indexing array obtainable from domain.get_power_indices
- (default: None)
-
- Returns
- -------
- P : projection_operator
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on
- logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to
- ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- """
-
- pindex = self._cast_pindex(pindex, **kwargs)
- return projection_operator(self.domain,
- codomain=self.codomain,
- assign=pindex)
-
- def __repr__(self):
- return ""
-
-
-class projection_operator(operator):
- """
- .. __ __ __
- .. /__/ / /_ /__/
- .. ______ _____ ______ __ _______ _______ / _/ __ ______ __ ___
- .. / _ | / __/ / _ | / / / __ / / ____/ / / / / / _ | / _ |
- .. / /_/ / / / / /_/ / / / / /____/ / /____ / /_ / / / /_/ / / / / /
- .. / ____/ /__/ \______/ / / \______/ \______/ \___/ /__/ \______/ /__/ /__/ operator class
- .. /__/ /___/
-
- NIFTY subclass for projection operators
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- assign : ndarray, *optional*
- Assignments of domain items to projection bands. An array
- of integers, negative integers are associated with the
- nullspace of the projection. (default: None)
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- Notes
- -----
- The application of the projection operator features a ``band`` keyword
- specifying a single projection band (see examples), a ``bandsup``
- keyword specifying which projection bands to sum up, and a ``split``
- keyword.
-
- Examples
- --------
- >>> space = point_space(3)
- >>> P = projection_operator(space, assign=[0, 1, 0])
- >>> P.bands()
- 2
- >>> P([1, 2, 3], band=0) # equal to P.times(field(space,val=[1, 2, 3]))
-
- >>> P([1, 2, 3], band=0).domain
-
- >>> P([1, 2, 3], band=0).val # projection on band 0 (items 0 and 2)
- array([ 1., 0., 3.])
- >>> P([1, 2, 3], band=1).val # projection on band 1 (item 1)
- array([ 0., 2., 0.])
- >>> P([1, 2, 3])
-
- >>> P([1, 2, 3]).domain
-
- >>> P([1, 2, 3]).val # projection on all bands
- array([[ 1., 0., 3.],
- [ 0., 2., 0.]])
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- ind : ndarray
- Assignments of domain items to projection bands.
- sym : bool
- Indicates whether the operator is self-adjoint or not
- uni : bool
- Indicates whether the operator is unitary or not
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not
- target : space
- The space wherein the operator output lives
-
- """
-
- def __init__(self, domain, assign=None, codomain=None, **kwargs):
- """
- Sets the standard operator properties and `indexing`.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- assign : ndarray, *optional*
- Assignments of domain items to projection bands. An array
- of integers, negative integers are associated with the
- nullspace of the projection. (default: None)
-
- Returns
- -------
- None
-
- Other Parameters
- ----------------
- log : bool, *optional*
- Flag specifying if the spectral binning is performed on
- logarithmic
- scale or not; if set, the number of used bins is set
- automatically (if not given otherwise); by default no binning
- is done (default: None).
- nbin : integer, *optional*
- Number of used spectral bins; if given `log` is set to
- ``False``;
- integers below the minimum of 3 induce an automatic setting;
- by default no binning is done (default: None).
- binbounds : {list, array}, *optional*
- User specific inner boundaries of the bins, which are preferred
- over the above parameters; by default no binning is done
- (default: None).
- vmin : {scalar, list, ndarray, field}, *optional*
- Lower limit of the uniform distribution if ``random == "uni"``
- (default: 0).
-
- """
- # Check the domain
- if not isinstance(domain, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: The supplied domain is not a nifty space instance."))
- self.domain = domain
- # Parse codomain
- if self.domain.check_codomain(codomain):
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- self.target = self.domain
- self.cotarget = self.codomain
-
- # Cast the assignment
- if assign is None:
- try:
- self.domain.power_indices['pindex']
- except AttributeError:
- assign = np.arange(self.domain.dim, dtype=np.int)
-
- self.assign = self.domain.cast(assign, dtype=np.dtype('int'),
- hermitianize=False)
-
- # build indexing
- self.ind = self.domain.unary_operation(self.assign, op='unique')
-
- self.sym = True
- self.uni = False
- self.imp = True
-
- def bands(self):
- about.warnings.cprint(
- "WARNING: projection_operator.bands() is deprecated. " +
- "Use get_band_num() instead.")
- return self.get_band_num()
-
- def get_band_num(self):
- """
- Computes the number of projection bands
-
- Returns
- -------
- bands : int
- The number of projection bands
- """
- return len(self.ind)
-
- def rho(self):
- """
- Computes the number of degrees of freedom per projection band.
-
- Returns
- -------
- rho : ndarray
- The number of degrees of freedom per projection band.
- """
- # Check if the space has some meta-degrees of freedom
- if self.domain.dim == self.domain.dof:
- # If not, compute the degeneracy factor directly
- rho = self.domain.calc_bincount(self.assign)
- else:
- meta_mask = self.domain.calc_weight(
- self.domain.meta_volume_split,
- power=-1)
- rho = self.domain.calc_bincount(self.assign,
- weights=meta_mask)
- return rho
-
- def _multiply(self, x, bands=None, bandsum=None, **kwargs):
- """
- Applies the operator to a given field.
-
- Parameters
- ----------
- x : field
- Valid input field.
- band : int, *optional*
- Projection band whereon to project (default: None).
- bandsup: {integer, list/array of integers}, *optional*
- List of projection bands whereon to project and which to sum
- up. The `band` keyword is prefered over `bandsup`
- (default: None).
-
- Returns
- -------
- Px : field
- projected field(!)
- """
-
- if bands is not None:
- # cast the band
- if np.isscalar(bands):
- bands_was_scalar = True
- else:
- bands_was_scalar = False
- bands = np.array(bands, dtype=np.int).flatten()
-
- # check for consistency
- if np.any(bands > self.get_band_num() - 1) or np.any(bands < 0):
- raise TypeError(about._errors.cstring("ERROR: Invalid bands."))
-
- if bands_was_scalar:
- new_field = x * (self.assign == bands[0])
- else:
- # build up the projection results
- # prepare the projector-carrier
- carrier = np.empty((len(bands),), dtype=np.object_)
- for i in xrange(len(bands)):
- current_band = bands[i]
- projector = (self.assign == current_band)
- carrier[i] = projector
- # Use the carrier and tensor dot to do the projection
- new_field = x.tensor_product(carrier)
- return new_field
-
- elif bandsum is not None:
- if np.isscalar(bandsum):
- bandsum = np.arange(int(bandsum) + 1)
- else:
- bandsum = np.array(bandsum, dtype=np.int_).flatten()
-
- # check for consistency
- if np.any(bandsum > self.get_band_num() - 1) or \
- np.any(bandsum < 0):
- raise TypeError(about._errors.cstring(
- "ERROR: Invalid bandsum."))
- new_field = x.copy_empty()
- # Initialize the projector array, completely
- projector_sum = (self.assign != self.assign)
- for i in xrange(len(bandsum)):
- current_band = bandsum[i]
- projector = self.domain.binary_operation(self.assign,
- current_band,
- 'eq')
- projector_sum += projector
- new_field = x * projector_sum
- return new_field
-
- else:
- return self._multiply(x, bands=self.ind)
-
- def _inverse_multiply(self, x, **kwargs):
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def pseudo_tr(self, x, axis=(), **kwargs):
- """
- Computes the pseudo trace of a given object for all projection
- bands
-
- Parameters
- ----------
- x : {field, operator}
- The object whose pseudo-trace is to be computed. If the input
- is
- a field, the pseudo trace equals the trace of
- the projection operator mutliplied by a vector-vector operator
- corresponding to the input field. This is also equal to the
- pseudo inner product of the field with projected field itself.
- If the input is a operator, the pseudo trace equals the trace
- of
- the projection operator multiplied by the input operator.
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- tr : float
- Pseudo trace for all projection bands
- """
- # Parse the input
- # Case 1: x is a field
- # -> Compute the diagonal of the corresponding vecvec-operator:
- # x * x^dagger
- if isinstance(x, Field):
- # check if field is in the same signal/harmonic space as the
- # domain of the projection operator
- if self.domain != x.domain:
- # TODO: check if a exception should be raised, or if
- # one should try to fix stuff.
- pass
- # x = x.transform(new_domain=self.domain)
- vecvec = vecvec_operator(val=x)
- return self.pseudo_tr(x=vecvec, axis=axis, **kwargs)
-
- # Case 2: x is an operator
- # -> take the diagonal
- elif isinstance(x, operator):
- working_field = x.diag(bare=False,
- domain=self.domain,
- codomain=self.codomain)
- if self.domain != working_field.domain:
- # TODO: check if a exception should be raised, or if
- # one should try to fix stuff.
- pass
- # working_field = working_field.transform(new_domain=self.domain)
-
- # Case 3: x is something else
- else:
- raise TypeError(about._errors.cstring(
- "ERROR: x must be a field or an operator."))
-
- # Check for hidden degrees of freedom and compensate the trace
- # accordingly
- if self.domain.dim != self.domain.dof:
- working_field *= self.domain.calc_weight(
- self.domain.meta_volume_split,
- power=-1)
- # prepare the result object
- projection_result = utilities.field_map(
- working_field.ishape,
- lambda z: self.domain.calc_bincount(self.assign, weights=z),
- working_field.get_val())
-
- projection_result = np.sum(projection_result, axis=axis)
- return projection_result
-
- def __repr__(self):
- return ""
-
-
-class vecvec_operator(operator):
- """
- .. __
- .. __/ /__
- .. __ __ _______ _______ __ __ _______ _______ /__ __/
- .. | |/ / / __ / / ____/ | |/ / / __ / / ____/ /__/
- .. | / / /____/ / /____ | / / /____/ / /____
- .. |____/ \______/ \______/ |____/ \______/ \______/
- .. operator class
-
- NIFTY subclass for vector-vector operators
-
- Parameters
- ----------
- domain : space, *optional*
- The space wherein valid arguments live. If none is given, the
- space of the field given in val is used. (default: None)
- val : {scalar, ndarray, field}, *optional*
- The field from which to construct the operator. For a scalar,
- a constant
- field is defined having the value provided. If no domain
- is given, val must be a field. (default: 1)
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- val : ndarray
- The field from which the operator is derived.
- sym : bool
- Indicates whether the operator is self-adjoint or not
- uni : bool
- Indicates whether the operator is unitary or not
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not
- target : space
- The space wherein the operator output lives.
- """
-
- def __init__(self, domain=None, codomain=None, val=1):
- """
- Sets the standard operator properties and `values`.
-
- Parameters
- ----------
- domain : space, *optional*
- The space wherein valid arguments live. If none is given, the
- space of the field given in val is used. (default: None)
- val : {scalar, ndarray, field}, *optional*
- The field from which to construct the operator. For a scalar,
- a constant
- field is defined having the value provided. If no domain
- is given, val must be a field. (default: 1)
-
- Returns
- -------
- None
- """
- if isinstance(val, Field):
- if domain is None:
- domain = val.domain
- if codomain is None:
- codomain = val.codomain
-
- if not isinstance(domain, Space):
- raise TypeError(about._errors.cstring("ERROR: invalid input."))
- self.domain = domain
-
- if self.domain.check_codomain(codomain):
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- self.target = self.domain
- self.cotarget = self.codomain
- self.val = Field(domain=self.domain,
- codomain=self.codomain,
- val=val)
-
- self.sym = True
- self.uni = False
- self.imp = True
-
- def set_val(self, new_val, copy=False):
- """
- Sets the field values of the operator
-
- Parameters
- ----------
- newval : {scalar, ndarray, field}
- The new field values. For a scalar, a constant
- diagonal is defined having the value provided. If no domain
- is given, diag must be a field. (default: 1)
-
- Returns
- -------
- None
- """
- self.val = self.val.set_val(new_val=new_val, copy=copy)
-
- def _multiply(self, x, **kwargs):
- y = x.copy_empty(domain=self.target, codomain=self.cotarget)
- y.set_val(new_val=self.val, copy=True)
- y *= self.val.dot(x, axis=())
- return y
-
- def _inverse_multiply(self, x, **kwargs):
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def tr(self, domain=None, axis=None, **kwargs):
- """
- Computes the trace of the operator
-
- Parameters
- ----------
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- tr : float
- Trace of the operator
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- """
- if domain is None or domain == self.domain:
- return self.val.dot(self.val, axis=axis)
- else:
- return super(vecvec_operator, self).tr(domain=domain, **kwargs)
-
- def inverse_tr(self):
- """
- Inverse is ill-defined for this operator.
- """
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def diag(self, bare=False, domain=None, **kwargs):
- """
- Computes the diagonal of the operator.
-
- Parameters
- ----------
- bare : bool, *optional*
- Indicatese whether the diagonal entries are `bare` or not
- (mandatory for the correct incorporation of volume weights)
- (default: False)
- domain : space, *optional*
- space wherein the probes live (default: self.domain)
- target : space, *optional*
- space wherein the transform of the probes live
- (default: None, applies target of the domain)
- random : string, *optional*
- Specifies the pseudo random number generator. Valid
- options are "pm1" for a random vector of +/-1, or "gau"
- for a random vector with entries drawn from a Gaussian
- distribution with zero mean and unit variance.
- (default: "pm1")
- ncpu : int, *optional*
- number of used CPUs to use. (default: 2)
- nrun : int, *optional*
- total number of probes (default: 8)
- nper : int, *optional*
- number of tasks performed by one process (default: 1)
- var : bool, *optional*
- Indicates whether to additionally return the probing variance
- or not (default: False).
- save : bool, *optional*
- whether all individual probing results are saved or not
- (default: False)
- path : string, *optional*
- path wherein the results are saved (default: "tmp")
- prefix : string, *optional*
- prefix for all saved files (default: "")
- loop : bool, *optional*
- Indicates whether or not to perform a loop i.e., to
- parallelise (default: False)
-
- Returns
- -------
- diag : ndarray
- The matrix diagonal
- delta : float, *optional*
- Probing variance of the trace. Returned if `var` is True in
- of probing case.
-
- Notes
- -----
- The ambiguity of `bare` or non-bare diagonal entries is based
- on the choice of a matrix representation of the operator in
- question. The naive choice of absorbing the volume weights
- into the matrix leads to a matrix-vector calculus with the
- non-bare entries which seems intuitive, though. The choice of
- keeping matrix entries and volume weights separate deals with the
- bare entries that allow for correct interpretation of the matrix
- entries; e.g., as variance in case of an covariance operator.
-
- """
- if domain is None or (domain == self.domain):
- diag_val = self.val * self.val.conjugate() # bare diagonal
- # weight if ...
- if not bare:
- diag_val = diag_val.weight(power=1, overwrite=True)
- return diag_val
- else:
- return super(vecvec_operator, self).diag(bare=bare,
- domain=domain,
- **kwargs)
-
- def inverse_diag(self):
- """
- Inverse is ill-defined for this operator.
-
- """
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def det(self):
- """
- Computes the determinant of the operator.
-
- Returns
- -------
- det : 0
- The determinant
-
- """
- return 0
-
- def inverse_det(self):
- """
- Inverse is ill-defined for this operator.
-
- """
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def log_det(self):
- """
- Logarithm of the determinant is ill-defined for this singular
- operator.
-
- """
- raise AttributeError(about._errors.cstring(
- "ERROR: singular operator."))
-
- def __repr__(self):
- return ""
-
-
-class response_operator(operator):
- """
- .. _____ _______ _______ ______ ______ __ ___ _______ _______
- .. / __/ / __ / / _____/ / _ | / _ | / _ | / _____/ / __ /
- .. / / / /____/ /_____ / / /_/ / / /_/ / / / / / /_____ / / /____/
- .. /__/ \______/ /_______/ / ____/ \______/ /__/ /__/ /_______/ \______/ operator class
- .. /__/
-
- NIFTY subclass for response operators (of a certain family)
-
- Any response operator handles Gaussian convolutions, itemwise masking,
- and selective mappings.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- sigma : float, *optional*
- The standard deviation of the Gaussian kernel. Zero indicates
- no convolution. (default: 0)
- mask : {scalar, ndarray}, *optional*
- Masking values for arguments (default: 1)
- assign : {list, ndarray}, *optional*
- Assignments of codomain items to domain items. A list of
- indices/ index tuples or a one/ two-dimensional array.
- (default: None)
- den : bool, *optional*
- Whether to consider the arguments as densities or not.
- Mandatory for the correct incorporation of volume weights.
- (default: False)
- target : space, *optional*
- The space wherein the operator output lives (default: domain)
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- sym : bool
- Indicates whether the operator is self-adjoint or not.
- uni : bool
- Indicates whether the operator is unitary or not.
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not.
- target : space
- The space wherein the operator output lives
- sigma : float
- The standard deviation of the Gaussian kernel. Zero indicates
- no convolution.
- mask : {scalar, ndarray}
- Masking values for arguments
- assign : {list, ndarray}
- Assignments of codomain items to domain items. A list of
- indices/ index tuples or a one/ two-dimensional array.
- den : bool
- Whether to consider the arguments as densities or not.
- Mandatory for the correct incorporation of volume weights.
- """
-
- def __init__(self, domain, codomain=None, sigma=0, mask=1, assign=None,
- den=False, target=None, cotarget=None):
- """
- Sets the standard properties and `density`, `sigma`, `mask` and
- `assignment(s)`.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- sigma : float, *optional*
- The standard deviation of the Gaussian kernel. Zero indicates
- no convolution. (default: 0)
- mask : {scalar, ndarray}, *optional*
- Masking values for arguments (default: 1)
- assign : {list, ndarray}, *optional*
- Assignments of codomain items to domain items. A list of
- indices/ index tuples or a one/ two-dimensional array.
- (default: None)
- den : bool, *optional*
- Whether to consider the arguments as densities or not.
- Mandatory for the correct incorporation of volume weights.
- (default: False)
- target : space, *optional*
- The space wherein the operator output lives (default: domain)
-
- Returns
- -------
- None
- """
- if not isinstance(domain, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: The domain must be a space instance."))
- self.domain = domain
-
- if self.domain.check_codomain(codomain):
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- self.sym = False
- self.uni = False
- self.imp = False
- self.den = bool(den)
-
- self.set_mask(new_mask=mask)
-
- # check sigma
- self.sigma = np.float(sigma)
- if sigma < 0:
- raise ValueError(about._errors.cstring("ERROR: invalid sigma."))
-
- # check assignment(s)
- if assign is None:
- assignments = self.domain.dim
- self.assign = None
- elif isinstance(assign, list):
- # check that the advanced indexing entries are either scalar
- # or all have the same size
- shape_list = map(np.shape, assign)
- shape_list.remove(())
- if len(self.domain.shape) == 1:
- if len(shape_list) == 0:
- assignments = len(assign)
- elif len(shape_list) == 1:
- assignments = len(assign[0])
- else:
- raise ValueError(about._errors.cstring(
- "ERROR: Wrong number of indices!"))
- else:
- if len(assign) != len(self.domain.shape):
- raise ValueError(about._errors.cstring(
- "ERROR: Wrong number of indices!"))
- elif shape_list == []:
- raise ValueError(about._errors.cstring(
- "ERROR: Purely scalar entries in the assign list " +
- "are only valid for one-dimensional fields!"))
- elif not all([x == shape_list[0] for x in shape_list]):
- raise ValueError(about._errors.cstring(
- "ERROR: Non-scalar entries of assign list all must " +
- "have the same shape!"))
- else:
- assignments = reduce(lambda x, y: x * y, shape_list[0])
- self.assign = assign
- else:
- raise ValueError(about._errors.cstring(
- "ERROR: assign must be None or list of arrays!"))
-
- if target is None:
- # set target
- # TODO: Fix the target spaces
- target = Space(assignments,
- dtype=self.domain.dtype,
- distribution_strategy=self.domain.distribution_strategy)
- else:
- # check target
- if not isinstance(target, Space):
- raise TypeError(about._errors.cstring(
- "ERROR: Given target is not a nifty space"))
- elif len(target.shape) > 1:
- raise ValueError(about._errors.cstring(
- "ERROR: Given target must be a one-dimensional space."))
- elif assignments != target.dim:
- raise ValueError(about._errors.cstring(
- "ERROR: dimension mismatch ( " +
- str(assignments) + " <> " +
- str(target.dim) + " )."))
- self.target = target
- if self.target.check_codomain(cotarget):
- self.cotarget = cotarget
- else:
- self.cotarget = self.target.get_codomain()
-
- def set_sigma(self, new_sigma):
- """
- Sets the standard deviation of the response operator, indicating
- the amount of convolution.
-
- Parameters
- ----------
- sigma : float
- The standard deviation of the Gaussian kernel. Zero indicates
- no convolution.
-
- Returns
- -------
- None
- """
- # check sigma
- if new_sigma < 0:
- raise ValueError(about._errors.cstring("ERROR: invalid sigma."))
- self.sigma = np.float(new_sigma)
-
- def set_mask(self, new_mask):
- """
- Sets the masking values of the response operator
-
- Parameters
- ----------
- newmask : {scalar, ndarray}
- masking values for arguments
-
- Returns
- -------
- None
- """
- if np.isscalar(new_mask):
- self.mask = np.bool(new_mask)
- else:
- self.mask = self.domain.cast(new_mask, dtype=np.dtype('bool'),
- hermitianize=False)
-
- def _multiply(self, x, **kwargs):
- # smooth
- y = self.domain.calc_smooth(x.val, sigma=self.sigma)
- # mask
- y *= self.mask
- # assign and return
- if self.assign is not None:
- val = y[self.assign]
- else:
- try:
- val = y.flatten(inplace=True)
- except TypeError:
- val = y.flatten()
- return Field(self.target,
- val=val,
- codomain=self.cotarget)
-
- def _adjoint_multiply(self, x, **kwargs):
- if self.assign is None:
- y = self.domain.cast(x.val)
- else:
- y = self.domain.cast(0)
- y[self.assign] = x.val
-
- y *= self.mask
- y = self.domain.calc_smooth(y, sigma=self.sigma)
- return Field(self.domain,
- val=y,
- codomain=self.codomain)
-
- def _briefing(self, x, domain, codomain, inverse):
- # make sure, that the result_field of the briefing lives in the
- # given domain and codomain
- result_field = Field(domain=domain, val=x, codomain=codomain,
- copy=False)
-
- # weight if necessary
- if (not self.imp) and (not inverse) and \
- self.den:
- result_field = result_field.weight(power=1)
- return result_field
-
- def _debriefing(self, x, y, target, cotarget, inverse):
- # The debriefing takes care that the result field lives in the same
- # fourier-type domain as the input field
- assert(isinstance(y, Field))
-
- # weight if necessary
- if (not self.imp) and (not self.den ^ inverse):
- y = y.weight(power=-1)
-
- return y
-#
-#
-# # > evaluates x and y after `multiply`
-# if y is None:
-# return None
-# else:
-# # inspect y
-# if not isinstance(y, field):
-# y = field(target, codomain=cotarget, val=y)
-# elif y.domain != target:
-# raise ValueError(about._errors.cstring(
-# "ERROR: invalid output domain."))
-# # weight if ...
-# if (not self.imp) and (not target.discrete) and \
-# (not self.den ^ inverse):
-# y = y.weight(power=-1)
-# # inspect x
-# if isinstance(x, field):
-# # repair if the originally field was living in the codomain
-# # of the operators domain
-# if self.domain == self.target == x.codomain:
-# y = y.transform(new_domain=x.domain)
-# if x.domain == y.domain and (x.codomain != y.codomain):
-# y.set_codomain(new_codomain=x.codomain)
-# return y
-
- def __repr__(self):
- return ""
-
-
-class invertible_operator(operator):
- """
- .. __ __ __ __ __
- .. /__/ / /_ /__/ / / / /
- .. __ __ ___ __ __ _______ _____ / _/ __ / /___ / / _______
- .. / / / _ || |/ / / __ / / __/ / / / / / _ | / / / __ /
- .. / / / / / / | / / /____/ / / / /_ / / / /_/ / / /_ / /____/
- .. /__/ /__/ /__/ |____/ \______/ /__/ \___/ /__/ \______/ \___/ \______/ operator class
-
- NIFTY subclass for invertible, self-adjoint (linear) operators
-
- The invertible operator class is an abstract class for self-adjoint or
- symmetric (linear) operators from which other more specific operator
- subclassescan be derived. Such operators inherit an automated inversion
- routine, namely conjugate gradient.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- uni : bool, *optional*
- Indicates whether the operator is unitary or not.
- (default: False)
- imp : bool, *optional*
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not (default: False).
- para : {single object, tuple/list of objects}, *optional*
- This is a freeform tuple/list of parameters that derivatives of
- the operator class can use (default: None).
-
- See Also
- --------
- operator
-
- Notes
- -----
- This class is not meant to be instantiated. Operator classes derived
- from this one only need a `_multiply` or `_inverse_multiply` instance
- method to perform the other. However, one of them needs to be defined.
-
- Attributes
- ----------
- domain : space
- The space wherein valid arguments live.
- sym : bool
- Indicates whether the operator is self-adjoint or not.
- uni : bool
- Indicates whether the operator is unitary or not.
- imp : bool
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not.
- target : space
- The space wherein the operator output lives.
- para : {single object, list of objects}
- This is a freeform tuple/list of parameters that derivatives of
- the operator class can use. Not used in the base operators.
-
- """
-
- def __init__(self, domain, codomain=None, uni=False, imp=False):
- """
- Sets the standard operator properties.
-
- Parameters
- ----------
- domain : space
- The space wherein valid arguments live.
- uni : bool, *optional*
- Indicates whether the operator is unitary or not.
- (default: False)
- imp : bool, *optional*
- Indicates whether the incorporation of volume weights in
- multiplications is already implemented in the `multiply`
- instance methods or not (default: False).
- para : {single object, tuple/list of objects}, *optional*
- This is a freeform tuple/list of parameters that derivatives of
- the operator class can use (default: None).
-
- """
- if not isinstance(domain, Space):
- raise TypeError(about._errors.cstring("ERROR: invalid input."))
- self.domain = domain
- if self.domain.check_codomain(codomain):
- self.codomain = codomain
- else:
- self.codomain = self.domain.get_codomain()
-
- self.sym = True
- self.uni = bool(uni)
-
- self.imp = bool(imp)
-
- self.target = self.domain
- self.cotarget = self.codomain
-
- def _multiply(self, x, force=False, W=None, spam=None, reset=None,
- note=False, x0=None, tol=1E-4, clevel=1, limii=None,
- **kwargs):
- """
- Applies the invertible operator to a given field by invoking a
- conjugate gradient.
-
- Parameters
- ----------
- x : field
- Valid input field.
- force : bool
- Indicates wheter to return a field instead of ``None`` in case
- the conjugate gradient fails.
-
- Returns
- -------
- OIIx : field
- Mapped field with suitable domain.
-
- See Also
- --------
- conjugate_gradient
-
- Other Parameters
- ----------------
- W : {operator, function}, *optional*
- Operator `W` that is a preconditioner on `A` and is applicable
- to a
- field (default: None).
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- reset : integer, *optional*
- Number of iterations after which to restart; i.e., forget
- previous
- conjugated directions (default: sqrt(b.dim)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
- x0 : field, *optional*
- Starting guess for the minimization.
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by current relative
- residual (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 1).
- limii : integer, *optional*
- Maximum number of iterations performed
- (default: 10 * b.dim).
-
- """
- x_, convergence = conjugate_gradient(self.inverse_times,
- x,
- W=W,
- spam=spam,
- reset=reset,
- note=note)(x0=x0,
- tol=tol,
- clevel=clevel,
- limii=limii)
- # check convergence
- if not convergence:
- if not force or x_ is None:
- return None
- about.warnings.cprint("WARNING: conjugate gradient failed.")
- # TODO: A weighting here shoud be wrong, as this is done by
- # the (de)briefing methods -> Check!
-# # weight if ...
-# if not self.imp: # continiuos domain/target
-# x_.weight(power=-1, overwrite=True)
- return x_
-
- def _inverse_multiply(self, x, force=False, W=None, spam=None, reset=None,
- note=False, x0=None, tol=1E-4, clevel=1, limii=None,
- **kwargs):
- """
- Applies the inverse of the invertible operator to a given field by
- invoking a conjugate gradient.
-
- Parameters
- ----------
- x : field
- Valid input field.
- force : bool
- Indicates wheter to return a field instead of ``None`` in case
- the conjugate gradient fails.
-
- Returns
- -------
- OIx : field
- Mapped field with suitable domain.
-
- See Also
- --------
- conjugate_gradient
-
- Other Parameters
- ----------------
- W : {operator, function}, *optional*
- Operator `W` that is a preconditioner on `A` and is applicable
- to a
- field (default: None).
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- reset : integer, *optional*
- Number of iterations after which to restart; i.e., forget
- previous
- conjugated directions (default: sqrt(b.dim)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
- x0 : field, *optional*
- Starting guess for the minimization.
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by current relative
- residual (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 1).
- limii : integer, *optional*
- Maximum number of iterations performed
- (default: 10 * b.dim).
-
- """
- x_, convergence = conjugate_gradient(self.times,
- x,
- W=W,
- spam=spam,
- reset=reset,
- note=note)(x0=x0,
- tol=tol,
- clevel=clevel,
- limii=limii)
- # check convergence
- if not convergence:
- if not force or x_ is None:
- return None
- about.warnings.cprint("WARNING: conjugate gradient failed.")
- # TODO: A weighting here shoud be wrong, as this is done by
- # the (de)briefing methods -> Check!
-# # weight if ...
-# if not self.imp: # continiuos domain/target
-# x_.weight(power=1, overwrite=True)
- return x_
-
- def __repr__(self):
- return ""
-
-
-
-class propagator_operator(operator):
- """
- .. __
- .. / /_
- .. _______ _____ ______ ______ ____ __ ____ __ ____ __ / _/ ______ _____
- .. / _ / / __/ / _ | / _ | / _ / / _ / / _ / / / / _ | / __/
- .. / /_/ / / / / /_/ / / /_/ / / /_/ / / /_/ / / /_/ / / /_ / /_/ / / /
- .. / ____/ /__/ \______/ / ____/ \______| \___ / \______| \___/ \______/ /__/ operator class
- .. /__/ /__/ /______/
-
- NIFTY subclass for propagator operators (of a certain family)
-
- The propagator operators :math:`D` implemented here have an inverse
- formulation like :math:`(S^{-1} + M)`, :math:`(S^{-1} + N^{-1})`, or
- :math:`(S^{-1} + R^\dagger N^{-1} R)` as appearing in Wiener filter
- theory.
-
- Parameters
- ----------
- S : operator
- Covariance of the signal prior.
- M : operator
- Likelihood contribution.
- R : operator
- Response operator translating signal to (noiseless) data.
- N : operator
- Covariance of the noise prior or the likelihood, respectively.
-
- See Also
- --------
- conjugate_gradient
-
- Notes
- -----
- The propagator will puzzle the operators `S` and `M` or `R`, `N` or
- only `N` together in the predefined from, a domain is set
- automatically. The application of the inverse is done by invoking a
- conjugate gradient.
- Note that changes to `S`, `M`, `R` or `N` auto-update the propagator.
-
- Examples
- --------
- >>> f = field(rg_space(4), val=[2, 4, 6, 8])
- >>> S = power_operator(f.target, spec=1)
- >>> N = diagonal_operator(f.domain, diag=1)
- >>> D = propagator_operator(S=S, N=N) # D^{-1} = S^{-1} + N^{-1}
- >>> D(f).val
- array([ 1., 2., 3., 4.])
-
- Attributes
- ----------
- domain : space
- A space wherein valid arguments live.
- codomain : space
- An alternative space wherein valid arguments live; commonly the
- codomain of the `domain` attribute.
- sym : bool
- Indicates that the operator is self-adjoint.
- uni : bool
- Indicates that the operator is not unitary.
- imp : bool
- Indicates that volume weights are implemented in the `multiply`
- instance methods.
- target : space
- The space wherein the operator output lives.
- _A1 : {operator, function}
- Application of :math:`S^{-1}` to a field.
- _A2 : {operator, function}
- Application of all operations not included in `A1` to a field.
- RN : {2-tuple of operators}, *optional*
- Contains `R` and `N` if given.
-
- """
-
- def __init__(self, S=None, M=None, R=None, N=None):
- """
- Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
- and `RN` if required.
-
- Parameters
- ----------
- S : operator
- Covariance of the signal prior.
- M : operator
- Likelihood contribution.
- R : operator
- Response operator translating signal to (noiseless) data.
- N : operator
- Covariance of the noise prior or the likelihood, respectively.
-
- """
-
- # parse the signal prior covariance
- if not isinstance(S, operator):
- raise ValueError(about._errors.cstring(
- "ERROR: The given S is not an operator."))
-
- self.S = S
- self.S_inverse_times = self.S.inverse_times
-
- # take signal-space domain from S as the domain for D
- S_is_harmonic = False
- if hasattr(S.domain, 'harmonic'):
- if S.domain.harmonic:
- S_is_harmonic = True
-
- if S_is_harmonic:
- self.domain = S.codomain
- self.codomain = S.domain
- else:
- self.domain = S.domain
- self.codomain = S.codomain
-
- self.target = self.domain
- self.cotarget = self.codomain
-
- # build up the likelihood contribution
- (self.M_times,
- M_domain,
- M_codomain,
- M_target,
- M_cotarget) = self._build_likelihood_contribution(M, R, N)
-
- # assert that S and M have matching domains
- if not (self.domain == M_domain and
- self.codomain == M_codomain and
- self.target == M_target and
- self.cotarget == M_cotarget):
- raise ValueError(about._errors.cstring(
- "ERROR: The (co)domains and (co)targets of the prior " +
- "signal covariance and the likelihood contribution must be " +
- "the same in the sense of '=='."))
-
- self.sym = True
- self.uni = False
- self.imp = True
-
- def _build_likelihood_contribution(self, M, R, N):
- # if a M is given, return its times method and its domains
- # supplier and discard R and N
- if M is not None:
- return (M.times, M.domain, M.codomain, M.target, M.cotarget)
-
- if N is not None:
- if R is not None:
- return (lambda z: R.adjoint_times(N.inverse_times(R.times(z))),
- R.domain, R.codomain, R.domain, R.codomain)
- else:
- return (N.inverse_times,
- N.domain, N.codomain, N.target, N.cotarget)
- else:
- raise ValueError(about._errors.cstring(
- "ERROR: At least M or N must be given."))
-
- def _multiply(self, x, W=None, spam=None, reset=None, note=True,
- x0=None, tol=1E-3, clevel=1, limii=1000, **kwargs):
-
- if W is None:
- W = self.S
- (result, convergence) = conjugate_gradient(self._inverse_multiply,
- x,
- W=W,
- spam=spam,
- reset=reset,
- note=note)(x0=x0,
- tol=tol,
- clevel=clevel,
- limii=limii)
- # evaluate
- if not convergence:
- about.warnings.cprint("WARNING: conjugate gradient failed.")
-
- return result
-
- def _inverse_multiply(self, x, **kwargs):
- result = self.S_inverse_times(x)
- result += self.M_times(x)
- return result
-
-
-class propagator_operator_old(operator):
- """
- .. __
- .. / /_
- .. _______ _____ ______ ______ ____ __ ____ __ ____ __ / _/ ______ _____
- .. / _ / / __/ / _ | / _ | / _ / / _ / / _ / / / / _ | / __/
- .. / /_/ / / / / /_/ / / /_/ / / /_/ / / /_/ / / /_/ / / /_ / /_/ / / /
- .. / ____/ /__/ \______/ / ____/ \______| \___ / \______| \___/ \______/ /__/ operator class
- .. /__/ /__/ /______/
-
- NIFTY subclass for propagator operators (of a certain family)
-
- The propagator operators :math:`D` implemented here have an inverse
- formulation like :math:`(S^{-1} + M)`, :math:`(S^{-1} + N^{-1})`, or
- :math:`(S^{-1} + R^\dagger N^{-1} R)` as appearing in Wiener filter
- theory.
-
- Parameters
- ----------
- S : operator
- Covariance of the signal prior.
- M : operator
- Likelihood contribution.
- R : operator
- Response operator translating signal to (noiseless) data.
- N : operator
- Covariance of the noise prior or the likelihood, respectively.
-
- See Also
- --------
- conjugate_gradient
-
- Notes
- -----
- The propagator will puzzle the operators `S` and `M` or `R`, `N` or
- only `N` together in the predefined from, a domain is set
- automatically. The application of the inverse is done by invoking a
- conjugate gradient.
- Note that changes to `S`, `M`, `R` or `N` auto-update the propagator.
-
- Examples
- --------
- >>> f = field(rg_space(4), val=[2, 4, 6, 8])
- >>> S = power_operator(f.target, spec=1)
- >>> N = diagonal_operator(f.domain, diag=1)
- >>> D = propagator_operator(S=S, N=N) # D^{-1} = S^{-1} + N^{-1}
- >>> D(f).val
- array([ 1., 2., 3., 4.])
-
- Attributes
- ----------
- domain : space
- A space wherein valid arguments live.
- codomain : space
- An alternative space wherein valid arguments live; commonly the
- codomain of the `domain` attribute.
- sym : bool
- Indicates that the operator is self-adjoint.
- uni : bool
- Indicates that the operator is not unitary.
- imp : bool
- Indicates that volume weights are implemented in the `multiply`
- instance methods.
- target : space
- The space wherein the operator output lives.
- _A1 : {operator, function}
- Application of :math:`S^{-1}` to a field.
- _A2 : {operator, function}
- Application of all operations not included in `A1` to a field.
- RN : {2-tuple of operators}, *optional*
- Contains `R` and `N` if given.
-
- """
-
- def __init__(self, S=None, M=None, R=None, N=None):
- """
- Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
- and `RN` if required.
-
- Parameters
- ----------
- S : operator
- Covariance of the signal prior.
- M : operator
- Likelihood contribution.
- R : operator
- Response operator translating signal to (noiseless) data.
- N : operator
- Covariance of the noise prior or the likelihood, respectively.
-
- """
- # check signal prior covariance
- if(S is None):
- raise Exception(about._errors.cstring(
- "ERROR: insufficient input."))
- elif(not isinstance(S, operator)):
- raise ValueError(about._errors.cstring("ERROR: invalid input."))
- space1 = S.domain
-
- # check likelihood (pseudo) covariance
- if(M is None):
- if(N is None):
- raise Exception(about._errors.cstring(
- "ERROR: insufficient input."))
- elif(not isinstance(N, operator)):
- raise ValueError(about._errors.cstring(
- "ERROR: invalid input."))
- if(R is None):
- space2 = N.domain
- elif(not isinstance(R, operator)):
- raise ValueError(about._errors.cstring(
- "ERROR: invalid input."))
- else:
- space2 = R.domain
- elif(not isinstance(M, operator)):
- raise ValueError(about._errors.cstring("ERROR: invalid input."))
- else:
- space2 = M.domain
-
- # set spaces
- self.domain = space2
-
- if(self.domain.check_codomain(space1)):
- self.codomain = space1
- else:
- raise ValueError(about._errors.cstring("ERROR: invalid input."))
- self.target = self.domain
- self.cotarget = self.codomain
-
- # define A1 == S_inverse
- self.S = S
- if(isinstance(S, diagonal_operator)):
- self._A1 = S._inverse_multiply # S.imp == True
- else:
- self._A1 = S.inverse_times
-
- # define A2 == M == R_adjoint N_inverse R == N_inverse
- if(M is None):
- if(R is not None):
- self.RN = (R, N)
- if(isinstance(N, diagonal_operator)):
- self._A2 = self._standard_M_times_1
- else:
- self._A2 = self._standard_M_times_2
- elif(isinstance(N, diagonal_operator)):
- self._A2 = N._inverse_multiply # N.imp == True
- else:
- self._A2 = N.inverse_times
- elif(isinstance(M, diagonal_operator)):
- self._A2 = M._multiply # M.imp == True
- else:
- self._A2 = M.times
-
- self.sym = True
- self.uni = False
- self.imp = True
-
- # applies > R_adjoint N_inverse R assuming N is diagonal
- def _standard_M_times_1(self, x, **kwargs):
- # N.imp = True
- return self.RN[0].adjoint_times(self.RN[1]._inverse_multiply(self.RN[0].times(x)))
-
- def _standard_M_times_2(self, x, **kwargs): # applies > R_adjoint N_inverse R
- return self.RN[0].adjoint_times(self.RN[1].inverse_times(self.RN[0].times(x)))
-
- # > applies A1 + A2 in self.codomain
- def _inverse_multiply_1(self, x, **kwargs):
- return self._A1(x, pseudo=True) + self._A2(x.transform(self.domain)).transform(self.codomain)
-
- def _inverse_multiply_2(self, x, **kwargs): # > applies A1 + A2 in self.domain
- transformed_x = x.transform(self.codomain)
- aed_x = self._A1(transformed_x, pseudo=True)
- #print (vars(aed_x),)
- transformed_aed = aed_x.transform(self.domain)
- #print (vars(transformed_aed),)
- temp_to_add = self._A2(x)
- added = transformed_aed + temp_to_add
- return added
- # return
- # self._A1(x.transform(self.codomain),pseudo=True).transform(self.domain)+self._A2(x)
-
- def _briefing(self, x): # > prepares x for `multiply`
- # inspect x
- if not isinstance(x, Field):
- return (Field(self.domain, codomain=self.codomain,
- val=x),
- False)
- # check x.domain
- elif x.domain == self.domain:
- return (x, False)
- elif x.domain == self.codomain:
- return (x, True)
- # transform
- else:
- return (x.transform(new_domain=self.codomain,
- overwrite=False),
- True)
-
- def _debriefing(self, x, x_, in_codomain): # > evaluates x and x_ after `multiply`
- if x_ is None:
- return None
- # inspect x
- elif isinstance(x, Field):
- # repair ...
- if in_codomain == True and x.domain != self.codomain:
- x_ = x_.transform(new_domain=x.domain) # ... domain
- if x_.codomain != x.codomain:
- x_.set_codomain(new_codomain=x.codomain) # ... codomain
- return x_
-
- def times(self, x, W=None, spam=None, reset=None, note=False, x0=None, tol=1E-4, clevel=1, limii=None, **kwargs):
- """
- Applies the propagator to a given object by invoking a
- conjugate gradient.
-
- Parameters
- ----------
- x : {scalar, list, array, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the domain of the operator.
- force : bool
- Indicates wheter to return a field instead of ``None`` in case
- the conjugate gradient fails.
-
- Returns
- -------
- Dx : field
- Mapped field with suitable domain.
-
- See Also
- --------
- conjugate_gradient
-
- Other Parameters
- ----------------
- W : {operator, function}, *optional*
- Operator `W` that is a preconditioner on `A` and is applicable to a
- field (default: None).
- spam : function, *optional*
- Callback function which is given the current `x` and iteration
- counter each iteration (default: None).
- reset : integer, *optional*
- Number of iterations after which to restart; i.e., forget previous
- conjugated directions (default: sqrt(b.dim)).
- note : bool, *optional*
- Indicates whether notes are printed or not (default: False).
- x0 : field, *optional*
- Starting guess for the minimization.
- tol : scalar, *optional*
- Tolerance specifying convergence; measured by current relative
- residual (default: 1E-4).
- clevel : integer, *optional*
- Number of times the tolerance should be undershot before
- exiting (default: 1).
- limii : integer, *optional*
- Maximum number of iterations performed (default: 10 * b.dim).
-
- """
- if W is None:
- W = self.S
- # prepare
- x_, in_codomain = self._briefing(x)
- # apply operator
- if(in_codomain):
- A = self._inverse_multiply_1
- else:
- A = self._inverse_multiply_2
- x_, convergence = conjugate_gradient(A, x_, W=W, spam=spam, reset=reset, note=note)(
- x0=x0, tol=tol, clevel=clevel, limii=limii)
- # evaluate
- if not convergence:
- about.warnings.cprint("WARNING: conjugate gradient failed.")
- return self._debriefing(x, x_, in_codomain)
-
- def inverse_times(self, x, **kwargs):
- """
- Applies the inverse propagator to a given object.
-
- Parameters
- ----------
- x : {scalar, list, array, field}
- Scalars are interpreted as constant arrays, and an array will
- be interpreted as a field on the domain of the operator.
-
- Returns
- -------
- DIx : field
- Mapped field with suitable domain.
-
- """
- # prepare
- x_, in_codomain = self._briefing(x)
- # apply operator
- if(in_codomain):
- x_ = self._inverse_multiply_1(x_)
- else:
- x_ = self._inverse_multiply_2(x_)
- # evaluate
- return self._debriefing(x, x_, in_codomain)
-
- def __repr__(self):
- return ""
diff --git a/nifty/operators/nifty_probing.py b/nifty/operators/nifty_probing.py
deleted file mode 100644
index c20c91704c0efac8899c4cfebf7600677d60fc58..0000000000000000000000000000000000000000
--- a/nifty/operators/nifty_probing.py
+++ /dev/null
@@ -1,496 +0,0 @@
-# NIFTY (Numerical Information Field Theory) has been developed at the
-# Max-Planck-Institute for Astrophysics.
-#
-# Copyright (C) 2015 Max-Planck-Society
-#
-# Author: Theo Steininger
-# Project homepage:
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-from __future__ import division
-
-from nifty.config import about
-from nifty.spaces.space import Space
-from nifty.field import Field
-from nifty.nifty_utilities import direct_vdot
-
-
-class prober(object):
- """
- .. __ __
- .. / / /__/
- .. ______ _____ ______ / /___ __ __ ___ ____ __
- .. / _ | / __/ / _ | / _ | / / / _ | / _ /
- .. / /_/ / / / / /_/ / / /_/ / / / / / / / / /_/ /
- .. / ____/ /__/ \______/ \______/ /__/ /__/ /__/ \____ / class
- .. /__/ /______/
-
- NIFTY class for probing (using multiprocessing)
-
- This is the base NIFTY probing class from which other probing classes
- (e.g. diagonal probing) are derived.
-
- When called, a probing class instance evaluates an operator or a
- function using random fields, whose components are random variables
- with mean 0 and variance 1. When an instance is called it returns the
- mean value of f(probe), where probe is a random field with mean 0 and
- variance 1. The mean is calculated as 1/N Sum[ f(probe_i) ].
-
- Parameters
- ----------
- op : operator
- The operator specified by `op` is the operator to be probed.
- If no operator is given, then probing will be done by applying
- `function` to the probes. (default: None)
- function : function, *optional*
- If no operator has been specified as `op`, then specification of
- `function` is non optional. This is the function, that is applied
- to the probes. (default: `op.times`)
- domain : space, *optional*
- If no operator has been specified as `op`, then specification of
- `domain` is non optional. This is the space that the probes live
- in. (default: `op.domain`)
- target : domain, *optional*
- `target` is the codomain of `domain`
- (default: `op.domain.get_codomain()`)
- random : string, *optional*
- the distribution from which the probes are drawn. `random` can be
- either "pm1" or "gau". "pm1" is a uniform distribution over {+1,-1}
- or {+1,+i,-1,-i}, respectively. "gau" is a normal distribution with
- zero-mean and unit-variance (default: "pm1")
- ncpu : int, *optional*
- the number of cpus to be used from parallel probing. (default: 2)
- nrun : int, *optional*
- the number of probes to be evaluated. If `nrun infinite variance.")
- return (sum_of_probes, None)
- else:
- var = 1/(num-1)*(sum_of_squares - 1/num*(sum_of_probes**2))
- return (sum_of_probes*(1./num), var)
- else:
- return sum_of_probes*(1./num)
-
- def print_progress(self, num): # > prints progress status upto 10 dots
- tenths = 1+(10*num//self.nrun)
- about.infos.cflush(("\b")*10+('.')*tenths+(' ')*(10-tenths))
- """
- def _single_probing(self,zipped): # > performs one probing operation
- # generate probe
- np.random.seed(zipped[0])
- probe = self.gen_probe()
- # do the actual probing
- return self.probing(zipped[1],probe)
- """
-
- def probe(self): # > performs the probing operations one after another
- # initialize the variables
- sum_of_probes = 0
- sum_of_squares = 0
- num = 0
-
- for ii in xrange(self.nrun):
- print ('running probe ', ii)
- temp_probe = self.generate_probe()
- temp_result = self.evaluate_probe(probe=temp_probe)
-
- if temp_result is not None:
- sum_of_probes += temp_result
- if self.varQ:
- sum_of_squares += ((temp_result).conjugate())*temp_result
- num += 1
- self.print_progress(num)
- about.infos.cflush(" done.")
- # evaluate
- return self.finalize(sum_of_probes, sum_of_squares, num)
-
- def __call__(self, loop=False, **kwargs):
- """
-
- Starts the probing process.
- All keyword arguments that can be given to `configure` can also be
- given to `__call__` and have the same effect.
-
- Parameters
- ----------
- loop : bool, *optional*
- if `loop` is True, then multiprocessing will be disabled and
- all probes are evaluated by a single worker (default: False)
-
- Returns
- -------
- results : see **Returns** in `evaluate`
-
- other parameters
- ----------------
- kwargs : see **Parameters** in `configure`
-
- """
- self.configure(**kwargs)
- return self.probe()
-
- def __repr__(self):
- return ""
-
-
-class _specialized_prober(object):
- def __init__(self, operator, domain=None, inverseQ=False, **kwargs):
- # remove a potentially supplied function keyword argument
- try:
- kwargs.pop('function')
- except(KeyError):
- pass
- else:
- about.warnings.cprint(
- "WARNING: Dropped the supplied function keyword-argument!")
-
- if domain is None and not inverseQ:
- kwargs.update({'domain': operator.domain})
- elif domain is None and inverseQ:
- kwargs.update({'domain': operator.target})
- else:
- kwargs.update({'domain': domain})
- self.operator = operator
-
- self.prober = prober(function=self._probing_function,
- **kwargs)
-
- def _probing_function(self, probe):
- return None
-
- def __call__(self, *args, **kwargs):
- return self.prober(*args, **kwargs)
-
- def __getattr__(self, attr):
- return getattr(self.prober, attr)
-
-
-class trace_prober(_specialized_prober):
- def __init__(self, operator, **kwargs):
- super(trace_prober, self).__init__(operator=operator,
- inverseQ=False,
- **kwargs)
-
- def _probing_function(self, probe):
- return direct_vdot(probe.conjugate(), self.operator.times(probe))
-
-
-class inverse_trace_prober(_specialized_prober):
- def __init__(self, operator, **kwargs):
- super(inverse_trace_prober, self).__init__(operator=operator,
- inverseQ=True,
- **kwargs)
-
- def _probing_function(self, probe):
- return direct_vdot(probe.conjugate(),
- self.operator.inverse_times(probe))
-
-
-class diagonal_prober(_specialized_prober):
- def __init__(self, **kwargs):
- super(diagonal_prober, self).__init__(inverseQ=False,
- **kwargs)
-
- def _probing_function(self, probe):
- return probe.conjugate()*self.operator.times(probe)
-
-
-class inverse_diagonal_prober(_specialized_prober):
- def __init__(self, operator, **kwargs):
- super(inverse_diagonal_prober, self).__init__(operator=operator,
- inverseQ=True,
- **kwargs)
-
- def _probing_function(self, probe):
- return probe.conjugate()*self.operator.inverse_times(probe)
diff --git a/nifty/operators/propagator_operator/propagator_operator.py b/nifty/operators/propagator_operator/propagator_operator.py
index 039d5e80461ffc36be050a2475e33dbf03baf10a..fa949bfbed286eea5d61cd4a61f4b0e03ba79fa3 100644
--- a/nifty/operators/propagator_operator/propagator_operator.py
+++ b/nifty/operators/propagator_operator/propagator_operator.py
@@ -1,9 +1,11 @@
# -*- coding: utf-8 -*-
-from nifty.config import about
from nifty.minimization import ConjugateGradient
from nifty.operators.linear_operator import LinearOperator
+import logging
+logger = logging.getLogger('NIFTy.PropagatorOperator')
+
class PropagatorOperator(LinearOperator):
"""
@@ -114,15 +116,15 @@ class PropagatorOperator(LinearOperator):
self.field_type == M_target and
self.target == M_target and
self.field_type_target == M_field_type_target):
- raise ValueError(about._errors.cstring(
- "ERROR: The domains and targets of the prior " +
+ raise ValueError(
+ "The domains and targets of the prior " +
"signal covariance and the likelihood contribution must be " +
- "the same in the sense of '=='."))
+ "the same in the sense of '=='.")
if inverter is not None:
self.inverter = inverter
else:
- self.inverter = conjugate_gradient()
+ self.inverter = ConjugateGradient()
# ---Mandatory properties and methods---
@@ -170,8 +172,8 @@ class PropagatorOperator(LinearOperator):
return (N.inverse_times,
N.domain, N.field_type, N.target, N.field_type_target)
else:
- raise ValueError(about._errors.cstring(
- "ERROR: At least M or N must be given."))
+ raise ValueError(
+ "At least M or N must be given.")
def _multiply(self, x, W=None, spam=None, reset=None, note=False,
x0=None, tol=1E-4, clevel=1, limii=None, **kwargs):
@@ -190,7 +192,7 @@ class PropagatorOperator(LinearOperator):
limii=limii)
# evaluate
if not convergence:
- about.warnings.cprint("WARNING: conjugate gradient failed.")
+ logger.warn("conjugate gradient failed.")
return result
diff --git a/nifty/operators/smoothing_operator/smoothing_operator.py b/nifty/operators/smoothing_operator/smoothing_operator.py
index 4f5e5b66374d28b813682e51960140b3e83acdd1..f912bb3537b39a1ff136e1d2e3a42be152d20a08 100644
--- a/nifty/operators/smoothing_operator/smoothing_operator.py
+++ b/nifty/operators/smoothing_operator/smoothing_operator.py
@@ -1,6 +1,5 @@
import numpy as np
-from nifty.config import about
import nifty.nifty_utilities as utilities
from nifty import RGSpace, LMSpace
from nifty.operators.endomorphic_operator import EndomorphicOperator
@@ -17,16 +16,16 @@ class SmoothingOperator(EndomorphicOperator):
if len(self.domain) != 1:
raise ValueError(
- about._errors.cstring(
+
'ERROR: SmoothOperator accepts only exactly one '
- 'space as input domain.')
+ 'space as input domain.'
)
if self.field_type != ():
- raise ValueError(about._errors.cstring(
+ raise ValueError(
'ERROR: SmoothOperator field-type must be an '
'empty tuple.'
- ))
+ )
self._sigma = sigma
diff --git a/nifty/power_conversion.py b/nifty/power_conversion.py
deleted file mode 100644
index ef0a4aedbdcb6a5b3e5ac4578abb2613ea975a53..0000000000000000000000000000000000000000
--- a/nifty/power_conversion.py
+++ /dev/null
@@ -1,311 +0,0 @@
-## NIFTY (Numerical Information Field Theory) has been developed at the
-## Max-Planck-Institute for Astrophysics.
-##
-## Copyright (C) 2015 Max-Planck-Society
-##
-## Author: Marco Selig
-## Project homepage:
-##
-## This program is free software: you can redistribute it and/or modify
-## it under the terms of the GNU General Public License as published by
-## the Free Software Foundation, either version 3 of the License, or
-## (at your option) any later version.
-##
-## This program is distributed in the hope that it will be useful,
-## but WITHOUT ANY WARRANTY; without even the implied warranty of
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-## See the GNU General Public License for more details.
-##
-## You should have received a copy of the GNU General Public License
-## along with this program. If not, see .
-
-import numpy as np
-from numpy import pi
-from nifty.config import about
-from nifty.field import Field
-from nifty.nifty_simple_math import sqrt, exp, log
-
-
-def power_backward_conversion_lm(k_space, p, mean=None):
- """
- This function is designed to convert a theoretical/statistical power
- spectrum of a log-normal field to the theoretical power spectrum of
- the underlying Gaussian field.
- The function only works for power spectra defined for lm_spaces
-
- Parameters
- ----------
- k_space : nifty.rg_space,
- a regular grid space with the attribute `Fourier = True`
- p : np.array,
- the power spectrum of the log-normal field.
- Needs to have the same number of entries as
- `k_space.get_power_indices()[0]`
- mean : float, *optional*
- specifies the mean of the log-normal field. If `mean` is not
- specified the function will use the monopole of the power spectrum.
- If it is specified the function will NOT use the monopole of the
- spectrum. (default: None)
- WARNING: a mean that is too low can violate positive definiteness
- of the log-normal field. In this case the function produces an
- error.
-
- Returns
- -------
- mean : float,
- the recovered mean of the underlying Gaussian distribution.
- p1 : np.array,
- the power spectrum of the underlying Gaussian field, where the
- monopole has been set to zero. Eventual monopole power has been
- shifted to the mean.
-
- References
- ----------
- .. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter power spectrum";
- `arXiv:1312.1354 `_
- """
-
- p = np.copy(p)
- if(mean is not None):
- p[0] = 4*pi*mean**2
-
- klen = k_space.get_power_indices()[0]
- C_0_Omega = Field(k_space,val=0)
- C_0_Omega.val[:len(klen)] = p*sqrt(2*klen+1)/sqrt(4*pi)
- C_0_Omega = C_0_Omega.transform()
-
- if(np.any(C_0_Omega.val<0.)):
- raise ValueError(about._errors.cstring("ERROR: spectrum or mean incompatible with positive definiteness.\n Try increasing the mean."))
- return None
-
- lC = log(C_0_Omega)
-
- Z = lC.transform()
-
- spec = Z.val[:len(klen)]
-
- mean = (spec[0]-0.5*sqrt(4*pi)*log((p*(2*klen+1)/(4*pi)).sum()))/sqrt(4*pi)
-
- spec[0] = 0.
-
- spec = spec*sqrt(4*pi)/sqrt(2*klen+1)
-
- spec = np.real(spec)
-
- if(np.any(spec<0.)):
- spec = spec*(spec>0.)
- about.warnings.cprint("WARNING: negative modes set to zero.")
-
- return mean.real,spec
-
-
-def power_forward_conversion_lm(k_space,p,mean=0):
- """
- This function is designed to convert a theoretical/statistical power
- spectrum of a Gaussian field to the theoretical power spectrum of
- the exponentiated field.
- The function only works for power spectra defined for lm_spaces
-
- Parameters
- ----------
- k_space : nifty.rg_space,
- a regular grid space with the attribute `Fourier = True`
- p : np.array,
- the power spectrum of the Gaussian field.
- Needs to have the same number of entries as
- `k_space.get_power_indices()[0]`
- m : float, *optional*
- specifies the mean of the Gaussian field (default: 0).
-
- Returns
- -------
- p1 : np.array,
- the power spectrum of the exponentiated Gaussian field.
-
- References
- ----------
- .. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter power spectrum";
- `arXiv:1312.1354 `_
- """
- m = mean
- klen = k_space.get_power_indices()[0]
- C_0_Omega = Field(k_space,val=0)
- C_0_Omega.val[:len(klen)] = p*sqrt(2*klen+1)/sqrt(4*pi)
- C_0_Omega = C_0_Omega.transform()
-
- C_0_0 = (p*(2*klen+1)/(4*pi)).sum()
-
- exC = exp(C_0_Omega+C_0_0+2*m)
-
- Z = exC.transform()
-
- spec = Z.val[:len(klen)]
-
- spec = spec*sqrt(4*pi)/sqrt(2*klen+1)
-
- spec = np.real(spec)
-
- if(np.any(spec<0.)):
- spec = spec*(spec>0.)
- about.warnings.cprint("WARNING: negative modes set to zero.")
-
- return spec
-
-
-def power_backward_conversion_rg(k_space, p, mean=None, bare=True):
- """
- This function is designed to convert a theoretical/statistical power
- spectrum of a log-normal field to the theoretical power spectrum of
- the underlying Gaussian field.
- The function only works for power spectra defined for rg_spaces
-
- Parameters
- ----------
- k_space : nifty.rg_space,
- a regular grid space with the attribute `Fourier = True`
- p : np.array,
- the power spectrum of the log-normal field.
- Needs to have the same number of entries as
- `k_space.get_power_indices()[0]`
- mean : float, *optional*
- specifies the mean of the log-normal field. If `mean` is not
- specified the function will use the monopole of the power spectrum.
- If it is specified the function will NOT use the monopole of the
- spectrum (default: None).
- WARNING: a mean that is too low can violate positive definiteness
- of the log-normal field. In this case the function produces an
- error.
- bare : bool, *optional*
- whether `p` is the bare power spectrum or not (default: True).
-
- Returns
- -------
- mean : float,
- the recovered mean of the underlying Gaussian distribution.
- p1 : np.array,
- the power spectrum of the underlying Gaussian field, where the
- monopole has been set to zero. Eventual monopole power has been
- shifted to the mean.
-
- References
- ----------
- .. [#] M. Greiner and T.A. Ensslin, "Log-transforming the matter
- power spectrum";
- `arXiv:1312.1354 `_
- """
-
- pindex = k_space.power_indices['pindex']
- weight = k_space.get_weight()
-
- monopole_index = pindex.argmin()
-
- # Cast the supplied spectrum
- spec = k_space.enforce_power(p)
- # Now we mimick the weightning behaviour of
- # spec = power_operator(k_space,spec=p,bare=bare).get_power(bare=False)
- # by appliying the weight from the k_space
- if bare:
- spec *= weight
-
- #TODO: Does this realy set the mean to the monopole as promised in the docs? -> Check!
- if mean is None:
- mean = 0.
- else:
- spec[0] = 0.
-
- p_val = pindex.apply_scalar_function(lambda x: spec[x],
- dtype=spec.dtype.type)
- power_field = Field(k_space, val=p_val, zerocenter=True).transform()
- power_field += (mean**2)
-
- if power_field.min() < 0:
- raise ValueError(about._errors.cstring(
- "ERROR: spectrum or mean incompatible with positive " +
- "definiteness. \n Try increasing the mean."))
-
- log_of_power_field = power_field.apply_scalar_function(np.log,
- inplace=True)
- power_spectrum_1 = log_of_power_field.power()**(0.5)
- power_spectrum_1[0] = log_of_power_field.transform()[monopole_index]
-
- power_spectrum_0 = k_space.calc_weight(p_val).sum() + (mean**2)
- power_spectrum_0 = np.log(power_spectrum_0)
- power_spectrum_0 *= (0.5 / weight)
-
- log_mean = weight * (power_spectrum_1[0] - power_spectrum_0)
-
- power_spectrum_1[0] = 0.
-
- # Mimik
- # power_operator(k_space,spec=power_spectrum_1,bare=False).\
- # get_power(bare=True).real
- if bare:
- power_spectrum_1 /= weight
-
- return log_mean.real, power_spectrum_1.real
-
-
-def power_forward_conversion_rg(k_space, p, mean=0, bare=True):
- """
- This function is designed to convert a theoretical/statistical power
- spectrum of a Gaussian field to the theoretical power spectrum of
- the exponentiated field.
- The function only works for power spectra defined for rg_spaces
-
- Parameters
- ----------
- k_space : nifty.rg_space,
- a regular grid space with the attribute `Fourier = True`
- p : np.array,
- the power spectrum of the Gaussian field.
- Needs to have the same number of entries as
- `k_space.get_power_indices()[0]`
- mean : float, *optional*
- specifies the mean of the Gaussian field (default: 0).
- bare : bool, *optional*
- whether `p` is the bare power spectrum or not (default: True).
-
- Returns
- -------
- p1 : np.array,
- the power spectrum of the exponentiated Gaussian field.
-
- References
- ----------
- .. [#] M. Greiner and T.A. Ensslin,
- "Log-transforming the matter power spectrum";
- `arXiv:1312.1354 `_
- """
-
- pindex = k_space.power_indices['pindex']
- weight = k_space.get_weight()
- # Cast the supplied spectrum
- spec = k_space.enforce_power(p)
- # Now we mimick the weightning behaviour of
- # spec = power_operator(k_space,spec=p,bare=bare).get_power(bare=False)
- # by appliying the weight from the k_space
- if bare:
- spec *= weight
-
- S_val = pindex.apply_scalar_function(lambda x: spec[x],
- dtype=spec.dtype.type)
-
- # S_x is a field
- S_x = Field(k_space, val=S_val, zerocenter=True).transform()
- # s_0 is a scalar
- s_0 = k_space.calc_weight(S_val, power=1).sum()
-
- # Calculate the new power_field
- S_x += s_0
- S_x += 2*mean
-
- power_field = S_x.apply_scalar_function(np.exp, inplace=True)
-
- new_spec = power_field.power()**(0.5)
-
- # Mimik
- # power_operator(k_space,spec=p1,bare=False).get_power(bare=True).real
- if bare:
- new_spec /= weight
-
- return new_spec.real
\ No newline at end of file
diff --git a/nifty/probing/operator_prober.py b/nifty/probing/operator_prober.py
index 04adf2f7eb7afffefc8ba260c5252b55a44bac40..a57bf53bd92a57ef127265e6c61900453bd6acdc 100644
--- a/nifty/probing/operator_prober.py
+++ b/nifty/probing/operator_prober.py
@@ -2,7 +2,6 @@
import abc
-from nifty.config import about
from prober import Prober
@@ -19,9 +18,8 @@ class OperatorProber(Prober):
compute_variance=compute_variance)
if not isinstance(operator, self.valid_operator_class):
- raise TypeError(about._errors.cstring(
- "WARNING: operator must be an instance of %s" %
- str(self.valid_operator_class)))
+ raise TypeError("Operator must be an instance of %s" %
+ str(self.valid_operator_class))
self._operator = operator
# ---Mandatory properties and methods---
diff --git a/nifty/probing/prober.py b/nifty/probing/prober.py
index 480e1073ee203878d7fd2141ee08cfe5de359307..e68f347c2fe167e15aa399ac32ae3605c5ca5ad4 100644
--- a/nifty/probing/prober.py
+++ b/nifty/probing/prober.py
@@ -4,7 +4,6 @@ import abc
import numpy as np
-from nifty.config import about
from nifty.field import Field
from d2o import STRATEGIES as DISTRIBUTION_STRATEGIES
@@ -40,9 +39,8 @@ class Prober(object):
def distribution_strategy(self, distribution_strategy):
distribution_strategy = str(distribution_strategy)
if distribution_strategy not in DISTRIBUTION_STRATEGIES['global']:
- raise ValueError(about._errors.cstring(
- "ERROR: distribution_strategy must be a global-type "
- "strategy."))
+ raise ValueError("distribution_strategy must be a global-type "
+ "strategy.")
self._distribution_strategy = distribution_strategy
@property
@@ -60,8 +58,8 @@ class Prober(object):
@random_type.setter
def random_type(self, random_type):
if random_type not in ["pm1", "normal"]:
- raise ValueError(about._errors.cstring(
- "ERROR: unsupported random type: '" + str(random_type) + "'."))
+ raise ValueError(
+ "unsupported random type: '" + str(random_type) + "'.")
else:
self._random_type = random_type
diff --git a/nifty/random.py b/nifty/random.py
index a1fcf6eff164dc42e60a123a30bd25fe3e2dc2c2..98b87dfa05afd2664a3926b36b5f5b457d3e1bab 100644
--- a/nifty/random.py
+++ b/nifty/random.py
@@ -2,6 +2,7 @@
import numpy as np
+
class Random(object):
@staticmethod
def pm1(dtype=np.dtype('int'), shape=1):
diff --git a/nifty/spaces/gl_space/gl_space.py b/nifty/spaces/gl_space/gl_space.py
index 0feb0199b4b76aebfdf290f002bae4ae7a9e8dd6..3718170da7c8a7f6e12865a954156ae164d3e226 100644
--- a/nifty/spaces/gl_space/gl_space.py
+++ b/nifty/spaces/gl_space/gl_space.py
@@ -6,10 +6,13 @@ import numpy as np
from d2o import arange, STRATEGIES as DISTRIBUTION_STRATEGIES
from nifty.spaces.space import Space
-from nifty.config import about, nifty_configuration as gc,\
+from nifty.config import nifty_configuration as gc,\
dependency_injector as gdi
import nifty.nifty_utilities as utilities
+import logging
+logger = logging.getLogger('NIFTy.GLSpace')
+
gl = gdi.get('libsharp_wrapper_gl')
GL_DISTRIBUTION_STRATEGIES = DISTRIBUTION_STRATEGIES['global']
@@ -97,8 +100,8 @@ class GLSpace(Space):
"""
# check imports
if not gc['use_libsharp']:
- raise ImportError(about._errors.cstring(
- "ERROR: libsharp_wrapper_gl not available or not loaded."))
+ raise ImportError(
+ "libsharp_wrapper_gl not available or not loaded.")
super(GLSpace, self).__init__(dtype)
@@ -191,11 +194,11 @@ class GLSpace(Space):
def _parse_nlat(self, nlat):
nlat = int(nlat)
if nlat < 2:
- raise ValueError(about._errors.cstring(
- "ERROR: nlat must be a positive number."))
+ raise ValueError(
+ "nlat must be a positive number.")
elif nlat % 2 != 0:
- raise ValueError(about._errors.cstring(
- "ERROR: nlat must be a multiple of 2."))
+ raise ValueError(
+ "nlat must be a multiple of 2.")
return nlat
def _parse_nlon(self, nlon):
@@ -204,7 +207,6 @@ class GLSpace(Space):
else:
nlon = int(nlon)
if nlon != 2 * self.nlat - 1:
- about.warnings.cprint(
- "WARNING: nlon was set to an unrecommended value: "
- "nlon <> 2*nlat-1.")
+ logger.warn("nlon was set to an unrecommended value: "
+ "nlon <> 2*nlat-1.")
return nlon
diff --git a/nifty/spaces/hp_space/hp_space.py b/nifty/spaces/hp_space/hp_space.py
index 4638092e8be4de2ece9e6af5475f2ac45fbee397..c8d1fffcca87418ba662bb04d4ad0558eb4449cb 100644
--- a/nifty/spaces/hp_space/hp_space.py
+++ b/nifty/spaces/hp_space/hp_space.py
@@ -35,8 +35,6 @@ from __future__ import division
import numpy as np
-from d2o import arange, STRATEGIES as DISTRIBUTION_STRATEGIES
-from nifty.config import about
from nifty.spaces.space import Space
from nifty.config import nifty_configuration as gc, \
dependency_injector as gdi
@@ -120,7 +118,7 @@ class HPSpace(Space):
"""
# check imports
if not gc['use_healpy']:
- raise ImportError("ERROR: healpy not available or not loaded.")
+ raise ImportError("healpy not available or not loaded.")
super(HPSpace, self).__init__(dtype)
@@ -200,6 +198,6 @@ class HPSpace(Space):
def _parse_nside(self, nside):
nside = int(nside)
if nside & (nside - 1) != 0 or nside < 2:
- raise ValueError(about._errors.cstring(
- "ERROR: nside must be positive and a multiple of 2."))
+ raise ValueError(
+ "nside must be positive and a multiple of 2.")
return nside
diff --git a/nifty/spaces/lm_space/lm_space.py b/nifty/spaces/lm_space/lm_space.py
index 1d9e66e2867b58aae1b5bb18673be61084e7db70..aa2061706546601992edb3694cd879e79cd5c170 100644
--- a/nifty/spaces/lm_space/lm_space.py
+++ b/nifty/spaces/lm_space/lm_space.py
@@ -4,14 +4,16 @@ import numpy as np
from nifty.spaces.space import Space
-from nifty.config import about,\
- nifty_configuration as gc,\
+from nifty.config import nifty_configuration as gc,\
dependency_injector as gdi
from lm_helper import _distance_array_helper
from d2o import arange
+import logging
+logger = logging.getLogger('NIFTy.LMSpace')
+
gl = gdi.get('libsharp_wrapper_gl')
hp = gdi.get('healpy')
@@ -106,8 +108,8 @@ class LMSpace(Space):
# check imports
if not gc['use_libsharp'] and not gc['use_healpy']:
- raise ImportError(about._errors.cstring(
- "ERROR: neither libsharp_wrapper_gl nor healpy activated."))
+ raise ImportError(
+ "neither libsharp_wrapper_gl nor healpy activated.")
super(LMSpace, self).__init__(dtype)
self._lmax = self._parse_lmax(lmax)
@@ -176,10 +178,9 @@ class LMSpace(Space):
def _parse_lmax(self, lmax):
lmax = np.int(lmax)
if lmax < 1:
- raise ValueError(about._errors.cstring(
- "ERROR: negative lmax is not allowed."))
+ raise ValueError(
+ "negative lmax is not allowed.")
# exception lmax == 2 (nside == 1)
if (lmax % 2 == 0) and (lmax > 2):
- about.warnings.cprint(
- "WARNING: unrecommended parameter (lmax <> 2*n+1).")
+ logger.warn("unrecommended parameter (lmax <> 2*n+1).")
return lmax
diff --git a/nifty/spaces/power_space/power_indices.py b/nifty/spaces/power_space/power_indices.py
index 586e03dafd2e721d08bd3a209b405a804f23caec..1d5621352265eea1d87dbb7d00051f137d34536c 100644
--- a/nifty/spaces/power_space/power_indices.py
+++ b/nifty/spaces/power_space/power_indices.py
@@ -4,8 +4,7 @@ import numpy as np
from d2o import distributed_data_object,\
STRATEGIES as DISTRIBUTION_STRATEGIES
-from nifty.config import about,\
- nifty_configuration as gc,\
+from nifty.config import nifty_configuration as gc,\
dependency_injector as gdi
MPI = gdi[gc['mpi_module']]
@@ -316,9 +315,9 @@ class PowerIndices(object):
return_index=True)[1]
return pundex
else:
- raise NotImplementedError(about._errors.cstring(
- "ERROR: _compute_pundex_d2o not implemented for given "
- "distribution_strategy"))
+ raise NotImplementedError(
+ "_compute_pundex_d2o not implemented for given "
+ "distribution_strategy")
def _bin_power_indices(self, index_dict, **kwargs):
"""
@@ -401,134 +400,3 @@ class PowerIndices(object):
pundex_ = self._compute_pundex(pindex_, kindex_)
return pindex_, kindex_, rho_, pundex_
-
-#
-#class LMPowerIndices(PowerIndices):
-#
-# def __init__(self, lmax, dim, distribution_strategy,
-# log=False, nbin=None, binbounds=None):
-# """
-# Returns an instance of the PowerIndices class. Given the shape and
-# the density of a underlying rectangular grid it provides the user
-# with the pindex, kindex, rho and pundex. The indices are bined
-# according to the supplied parameter scheme. If wanted, computed
-# results are stored for future reuse.
-#
-# Parameters
-# ----------
-# shape : tuple, list, ndarray
-# Array-like object which specifies the shape of the underlying
-# rectangular grid
-# dgrid : tuple, list, ndarray
-# Array-like object which specifies the step-width of the
-# underlying grid
-# zerocenter : boolean, tuple/list/ndarray of boolean *optional*
-# Specifies which dimensions are zerocentered. (default:False)
-# log : bool *optional*
-# Flag specifying if the binning of the default indices is
-# performed on logarithmic scale.
-# nbin : integer *optional*
-# Number of used bins for the binning of the default indices.
-# binbounds : {list, array}
-# Array-like inner boundaries of the used bins of the default
-# indices.
-# """
-# # Basic inits and consistency checks
-# self.lmax = np.uint(lmax)
-# self.dim = np.uint(dim)
-# super(LMPowerIndices, self).__init__(
-# distribution_strategy=distribution_strategy,
-# log=log,
-# nbin=nbin,
-# binbounds=binbounds)
-#
-# def compute_kdict(self):
-# """
-# Calculates an n-dimensional array with its entries being the
-# lengths of the k-vectors from the zero point of the grid.
-#
-# Parameters
-# ----------
-# None : All information is taken from the parent object.
-#
-# Returns
-# -------
-# nkdict : distributed_data_object
-# """
-#
-# if self.distribution_strategy != 'not':
-# about.warnings.cprint(
-# "WARNING: full kdict is temporarily stored on every node " +
-# "altough disribution strategy != 'not'!")
-#
-# if 'healpy' in gdi:
-# nkdict = hp.Alm.getlm(self.lmax, i=None)[0]
-# else:
-# nkdict = self._getlm()[0]
-# nkdict = distributed_data_object(
-# nkdict,
-# distribution_strategy=self.distribution_strategy,
-# comm=self._comm)
-# return nkdict
-#
-# def _getlm(self): # > compute all (l,m)
-# index = np.arange(self.dim)
-# n = 2 * self.lmax + 1
-# m = np.ceil((n - np.sqrt(n**2 - 8 * (index - self.lmax))) / 2
-# ).astype(np.int)
-# l = index - self.lmax * m + m * (m - 1) // 2
-# return l, m
-#
-# def _compute_indices_d2o(self, nkdict):
-# """
-# Internal helper function which computes pindex, kindex, rho and pundex
-# from a given nkdict
-# """
-# ##########
-# # kindex #
-# ##########
-# kindex = np.arange(self.lmax + 1, dtype=np.float)
-#
-# ##########
-# # pindex #
-# ##########
-# pindex = nkdict.copy(dtype=np.int)
-#
-# #######
-# # rho #
-# #######
-# rho = (2 * kindex + 1).astype(np.int)
-#
-# ##########
-# # pundex #
-# ##########
-# pundex = self._compute_pundex_d2o(pindex, kindex)
-#
-# return pindex, kindex, rho, pundex
-#
-# def _compute_indices_np(self, nkdict):
-# """
-# Internal helper function which computes pindex, kindex, rho and pundex
-# from a given nkdict
-# """
-# ##########
-# # kindex #
-# ##########
-# kindex = np.arange(self.lmax + 1, dtype=np.float)
-#
-# ##########
-# # pindex #
-# ##########
-# pindex = nkdict.astype(dtype=np.int, copy=True)
-#
-# #######
-# # rho #
-# #######
-# rho = (2 * kindex + 1).astype(np.int)
-#
-# ##########
-# # pundex #
-# ##########
-# pundex = self._compute_pundex_np(pindex, kindex)
-#
-# return pindex, kindex, rho, pundex
diff --git a/nifty/spaces/power_space/power_space.py b/nifty/spaces/power_space/power_space.py
index 05a0fe58556bc9c535b3f015d852baf0f45434fb..a9fd0d4da55761f54c7416d7ffdc40e8a18e15ab 100644
--- a/nifty/spaces/power_space/power_space.py
+++ b/nifty/spaces/power_space/power_space.py
@@ -4,7 +4,6 @@ import numpy as np
from power_index_factory import PowerIndexFactory
-from nifty.config import about
from nifty.spaces.space import Space
from nifty.spaces.rg_space import RGSpace
from nifty.nifty_utilities import cast_axis_to_tuple
@@ -23,11 +22,11 @@ class PowerSpace(Space):
self._ignore_for_hash += ['_pindex', '_kindex', '_rho']
if not isinstance(harmonic_domain, Space):
- raise ValueError(about._errors.cstring(
- "ERROR: harmonic_domain must be a Space."))
+ raise ValueError(
+ "harmonic_domain must be a Space.")
if not harmonic_domain.harmonic:
- raise ValueError(about._errors.cstring(
- "ERROR: harmonic_domain must be a harmonic space."))
+ raise ValueError(
+ "harmonic_domain must be a harmonic space.")
self._harmonic_domain = harmonic_domain
power_index = PowerIndexFactory.get_power_index(
@@ -87,8 +86,8 @@ class PowerSpace(Space):
axes = cast_axis_to_tuple(axes, len(total_shape))
if len(axes) != 1:
- raise ValueError(about._errors.cstring(
- "ERROR: axes must be of length 1."))
+ raise ValueError(
+ "axes must be of length 1.")
reshaper = [1, ] * len(total_shape)
reshaper[axes[0]] = self.shape[0]
@@ -106,13 +105,13 @@ class PowerSpace(Space):
return result_x
def get_distance_array(self, distribution_strategy):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: There is no get_distance_array implementation for "
- "PowerSpace."))
+ raise NotImplementedError(
+ "There is no get_distance_array implementation for "
+ "PowerSpace.")
def get_smoothing_kernel_function(self, sigma):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: There is no smoothing function for PowerSpace."))
+ raise NotImplementedError(
+ "There is no smoothing function for PowerSpace.")
# ---Added properties and methods---
diff --git a/nifty/spaces/rg_space/rg_space.py b/nifty/spaces/rg_space/rg_space.py
index cb043dbb94317397536e6020e732e9fc159978fd..1bd9bfae949965e1ff9e663532f89e4433b0527d 100644
--- a/nifty/spaces/rg_space/rg_space.py
+++ b/nifty/spaces/rg_space/rg_space.py
@@ -40,8 +40,6 @@ from d2o import distributed_data_object,\
from nifty.spaces.space import Space
-from nifty.config import about
-
class RGSpace(Space):
"""
@@ -253,8 +251,8 @@ class RGSpace(Space):
elif distribution_strategy in DISTRIBUTION_STRATEGIES['not']:
slice_of_first_dimension = slice(0, shape[0])
else:
- raise ValueError(about._errors.cstring(
- "ERROR: Unsupported distribution strategy"))
+ raise ValueError(
+ "Unsupported distribution strategy")
dists = self._distance_array_helper(slice_of_first_dimension)
nkdict.set_local_data(dists)
@@ -322,4 +320,3 @@ class RGSpace(Space):
temp = np.empty(len(self.shape), dtype=bool)
temp[:] = zerocenter
return tuple(temp)
-
diff --git a/nifty/spaces/space/space.py b/nifty/spaces/space/space.py
index aae386251317abcac5d7185ec359f02e12dca3c3..531cc217ad326403cf66efe72f243c6c0e0623c7 100644
--- a/nifty/spaces/space/space.py
+++ b/nifty/spaces/space/space.py
@@ -145,9 +145,6 @@ from __future__ import division
import abc
import numpy as np
-import pylab as pl
-
-from nifty.config import about
class Space(object):
@@ -232,18 +229,18 @@ class Space(object):
@abc.abstractproperty
def shape(self):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: There is no generic shape for the Space base class."))
+ raise NotImplementedError(
+ "There is no generic shape for the Space base class.")
@abc.abstractproperty
def dim(self):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: There is no generic dim for the Space base class."))
+ raise NotImplementedError(
+ "There is no generic dim for the Space base class.")
@abc.abstractproperty
def total_volume(self):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: There is no generic volume for the Space base class."))
+ raise NotImplementedError(
+ "There is no generic volume for the Space base class.")
@abc.abstractmethod
def copy(self):
@@ -276,8 +273,8 @@ class Space(object):
return x
def compute_k_array(self, distribution_strategy):
- raise NotImplementedError(about._errors.cstring(
- "ERROR: There is no generic k_array for Space base class."))
+ raise NotImplementedError(
+ "There is no generic k_array for Space base class.")
def hermitian_decomposition(self, x, axes=None):
raise NotImplementedError
diff --git a/setup.py b/setup.py
index d1c1482ea25aa70e860e9cb48cdcc353938c4e58..d5fb0f53ee7b6b8acc9b2e677aa94a006e5f555c 100644
--- a/setup.py
+++ b/setup.py
@@ -44,9 +44,9 @@ setup(name="ift_nifty",
]),
include_dirs=[numpy.get_include()],
dependency_links=[
- 'git+https://gitlab.mpcdf.mpg.de/ift/keepers.git#egg=keepers-0.1.1',
+ 'git+https://gitlab.mpcdf.mpg.de/ift/keepers.git#egg=keepers-0.1.2',
'git+https://gitlab.mpcdf.mpg.de/ift/d2o.git#egg=d2o-1.0.4'],
- install_requires=['keepers>=0.1.1', 'd2o>=1.0.4'],
+ install_requires=['keepers>=0.1.2', 'd2o>=1.0.4'],
package_data={'nifty.demos': ['demo_faraday_map.npy'],
},
license="GPLv3",