Commit 41dd2ed7 authored by Ultima

Deleted nifty_explicit.py and nifty_probing_old.py

Many bugfixes and new tests for rg_space.
Fixed the imp-based module loading in dependency_injector.
parent 6c84c41b
@@ -6,9 +6,6 @@ from nifty_dependency_injector import dependency_injector
from nifty_configuration import variable,\
configuration
global_dependency_injector = dependency_injector(
['h5py',
('mpi4py.MPI', 'MPI'),
@@ -19,6 +16,7 @@ global_dependency_injector = dependency_injector(
'healpy',
'libsharp_wrapper_gl'])
variable_fft_module = variable('fft_module',
['pyfftw', 'gfft', 'gfft_fallback'],
lambda z: z in global_dependency_injector)
@@ -61,3 +59,5 @@ variable_default_comm = variable(
global_configuration['mpi_module']], z))
global_configuration.register(variable_default_comm)
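The hunk above wires each configuration variable to a validity check: variable_fft_module only accepts values whose module the dependency injector managed to import. A minimal sketch of that pattern, assuming a constructor of the form variable(name, candidates, checker) as the call above suggests (the real class lives in nifty_configuration and may differ):

    class variable(object):
        def __init__(self, name, candidates, checker):
            self.name = name
            self.checker = checker  # callable deciding whether a value is usable
            # default to the first candidate that passes the check
            self.value = next((c for c in candidates if checker(c)), None)

        def set(self, value):
            if not self.checker(value):
                raise ValueError("invalid value for " + self.name)
            self.value = value

    available = set(['pyfftw'])  # stand-in for the dependency injector
    fft_module = variable('fft_module',
                          ['pyfftw', 'gfft', 'gfft_fallback'],
                          lambda z: z in available)
    assert fft_module.value == 'pyfftw'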
@@ -3,7 +3,6 @@
import imp
import sys
class dependency_injector(object):
def __init__(self, modules=[]):
self.registry = {}
@@ -35,16 +34,28 @@ class dependency_injector(object):
self.registry[key_name] = loaded_module
except KeyError:
try:
fp, pathname, description = imp.find_module(module_name)
loaded_module = \
imp.load_module(module_name, fp, pathname, description)
loaded_module = recursive_import(module_name)
# print module_name
# fp, pathname, description = imp.find_module(module_name)
# print pathname
# loaded_module = \
# imp.load_module(module_name, fp, pathname, description)
#
# print loaded_module
self.registry[key_name] = loaded_module
except ImportError:
pass
finally:
# Since we may exit via an exception, close fp explicitly.
try:
fp.close()
except (UnboundLocalError, AttributeError):
pass
# finally:
# # Since we may exit via an exception, close fp explicitly.
# try:
# fp.close()
# except (UnboundLocalError, AttributeError):
# pass
def recursive_import(name):
m = __import__(name)
for n in name.split(".")[1:]:
m = getattr(m, n)
return m
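Replacing imp.find_module/imp.load_module with recursive_import fixes imports of dotted names such as 'mpi4py.MPI': imp.find_module does not handle hierarchical module names, and __import__ on a dotted name imports the submodule but returns the top-level package, so the helper has to walk down with getattr. A quick check of that behavior, using the recursive_import defined just above:

    top = __import__('os.path')     # imports os.path, but returns the os package
    sub = recursive_import('os.path')
    assert sub is top.path          # the getattr walk reaches the submodule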
@@ -326,6 +326,9 @@ class lm_space(point_space):
raise TypeError(about._errors.cstring(
"ERROR: The given codomain must be a nifty lm_space."))
if self.comm is not codomain.comm:
return False
if self.datamodel is not codomain.datamodel:
return False
@@ -388,10 +391,16 @@ class lm_space(point_space):
raise NotImplementedError
nlat = self.paradict['lmax'] + 1
nlon = self.paradict['lmax'] * 2 + 1
return gl_space(nlat=nlat, nlon=nlon, dtype=new_dtype)
return gl_space(nlat=nlat, nlon=nlon, dtype=new_dtype,
datamodel=self.datamodel,
comm=self.comm)
elif coname == 'hp' or (coname is None and not gc['lm2gl']):
nside = (self.paradict['lmax']+1) // 3
return hp_space(nside=nside)
return hp_space(nside=nside,
datamodel=self.datamodel,
comm=self.comm)
else:
raise ValueError(about._errors.cstring(
"ERROR: unsupported or incompatible codomain '"+coname+"'."))
@@ -1065,6 +1074,9 @@ class gl_space(point_space):
if self.datamodel is not codomain.datamodel:
return False
if self.comm is not codomain.comm:
return False
if isinstance(codomain, lm_space):
nlat = self.paradict['nlat']
nlon = self.paradict['nlon']
@@ -1093,9 +1105,13 @@ class gl_space(point_space):
mmax = nlat-1
# lmax,mmax = nlat-1,nlat-1
if self.dtype == np.dtype('float32'):
return lm_space(lmax=lmax, mmax=mmax, dtype=np.complex64)
return lm_space(lmax=lmax, mmax=mmax, dtype=np.complex64,
datamodel=self.datamodel,
comm=self.comm)
else:
return lm_space(lmax=lmax, mmax=mmax, dtype=np.complex128)
return lm_space(lmax=lmax, mmax=mmax, dtype=np.complex128,
datamodel=self.datamodel,
comm=self.comm)
def get_random_values(self, **kwargs):
"""
@@ -1690,6 +1706,9 @@ class hp_space(point_space):
if self.datamodel is not codomain.datamodel:
return False
if self.comm is not codomain.comm:
return False
if isinstance(codomain, lm_space):
nside = self.paradict['nside']
lmax = codomain.paradict['lmax']
@@ -1713,7 +1732,9 @@ class hp_space(point_space):
"""
lmax = 3*self.paradict['nside'] - 1
mmax = lmax
return lm_space(lmax=lmax, mmax=mmax, dtype=np.dtype('complex128'))
return lm_space(lmax=lmax, mmax=mmax, dtype=np.dtype('complex128'),
datamodel=self.datamodel,
comm=self.comm)
def get_random_values(self, **kwargs):
"""
@@ -884,7 +884,8 @@ class point_space(space):
def copy(self):
return point_space(num=self.paradict['num'],
dtype=self.dtype,
datamodel=self.datamodel)
datamodel=self.datamodel,
comm=self.comm)
def getitem(self, data, key):
return data[key]
@@ -1337,18 +1338,18 @@ class point_space(space):
# Now it's about to extract a powerspectrum from spec
# First of all just extract a numpy array. The shape is cared about
# later.
dtype = np.dtype('float')
# Case 1: spec is a function
if callable(spec):
# Try to plug in the kindex array in the function directly
try:
spec = np.array(spec(kindex), dtype=self.dtype)
spec = np.array(spec(kindex), dtype=dtype)
except:
# Second try: Use a vectorized version of the function.
# This is slower, but better than nothing
try:
spec = np.array(np.vectorize(spec)(kindex),
dtype=self.dtype)
dtype=dtype)
except:
raise TypeError(about._errors.cstring(
"ERROR: invalid power spectra function."))
@@ -1359,21 +1360,13 @@ class point_space(space):
spec = spec.val.get_full_data()
except AttributeError:
spec = spec.val
spec = spec.astype(self.dtype).flatten()
spec = spec.astype(dtype).flatten()
# Case 3: spec is a scalar or something else:
else:
spec = np.array(spec, dtype=self.dtype).flatten()
spec = np.array(spec, dtype=dtype).flatten()
# Make some sanity checks
# Drop imaginary part
temp_spec = np.real(spec)
try:
np.testing.assert_allclose(spec, temp_spec)
except(AssertionError):
about.warnings.cprint("WARNING: Dropping imaginary part.")
spec = temp_spec
# check finiteness
if not np.all(np.isfinite(spec)):
about.warnings.cprint("WARNING: infinite value(s).")
@@ -1535,8 +1528,10 @@ class point_space(space):
elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
# Prepare the empty distributed_data_object
sample = distributed_data_object(global_shape=self.get_shape(),
dtype=self.dtype)
sample = distributed_data_object(
global_shape=self.get_shape(),
dtype=self.dtype,
distribution_strategy=self.datamodel)
# Case 1: uniform distribution over {-1,+1}/{1,i,-1,-i}
if arg['random'] == 'pm1':
@@ -1594,7 +1589,11 @@ class point_space(space):
return x * self.get_weight(power=power)
def get_weight(self, power=1, split=False):
return np.prod(np.array(self.distances)**np.array(power))
splitted_weight = tuple(np.array(self.distances)**np.array(power))
if not split:
return np.prod(splitted_weight)
else:
return splitted_weight
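The split keyword added above returns the per-axis weights instead of their product. A worked example with hypothetical distances:

    import numpy as np

    distances = (0.5, 0.25)                           # per-axis grid spacings
    splitted_weight = tuple(np.array(distances) ** np.array(1))
    assert np.prod(splitted_weight) == 0.125          # split=False: volume weight
    assert splitted_weight == (0.5, 0.25)             # split=True: per-axis tuple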
def calc_dot(self, x, y):
"""
@@ -1650,12 +1649,9 @@ class point_space(space):
iter : int, *optional*
Number of iterations performed in specific transformations.
"""
x = self.cast(x)
if codomain is None or self.check_codomain(codomain):
return x
else:
raise ValueError(about._errors.cstring(
"ERROR: unsupported transformation."))
raise AttributeError(about._errors.cstring(
"ERROR: fourier-transformation is ill-defined for " +
"(unstructured) point space."))
def calc_smooth(self, x, **kwargs):
"""
@@ -1677,21 +1673,27 @@ class point_space(space):
def calc_real_Q(self, x):
try:
return x.isreal().all()
except(AttributeError):
except AttributeError:
return np.all(np.isreal(x))
def calc_bincount(self, x, weights=None, minlength=None):
try:
complex_weights_Q = issubclass(weights.dtype.type,
np.complexfloating)
except AttributeError:
complex_weights_Q = False
if self.datamodel == 'np':
if issubclass(np.dtype(weights).type, np.complexfloating):
real_bincount = np.bincount(x, weights=weights.real,
minlength=minlength)
imag_bincount = np.bincount(x, weights=weights.imag,
minlength=minlength)
return real_bincount + imag_bincount
if complex_weights_Q:
real_bincount = np.bincount(x, weights=weights.real,
minlength=minlength)
imag_bincount = np.bincount(x, weights=weights.imag,
minlength=minlength)
return real_bincount + imag_bincount
else:
return np.bincount(x, weights=weights, minlength=minlength)
elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
if issubclass(np.dtype(weights).type, np.complexfloating):
if complex_weights_Q:
real_bincount = x.bincount(weights=weights.real,
minlength=minlength)
imag_bincount = x.bincount(weights=weights.imag,
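The complex_weights_Q probe above (hunk truncated here) exists because np.bincount only accepts real-valued weights, so complex weights get split into a real and an imaginary bincount. For illustration:

    import numpy as np

    x = np.array([0, 1, 1, 2])
    w = np.array([1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j])
    # np.bincount(x, weights=w) fails for complex w; split instead:
    re = np.bincount(x, weights=w.real)    # [1., 8., 7.]
    im = np.bincount(x, weights=w.imag)    # [2., 10., 8.]
    assert np.allclose(re + 1j * im, [1 + 2j, 8 + 10j, 7 + 8j])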
@@ -210,6 +210,8 @@ class distributed_data_object(object):
try:
temp.data[:] = function(self.data)
except:
about.warnings.cprint(
"WARNING: Trying to use np.vectorize!")
temp.data[:] = np.vectorize(function)(self.data)
else:
# Noting to do here. The value-empty array
@@ -456,7 +458,8 @@ class distributed_data_object(object):
other = self.distributor.extract_local_data(other)
local_vdot = np.vdot(self.get_local_data(), other)
local_vdot_list = self.distributor._allgather(local_vdot)
global_vdot = np.sum(local_vdot_list)
global_vdot = np.result_type(self.dtype,
other.dtype).type(np.sum(local_vdot_list))
return global_vdot
def __getitem__(self, key):
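The vdot fix pins the dtype of the global sum: np.result_type computes numpy's type promotion of the two operand dtypes, so the reduction no longer silently changes type when the allgather delivers plain Python scalars. A small sketch of the mechanism:

    import numpy as np

    partials = [1.5, 2.5]                    # partial vdots after allgather
    out_type = np.result_type(np.float32, np.float32).type
    global_vdot = out_type(np.sum(partials))
    assert isinstance(global_vdot, np.float32)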
@@ -551,8 +554,13 @@
def var(self):
if self.shape == (0,):
return np.var(np.array([], dtype=self.dtype))
mean_of_the_square = self.mean(power=2)
square_of_the_mean = self.mean()**2
if issubclass(self.dtype.type, np.complexfloating):
mean_of_the_square = abs(self**2).mean()
square_of_the_mean = abs(self.mean())**2
else:
mean_of_the_square = self.mean(power=2)
square_of_the_mean = self.mean()**2
return mean_of_the_square - square_of_the_mean
def std(self):
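The new complex branch above computes the variance as E[|x|^2] - |E[x]|^2, which stays real (note |x**2| = |x|^2, so abs(self**2).mean() is E[|x|^2]) and matches numpy's own definition for complex input:

    import numpy as np

    x = np.array([1 + 2j, 2 - 1j, -1 + 1j])
    var = np.mean(np.abs(x) ** 2) - np.abs(np.mean(x)) ** 2
    assert np.allclose(var, np.var(x))   # numpy uses the same definition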
@@ -1242,7 +1250,7 @@ class distributor(object):
# gather the local from_keys. It is the same procedure as above
if from_found != 'd2o':
from_key_list = comm.allgather(to_key)
from_key_list = comm.allgather(from_key)
else:
from_index_list = comm.allgather(from_key.index)
from_key_list = map(lambda z: d2o_librarian[z],
@@ -1265,9 +1273,9 @@ class distributor(object):
if comm.rank == i:
temp_shape = np.shape(data_update)
try:
temp_dtype = np.dtype(data_update).type
temp_dtype = np.dtype(data_update.dtype)
except(TypeError):
temp_dtype = np.array(data_update).dtype.type
temp_dtype = np.array(data_update).dtype
else:
temp_shape = None
temp_dtype = None
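The dtype probe is corrected to read data_update.dtype directly: np.dtype() only understands dtype-like arguments, while plain scalars and lists still fall through to the np.array(...).dtype branch. Two quick checks of that behavior:

    import numpy as np

    assert np.dtype(np.arange(3.0).dtype) == np.float64  # arrays: probe .dtype
    assert np.array([1, 2]).dtype.kind == 'i'            # lists: array fallback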
@@ -1437,7 +1445,6 @@ class _slicing_distributor(distributor):
def _disperse_data_primitive(self, data, to_key, data_update, from_key,
copy, to_found, to_found_boolean, from_found,
from_found_boolean, **kwargs):
if np.isscalar(data_update):
from_key = None
@@ -1459,6 +1466,7 @@ class _slicing_distributor(distributor):
prepared_data_update = data_update[from_key]
else:
prepared_data_update = data_update
return self.disperse_data_to_slices(
data=data,
to_slices=to_key,
@@ -1567,7 +1575,6 @@ class _slicing_distributor(distributor):
def disperse_data_to_slices(self, data, to_slices,
data_update, from_slices=None, copy=True):
comm = self.comm
(to_slices, sliceified) = self._sliceify(to_slices)
@@ -56,24 +56,30 @@ class point_space_paradict(space_paradict):
class rg_space_paradict(space_paradict):
def __init__(self, num, complexity, zerocenter):
self.ndim = len(np.array(num).flatten())
def __init__(self, shape, complexity, zerocenter):
self.ndim = len(np.array(shape).flatten())
space_paradict.__init__(
self, num=num, complexity=complexity, zerocenter=zerocenter)
self, shape=shape, complexity=complexity, zerocenter=zerocenter)
def __setitem__(self, key, arg):
if key not in ['num', 'complexity', 'zerocenter']:
if key not in ['shape', 'complexity', 'zerocenter']:
raise ValueError(about._errors.cstring(
"ERROR: Unsupported rg_space parameter"))
if key == 'num':
temp = list(np.array(arg, dtype=int).flatten())
if key == 'shape':
temp = np.array(arg, dtype=np.int).flatten()
if np.any(temp < 0):
raise ValueError("ERROR: negative number in shape.")
temp = list(temp)
if len(temp) != self.ndim:
raise ValueError(about._errors.cstring(
"ERROR: Number of dimensions does not match the init " +
"value."))
elif key == 'complexity':
temp = int(arg)
if temp not in [0, 1, 2]:
raise ValueError(about._errors.cstring(
"ERROR: Unsupported complexity parameter: " + str(temp)))
elif key == 'zerocenter':
temp = np.empty(self.ndim, dtype=bool)
temp[:] = arg
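A hedged usage sketch of the renamed paradict key and the new negativity check (assuming space_paradict routes keyword arguments through __setitem__, as the code above suggests):

    p = rg_space_paradict(shape=(4, 4), complexity=0, zerocenter=False)
    p['shape'] = (8, 8)          # ok: dimensionality matches the init value
    try:
        p['shape'] = (8, -8)     # rejected: negative number in shape
    except ValueError:
        pass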
@@ -112,8 +118,8 @@ class lm_space_paradict(space_paradict):
"ERROR: Unsupported rg_space parameter"))
if key == 'lmax':
temp = int(arg)
if(temp < 1):
temp = np.int(arg)
if temp < 1:
raise ValueError(about._errors.cstring(
"ERROR: lmax: nonpositive number."))
# exception lmax == 2 (nside == 1)
@@ -236,7 +236,8 @@ class power_indices(object):
def _compute_kdict_from_pindex_kindex(self, pindex, kindex):
if isinstance(pindex, distributed_data_object):
tempindex = pindex.copy(dtype=kindex.dtype)
result = tempindex.apply_scalar_function(lambda x: kindex[x])
result = tempindex.apply_scalar_function(
lambda x: kindex[x.astype(np.dtype('int'))])
else:
result = kindex[pindex].astype(dtype=kindex.dtype)
return result
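The astype cast added above is needed because tempindex is copied with the dtype of kindex, i.e. a float dtype, and numpy refuses float arrays as indices:

    import numpy as np

    kindex = np.array([0.0, 1.5, 3.0])
    pindex_as_float = np.array([0.0, 2.0, 1.0])   # pindex after the float copy
    # kindex[pindex_as_float] raises IndexError; cast back to int first:
    kdict = kindex[pindex_as_float.astype(np.dtype('int'))]
    assert np.all(kdict == np.array([0.0, 3.0, 1.5]))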
@@ -266,30 +267,44 @@ class power_indices(object):
_compute_indices function as it is needed in _bin_power_indices,
too.
"""
##########
# pundex #
##########
# Prepare the local data
local_pindex = global_pindex.get_local_data()
# Compute the local pundices for the local pindices
(temp_uniqued_pindex, local_temp_pundex) = np.unique(local_pindex,
return_index=True)
# Shift the local pundices by the nodes' local_dim_offset
local_temp_pundex += global_pindex.distributor.local_dim_offset
# Prepare the pundex arrays used for the Allreduce operation
# pundex has the same length as the kindex array
local_pundex = np.zeros(shape=global_kindex.shape, dtype=np.int)
# Set the default value higher than the maximal possible pundex value
# so that MPI.MIN can sort out the default
local_pundex += np.prod(global_pindex.shape) + 1
# Set the default value higher than the length
global_pundex = np.empty_like(local_pundex)
# Store the individual pundices in the local_pundex array
local_pundex[temp_uniqued_pindex] = local_temp_pundex
# Use Allreduce to find the first occurences/smallest pundices
self.comm.Allreduce(local_pundex, global_pundex, op=MPI.MIN)
return global_pundex
if self.datamodel in DISTRIBUTION_STRATEGIES['slicing']:
##########
# pundex #
##########
# Prepare the local data
local_pindex = global_pindex.get_local_data()
# Compute the local pundices for the local pindices
(temp_uniqued_pindex, local_temp_pundex) = np.unique(
local_pindex,
return_index=True)
# Shift the local pundices by the nodes' local_dim_offset
local_temp_pundex += global_pindex.distributor.local_dim_offset
# Prepare the pundex arrays used for the Allreduce operation
# pundex has the same length as the kindex array
local_pundex = np.zeros(shape=global_kindex.shape, dtype=np.int)
# Set the default value higher than the maximal possible pundex
# value so that MPI.MIN can sort out the default
local_pundex += np.prod(global_pindex.shape) + 1
# Set the default value higher than the length
global_pundex = np.empty_like(local_pundex)
# Store the individual pundices in the local_pundex array
local_pundex[temp_uniqued_pindex] = local_temp_pundex
# Use Allreduce to find the first occurences/smallest pundices
self.comm.Allreduce(local_pundex, global_pundex, op=MPI.MIN)
return global_pundex
elif self.datamodel in DISTRIBUTION_STRATEGIES['not']:
##########
# pundex #
##########
pundex = np.unique(global_pindex.get_local_data(),
return_index=True)[1]
return pundex
else:
raise NotImplementedError(about._errors.cstring(
"ERROR: _compute_pundex_d2o for given datamodel not " +
"implemented."))
def _compute_indices_np(self, nkdict):
"""
@@ -151,8 +151,7 @@ class random(object):
# building kpack
if pindex is not None and kindex is not None:
pindex = distributed_data_object(pindex,
distribution_strategy='fftw')
pindex = domain.cast(pindex, dtype=np.dtype('int'))
kpack = [pindex, kindex]
else:
kpack = None
@@ -317,7 +316,7 @@ class random(object):
x.imag = (vmax - vmin) * np.random.random(size=size) + vmin
elif(dtype in [np.dtype('int8'), np.dtype('int16'), np.dtype('int32'),
np.dtype('int64')]):
x = np.random.randint(
x = np.random.random_integers(
min(vmin, vmax), high=max(vmin, vmax), size=size)
else:
x = (vmax - vmin) * np.random.random(size=size) + vmin
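Note the boundary semantics behind the switch above: np.random.randint samples from the half-open interval [low, high), while np.random.random_integers samples from the closed interval [low, high], so vmax itself becomes attainable. (random_integers was later deprecated in favor of randint(low, high + 1).) A quick demonstration of the excluded bound:

    import numpy as np

    np.random.seed(0)
    draws = np.random.randint(1, 3, size=1000)   # half-open: 3 is excluded
    assert draws.max() == 2                      # the upper bound is never drawn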
@@ -121,6 +121,10 @@ class _fftw_plan_and_info(object):
self.in_zero_centered_dimensions = domain.paradict['zerocenter']
self.out_zero_centered_dimensions = codomain.paradict['zerocenter']
self.overall_sign = (-1)**np.sum(
np.array(self.in_zero_centered_dimensions) *
np.array(self.out_zero_centered_dimensions))
self.local_node_dimensions = np.append((self.fftw_local_size[1],),
self.global_input_shape[1:])
self.offsetQ = self.fftw_local_size[2] % 2
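The overall_sign introduced here is -1 raised to the number of axes that are zero-centered in both the input and the output space; it compensates the global minus that the +-1 centering masks produce on such axes (see the comment added further down in this diff). A small check of the rule with hypothetical flags:

    import numpy as np

    in_zc = np.array([True, False, True])     # hypothetical zerocenter flags
    out_zc = np.array([True, True, False])
    sign = (-1) ** np.sum(in_zc * out_zc)
    assert sign == -1                         # one shared axis -> (-1)**1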
@@ -330,7 +334,8 @@ class fft_fftw(fft):
if val.distribution_strategy == 'fftw':
local_val = val.get_local_data()
else:
local_val = val.get_data(slice(local_start, local_end))
local_val = val.get_data(slice(local_start, local_end),
local_keys=True).get_local_data()
# Case 2: val is a numpy array carrying the full data
else:
local_val = val[slice(local_start, local_end)]
@@ -344,16 +349,13 @@ class fft_fftw(fft):
p.input_array[:] = local_val
# execute the plan
p()
result = p.output_array * current_plan_and_info.\
get_domain_centering_mask()
"""
## renorm the result according to the convention of gfft
if current_plan_and_info.direction == 'FFTW_FORWARD':
result = result/float(result.size)
if p.has_output:
result = p.output_array * current_plan_and_info.\
get_domain_centering_mask()
else:
result *= float(result.size)
"""
result = local_val
assert(result.shape[0] == 0)
# build the return object according to the input val
# TODO: Check if comm is the same, too!
@@ -362,7 +364,8 @@ class fft_fftw(fft):
return_val.set_local_data(data=result)
else:
return_val.set_data(data=result,
key=slice(local_start, local_end))
to_key=slice(local_start, local_end),
local_keys=True)
# If the values living in domain are purely real, the
# result of the fft is hermitian
@@ -379,6 +382,11 @@ class fft_fftw(fft):
return_val.set_local_data(data=result)
return_val = return_val.get_full_data()
# The +-1 magic has a caveat: if a dimension was zero-centered
# in the harmonic as well as in position space, the result gets
# a global minus. The following multiplitcation compensates that.
return_val *= current_plan_and_info.overall_sign
return return_val
@@ -431,6 +439,7 @@ class fft_gfft(fft):
d2oQ = True
temp = val.get_full_data()
else:
d2oQ = False
temp = val
# transform and return
if(domain.dtype == np.float64):
@@ -33,8 +33,9 @@
"""
from __future__ import division
import os
import itertools
import numpy as np
import os
from scipy.special import erf
import pylab as pl
from matplotlib.colors import LogNorm as ln
@@ -120,7 +121,7 @@ class rg_space(point_space):
"""
epsilon = 0.0001 # relative precision for comparisons
def __init__(self, num, zerocenter=False, complexity=0, dist=None,
def __init__(self, shape, zerocenter=False, complexity=0, distances=None,
harmonic=False, datamodel='fftw', fft_module='pyfftw',
comm=gc['default_comm']):
"""
@@ -153,7 +154,7 @@ class rg_space(point_space):
None
"""
self.paradict = rg_space_paradict(num=num,
self.paradict = rg_space_paradict(shape=shape,
complexity=complexity,
zerocenter=zerocenter)
# set dtype
@@ -171,24 +172,24 @@ class rg_space(point_space):
self.datamodel = datamodel
# set volume/distances
naxes = len(self.paradict['num'])
if dist is None:
dist = 1 / np.array(self.paradict['num'], dtype=np.float)
elif np.isscalar(dist):
dist = np.ones(naxes, dtype=np.float) * dist
naxes = len(self.paradict['shape'])
if distances is None:
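The hunk is truncated here; its visible part renames num to shape and dist to distances, with distances defaulting per axis to 1/shape and a scalar broadcast to every axis. A hedged sketch of the new constructor call and the default rule:

    import numpy as np

    # x_space = rg_space(shape=(128, 128), distances=0.5, zerocenter=True)
    shape = (4, 8)
    default_distances = 1 / np.array(shape, dtype=float)   # -> [0.25, 0.125]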