Commit f2541899 authored by Ultima

Improved d2o indexing and field casting.

parent e8fe6581
......@@ -34,8 +34,7 @@ from nifty_mpi_data import distributed_data_object
from nifty_power import *
from nifty_random import random
from nifty_simple_math import *
from nifty_tools import conjugate_gradient,\
steepest_descent
from nifty_paradict import space_paradict,\
point_space_paradict,\
nested_space_paradict
......
......@@ -63,8 +63,8 @@ D = propagator_operator(S=S, N=N, R=R) # define inform
m = D(j, W=S, tol=1E-3, note=True) # reconstruct map
s.plot(title="signal") # plot signal
s.plot(title="signal", save = 'plot_s.png') # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.min(), vmax=s.max()) # plot data
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max()) # plot map
d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png') # plot data
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png') # plot map
......@@ -1372,6 +1372,13 @@ class gl_space(point_space):
else:
return gl.weight(x,self.vol,p=np.float64(power),nlat=self.para[0],nlon=self.para[1],overwrite=False)
def get_weight(self, power = 1):
## TODO: Check if this function is compatible with the rest of the nifty code
## TODO: Can this be done more efficiently?
dummy = self.enforce_values(1)
weighted_dummy = self.calc_weight(dummy, power = power)
return weighted_dummy/dummy
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_transform(self,x,codomain=None,**kwargs):
......
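gl_space.get_weight above has to expose per-pixel weights for a pixelisation whose quadrature weights vary across the sphere, so it weights a field of ones and divides the ones back out instead of duplicating the gl.weight logic. A minimal sketch of that trick in plain NumPy, with a made-up volume array standing in for the gl.weight call (not the nifty API):

```python
import numpy as np

def calc_weight(x, vol, power=1):
    # elementwise weighting, mirroring a calc_weight with
    # non-uniform per-pixel volumes
    return x * vol**power

# hypothetical per-pixel volumes (stand-in for the Gauss-Legendre weights)
vol = np.array([0.2, 0.5, 0.5, 0.2])

# the dummy trick used in gl_space.get_weight: weight a field of ones,
# then divide the ones out again to recover the bare weights
dummy = np.ones_like(vol)
weights = calc_weight(dummy, vol, power=1) / dummy

assert np.allclose(weights, vol)
assert np.allclose(calc_weight(vol, vol, power=2), vol * weights**2)
```

The same get_weight/calc_weight split is introduced for point_space, nested_space and rg_space below, where the weight reduces to a simple volume or meta-volume power.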
......@@ -682,6 +682,9 @@ class space(object):
"""
raise NotImplementedError(about._errors.cstring("ERROR: no generic instance method 'calc_weight'."))
def get_weight(self, power=1):
raise NotImplementedError(about._errors.cstring("ERROR: no generic instance method 'get_weight'."))
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_dot(self,x,y):
......@@ -1608,8 +1611,11 @@ class point_space(space):
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
## weight
return x*self.vol**power
return x*self.get_weight(power = power)
#return x*self.vol**power
def get_weight(self, power = 1):
return self.vol**power
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_dot(self, x, y):
"""
......@@ -2291,7 +2297,11 @@ class nested_space(space):
"""
x = self.enforce_shape(np.array(x,dtype=self.datatype))
## weight
return x*self.get_meta_volume(total=False)**power
return x*self.get_weight(power = power)
def get_weight(self, power = 1):
return self.get_meta_volume(total=False)**power
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
......@@ -2669,7 +2679,7 @@ class field(object):
if val == None:
if kwargs == {}:
self.val = self.domain.cast(0)
self.val = self.domain.cast(0.)
else:
self.val = self.domain.get_random_values(codomain=self.target,
**kwargs)
......@@ -3349,8 +3359,9 @@ class field(object):
temp = self
else:
temp = self.copy_empty()
data_object = self.domain.apply_scalar_function(self.val,\
function, inplace)
data_object = self.domain.apply_scalar_function(self.val,
function,
inplace)
temp.set_val(data_object)
return temp
......
......@@ -163,11 +163,11 @@ class distributed_data_object(object):
**kwargs)
return temp_d2o
def apply_scalar_function(self, function, inplace=False):
def apply_scalar_function(self, function, inplace=False, dtype=None):
if inplace == True:
temp = self
else:
temp = self.copy_empty()
temp = self.copy_empty(dtype=dtype)
try:
temp.data[:] = function(self.data)
......@@ -260,34 +260,54 @@ class distributed_data_object(object):
temp_d2o.set_local_data(data = self.get_local_data().__abs__())
return temp_d2o
def __builtin_helper__(self, operator, other):
def __builtin_helper__(self, operator, other, inplace=False):
## Case 1: other is not a scalar
if not (np.isscalar(other) or np.shape(other) == (1,)):
## if self.shape != other.shape:
## raise AttributeError(about._errors.cstring(
## "ERROR: Shapes do not match!"))
try:
hermitian_Q = other.hermitian
except(AttributeError):
hermitian_Q = False
## extract the local data from the 'other' object
temp_data = self.distributor.extract_local_data(other)
temp_data = operator(temp_data)
## Case 2: other is a real scalar -> preserve hermiticity
elif np.isreal(other) or (self.dtype not in (np.complex, np.complex128,
np.complex256)):
hermitian_Q = self.hermitian
temp_data = operator(other)
## Case 3: other is complex
else:
hermitian_Q = False
temp_data = operator(other)
## write the new data into a new distributed_data_object
if inplace == True:
temp_d2o = self
else:
temp_d2o = self.copy_empty()
temp_d2o.set_local_data(data=temp_data)
temp_d2o.hermitian = hermitian_Q
return temp_d2o
"""
def __inplace_builtin_helper__(self, operator, other):
## Case 1: other is not a scalar
if not (np.isscalar(other) or np.shape(other) == (1,)):
temp_data = self.distributor.extract_local_data(other)
temp_data = operator(temp_data)
## Case 2: other is a real scalar -> preserve hermitianity
elif np.isreal(other):
hermitian_Q = self.hermitian
temp_data = operator(other)
## Case 3: other is complex
else:
temp_data = operator(other)
self.set_local_data(data=temp_data)
self.hermitian = hermitian_Q
return self
"""
def __add__(self, other):
return self.__builtin_helper__(self.get_local_data().__add__, other)
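The former __inplace_builtin_helper__ (kept above only as a disabled block) is folded into __builtin_helper__ via the new inplace flag, so __iadd__, __isub__, __imul__ and friends below reuse the same dispatch and only differ in where the result is written. A stripped-down sketch of that pattern on a toy wrapper class — LocalArray and copy_empty are hypothetical stand-ins, with no MPI distribution and no hermitian bookkeeping:

```python
import numpy as np

class LocalArray(object):
    """Toy stand-in for a distributed_data_object holding only local data."""
    def __init__(self, data):
        self.data = np.asarray(data, dtype=np.float64)

    def copy_empty(self):
        return LocalArray(np.empty_like(self.data))

    def __builtin_helper__(self, operator, other, inplace=False):
        # apply the bound ndarray operator to the (local) data ...
        result = operator(other.data if isinstance(other, LocalArray) else other)
        # ... and write it either back into self or into a fresh object
        target = self if inplace else self.copy_empty()
        target.data[:] = result
        return target

    def __add__(self, other):
        return self.__builtin_helper__(self.data.__add__, other)

    def __iadd__(self, other):
        # same helper as __add__, just writing back into self
        return self.__builtin_helper__(self.data.__iadd__, other, inplace=True)

a = LocalArray([1., 2., 3.])
b = a + 2        # new object, a is untouched
a += 10          # modifies a in place
print(a.data, b.data)   # [11. 12. 13.] [3. 4. 5.]
```

Keeping a single helper also avoids the in-place path drifting out of sync: the removed __inplace_builtin_helper__ set hermitian_Q only in its real-scalar branch, so the non-scalar and complex branches would have hit an unbound hermitian_Q.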
......@@ -296,8 +316,9 @@ class distributed_data_object(object):
return self.__builtin_helper__(self.get_local_data().__radd__, other)
def __iadd__(self, other):
return self.__inplace_builtin_helper__(self.get_local_data().__iadd__,
other)
return self.__builtin_helper__(self.get_local_data().__iadd__,
other,
inplace = True)
def __sub__(self, other):
return self.__builtin_helper__(self.get_local_data().__sub__, other)
......@@ -306,8 +327,9 @@ class distributed_data_object(object):
return self.__builtin_helper__(self.get_local_data().__rsub__, other)
def __isub__(self, other):
return self.__inplace_builtin_helper__(self.get_local_data().__isub__,
other)
return self.__builtin_helper__(self.get_local_data().__isub__,
other,
inplace = True)
def __div__(self, other):
return self.__builtin_helper__(self.get_local_data().__div__, other)
......@@ -316,8 +338,9 @@ class distributed_data_object(object):
return self.__builtin_helper__(self.get_local_data().__rdiv__, other)
def __idiv__(self, other):
return self.__inplace_builtin_helper__(self.get_local_data().__idiv__,
other)
return self.__builtin_helper__(self.get_local_data().__idiv__,
other,
inplace = True)
def __floordiv__(self, other):
return self.__builtin_helper__(self.get_local_data().__floordiv__,
......@@ -326,8 +349,9 @@ class distributed_data_object(object):
return self.__builtin_helper__(self.get_local_data().__rfloordiv__,
other)
def __ifloordiv__(self, other):
return self.__inplace_builtin_helper__(
self.get_local_data().__ifloordiv__, other)
return self.__builtin_helper__(
self.get_local_data().__ifloordiv__, other,
inplace = True)
def __mul__(self, other):
return self.__builtin_helper__(self.get_local_data().__mul__, other)
......@@ -336,8 +360,9 @@ class distributed_data_object(object):
return self.__builtin_helper__(self.get_local_data().__rmul__, other)
def __imul__(self, other):
return self.__inplace_builtin_helper__(self.get_local_data().__imul__,
other)
return self.__builtin_helper__(self.get_local_data().__imul__,
other,
inplace = True)
def __pow__(self, other):
return self.__builtin_helper__(self.get_local_data().__pow__, other)
......@@ -346,8 +371,9 @@ class distributed_data_object(object):
return self.__builtin_helper__(self.get_local_data().__rpow__, other)
def __ipow__(self, other):
return self.__inplace_builtin_helper__(self.get_local_data().__ipow__,
other)
return self.__builtin_helper__(self.get_local_data().__ipow__,
other,
inplace = True)
def __len__(self):
return self.shape[0]
......@@ -392,23 +418,29 @@ class distributed_data_object(object):
def __setitem__(self, key, data):
self.set_data(data, key)
def _minmaxhelper(self, function, **kwargs):
def _contraction_helper(self, function, **kwargs):
local = function(self.data, **kwargs)
local_list = self.distributor._allgather(local)
global_ = function(local_list, axis=0)
return global_
def amin(self, **kwargs):
return self._minmaxhelper(np.amin, **kwargs)
return self._contraction_helper(np.amin, **kwargs)
def nanmin(self, **kwargs):
return self._minmaxhelper(np.nanmin, **kwargs)
return self._contraction_helper(np.nanmin, **kwargs)
def amax(self, **kwargs):
return self._minmaxhelper(np.amax, **kwargs)
return self._contraction_helper(np.amax, **kwargs)
def nanmax(self, **kwargs):
return self._minmaxhelper(np.nanmax, **kwargs)
return self._contraction_helper(np.nanmax, **kwargs)
def sum(self, **kwargs):
return self._contraction_helper(np.sum, **kwargs)
def prod(self, **kwargs):
return self._contraction_helper(np.prod, **kwargs)
def mean(self, power=1):
## compute the local means and the weights for the mean-mean.
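Renaming _minmaxhelper to _contraction_helper reflects that the new sum and prod ride on the same reduce-locally-then-reduce-globally scheme as the min/max family. A sketch of that scheme without MPI, where the distributed pieces are simulated by a plain list of chunks and the _allgather step is therefore trivial:

```python
import numpy as np

def contraction_helper(local_chunks, function, **kwargs):
    # step 1: every task reduces its own local data
    local_results = [function(chunk, **kwargs) for chunk in local_chunks]
    # step 2: the per-task results are allgathered (here: already a list)
    # step 3: the same reduction is applied once more across tasks
    return function(local_results, axis=0)

data = np.arange(1., 13.).reshape(3, 4)
chunks = np.array_split(data, 3)     # stand-in for the distributed pieces

assert contraction_helper(chunks, np.sum) == data.sum()
assert contraction_helper(chunks, np.amax) == data.max()
assert contraction_helper(chunks, np.prod) == data.prod()
```

This works for any reduction that can be re-applied to the per-task results; mean is not of that form for unequal chunks, which is why it keeps its own weighted "mean of means" implementation (the method whose start is shown just above).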
......@@ -731,8 +763,13 @@ class distributed_data_object(object):
for i in sliceified:
if i == True:
temp_shape += (1,)
if data.shape[j] == 1:
j +=1
else:
try:
temp_shape += (data.shape[j],)
except(IndexError):
temp_shape += (1,)
j += 1
## take into account that the sliceified tuple may be too short, because
## of a non-exhaustive list of slices
......
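The shape-rebuilding fix in the hunk above handles data that already carries a length-1 axis where a scalar index was "sliceified", and tolerates a data shape that is shorter than the key. As a loose, hypothetical illustration of that idea — not the actual d2o code path, and the names are invented:

```python
import numpy as np

def rebuild_shape(sliceified, data_shape):
    """Insert a length-1 axis wherever a scalar index was turned into a slice."""
    temp_shape = ()
    j = 0
    for was_scalar in sliceified:
        if was_scalar:
            temp_shape += (1,)
            # consume an axis only if the data already carries it explicitly
            if j < len(data_shape) and data_shape[j] == 1:
                j += 1
        else:
            # the sliceified tuple may be longer than the data's shape
            temp_shape += (data_shape[j],) if j < len(data_shape) else (1,)
            j += 1
    return temp_shape

print(rebuild_shape((True, False, True), (4,)))      # (1, 4, 1)
print(rebuild_shape((True, False, True), (1, 4)))    # (1, 4, 1)
```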
......@@ -26,7 +26,7 @@ from nifty.nifty_core import space, \
point_space, \
nested_space, \
field
from nifty.nifty_tools import conjugate_gradient
from nifty_minimization import conjugate_gradient
from nifty_probing import trace_probing, \
diagonal_probing
......@@ -1804,7 +1804,8 @@ class power_operator(diagonal_operator):
#diag = self.domain.enforce_power(spec,size=np.max(pindex,axis=None,out=None)+1)[pindex]
temp_spec = self.domain.enforce_power(
spec,size=np.max(pindex,axis=None,out=None)+1)
diag = pindex.apply_scalar_function(lambda x: temp_spec[x])
diag = pindex.apply_scalar_function(lambda x: temp_spec[x],
dtype = temp_spec.dtype.type)
except(AttributeError):
raise ValueError(about._errors.cstring("ERROR: invalid input."))
## weight if ...
......
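The new dtype argument to apply_scalar_function matters in the lookup above: pindex is an integer-valued d2o, while temp_spec[x] yields the floating-point spectrum values, so without an explicit output dtype the copy_empty call inside apply_scalar_function would presumably allocate an integer container and truncate the result. A plain NumPy sketch of that pitfall and of what passing temp_spec.dtype.type avoids — hypothetical arrays, not the nifty API:

```python
import numpy as np

pindex = np.array([[0, 1], [1, 2]])        # integer band index per pixel
temp_spec = np.array([2.5, 0.75, 0.1])     # float power per band

# an output container that inherits pindex's integer dtype truncates:
bad = np.empty(pindex.shape, dtype=pindex.dtype)
bad[:] = temp_spec[pindex]
print(bad)                                  # [[2 0]
                                            #  [0 0]]

# allocating the output with the spectrum's dtype keeps the values intact:
good = np.empty(pindex.shape, dtype=temp_spec.dtype.type)
good[:] = temp_spec[pindex]
print(good)                                 # [[2.5  0.75]
                                            #  [0.75 0.1 ]]
```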
......@@ -8,7 +8,7 @@ from nifty.nifty_about import about
# If this fails fall back to local gfft_rg
try:
import pyfftw_BAD
import pyfftw
fft_machine='pyfftw'
except(ImportError):
try:
......@@ -183,6 +183,10 @@ if fft_machine == 'pyfftw':
to_center = np.array(to_center_input)
dimensions = np.array(dimensions_input)
## if none of the dimensions are zero centered, return a 1
if np.all(to_center == 0):
return 1
if np.all(dimensions == np.array(1)) or \
np.all(dimensions == np.array([1])):
return dimensions
......@@ -221,6 +225,8 @@ if fft_machine == 'pyfftw':
offset.reshape(offset.shape + \
(1,)*(np.array(args).ndim - 1)),1)),\
(2,)*to_center.size)
## Cast the core to the smallest integers we can get
core = core.astype(np.int8)
centering_mask = np.tile(core,dimensions//2)
## for the dimensions of odd size corresponding slices must be added
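The hunk above assembles the mask that zero-centres the selected FFT axes; the added astype(np.int8) merely shrinks the ±1 core before it is tiled over the whole grid. As an assumption about what such a mask looks like for fully zero-centred axes (the usual alternating-sign trick for moving the zero frequency to the centre), a standalone sketch — not the tiling code above:

```python
import numpy as np

def centering_mask(shape, to_center):
    """Alternating-sign mask: (-1)**(sum of indices over the centred axes)."""
    if not any(to_center):
        return 1                      # mirrors the early exit added above
    idx = np.indices(shape)
    phase = sum(idx[axis] for axis in range(len(shape)) if to_center[axis])
    # keep the mask small in memory, as astype(np.int8) does above
    return ((-1) ** (phase % 2)).astype(np.int8)

print(centering_mask((4, 4), to_center=(1, 1)))
# [[ 1 -1  1 -1]
#  [-1  1 -1  1]
#  [ 1 -1  1 -1]
#  [-1  1 -1  1]]
```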
......@@ -249,7 +255,7 @@ if fft_machine == 'pyfftw':
def _get_plan_and_info(self,domain,codomain,**kwargs):
## generate a id-tuple which identifies the domain-codomain setting
temp_id = (domain.__identifier__(), codomain.__identifier__())
temp_id = (domain._identifier(), codomain._identifier())
## generate the plan_and_info object if not already there
if not temp_id in self.plan_dict:
self.plan_dict[temp_id]=_fftw_plan_and_info(domain, codomain,
......
......@@ -106,7 +106,7 @@ def power_backward_conversion_rg(k_space,p,mean=None,bare=True):
return logmean.real,p1.real
def power_forward_conversion_rg(k_space,p,mean=0,bare=True):
def power_forward_conversion_rg(k_space, p, mean=0, bare=True):
"""
This function is designed to convert a theoretical/statistical power
spectrum of a Gaussian field to the theoretical power spectrum of
......@@ -137,6 +137,44 @@ def power_forward_conversion_rg(k_space,p,mean=0,bare=True):
`arXiv:1312.1354 <http://arxiv.org/abs/1312.1354>`_
"""
pindex = k_space.power_indices['pindex']
weight = k_space.get_weight()
## Cast the supplied spectrum
spec = k_space.enforce_power(p)
## Now we mimic the weighting behaviour of
## spec = power_operator(k_space,spec=p,bare=bare).get_power(bare=False)
## by applying the weight from the k_space
if bare == True:
spec *= weight
S_val = pindex.apply_scalar_function(lambda x: spec[x],
dtype=spec.dtype.type)
## S_x is a field
S_x = field(k_space, val=S_val, zerocenter=True).transform()
## s_0 is a scalar
s_0 = k_space.calc_weight(S_val, power = 1).sum()
## Calculate the new power_field
S_x += s_0
S_x += 2*mean
print S_x
print s_0
power_field = S_x.apply_scalar_function(np.exp, inplace=True)
new_spec = power_field.power()**(0.5)
## Mimic
## power_operator(k_space,spec=p1,bare=False).get_power(bare=True).real
if bare == True:
new_spec /= weight
return new_spec.real
"""
pindex = k_space.get_power_indices()[2]
spec = power_operator(k_space,spec=p,bare=bare).get_power(bare=False)
......@@ -154,3 +192,4 @@ def power_forward_conversion_rg(k_space,p,mean=0,bare=True):
else:
return p1.real
"""
\ No newline at end of file
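For context on the rewritten forward conversion: the S_x + s_0 + 2*mean construction above mimics the second moment of a log-normal field e^s built from a Gaussian field s with mean m, variance sigma^2 = C_s(0) = s_0 and correlation function C_s (cf. the arXiv:1312.1354 reference cited in the docstring). As a hedged reading of the code, the identity being exponentiated is

\[
\bigl\langle e^{s(x)}\, e^{s(y)} \bigr\rangle
  = \exp\!\bigl( 2m + \sigma^{2} + C_s(x-y) \bigr),
\qquad \sigma^{2} = C_s(0),
\]

so exponentiating the position-space correlation plus the zero-lag variance and twice the mean, and then re-measuring the power of the resulting field, yields the log-normal spectrum (up to the bare/non-bare weighting handled by the weight factor).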
......@@ -583,9 +583,9 @@ class rg_space(point_space):
"ERROR: Data has incompatible shape!"))
## Check the datatype
if x.dtype != self.datatype:
if x.dtype < self.datatype:
about.warnings.cflush(\
"WARNING: Datatypes are uneqal (own: "\
"WARNING: Datatypes are unequal/of conflicting precision (own: "\
+ str(self.datatype) + " <> foreign: " + str(x.dtype) \
+ ") and will be cast! "\
+ "Potential loss of precision!\n")
......@@ -1161,13 +1161,18 @@ class rg_space(point_space):
y : numpy.ndarray
Weighted array.
"""
x = self.cast(x)
#x = self.cast(x)
if isinstance(x, distributed_data_object):
is_hermitianQ = x.hermitian
## weight
x = x * (np.prod(self.vol)**power)
x = x * self.get_weight(power = power)
if isinstance(x, distributed_data_object):
x.hermitian = is_hermitianQ
return x
def get_weight(self, power = 1):
return np.prod(self.vol)**power
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def calc_dot(self, x, y):
"""
......@@ -1426,11 +1431,7 @@ class rg_space(point_space):
fieldabs = abs(x)**2
power_spectrum = np.zeros(rho.shape)
"""
##TODO: Replace this super slow ndindex solution
for ii in np.ndindex(pindex.shape):
power_spectrum[pindex[ii]] += fieldabs[ii]
"""
## In order to make the summation over identical pindices fast,
## the pindex and the kindex must have the same distribution strategy
......@@ -1447,19 +1448,6 @@ class rg_space(point_space):
pindex.distributor._allgather(local_power_spectrum)
power_spectrum = np.sum(power_spectrum, axis = 0)
"""
## Iterate over the k-vectors, extract those fieldabs, where the pindex
## has the according value and build the sum of the resulting array
power_spectrum = np.zeros(rho.size, dtype = np.float)
for ii in xrange(rho.size):
## extract those fieldabs where the pindex equals the current ii
extracted_fieldabs = working_field[pindex == ii]
## sum the extracted field values up and store them
power_spectrum[ii] = np.sum(extracted_fieldabs)
"""
## Divide out the degeneracy factor
power_spectrum /= rho
return power_spectrum
......
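Both disabled variants quoted above — the ndindex loop and the per-band boolean extraction — compute the same thing: the sum of |field|^2 over all pixels sharing a pindex value, divided by the multiplicity rho. For a purely local (non-distributed) array that operation is exactly np.bincount; a hedged sketch of the equivalence, not the distributed code path the class actually uses:

```python
import numpy as np

def local_power_spectrum(field, pindex, rho):
    """Bin |field|**2 over equal pindex values, divide out the degeneracy."""
    fieldabs = np.abs(field) ** 2
    power_spectrum = np.bincount(pindex.ravel(),
                                 weights=fieldabs.ravel(),
                                 minlength=rho.size)
    return power_spectrum / rho

# tiny example: 4 pixels, 2 spectral bands, each band hit twice
field = np.array([1.0, 2.0, 3.0, 4.0])
pindex = np.array([0, 1, 0, 1])
rho = np.array([2, 2])
print(local_power_spectrum(field, pindex, rho))   # [ 5. 10.]
```

The distributed version above instead aligns the distribution strategy of pindex with that of the field and allgathers only the already-binned local spectra, so no global pixel array has to be gathered.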