Commit 4d18d0b9 authored by Theo Steininger

Merge branch 'smooth_operator' into 'master'

Smooth operator



See merge request !30
parents 369994b1 a54dd65c
......@@ -207,7 +207,7 @@ class Field(object):
if not self.domain[space_index].harmonic:
raise ValueError(about._errors.cstring(
"ERROR: Conversion of only one space at a time is allowed."))
"ERROR: The analyzed space must be harmonic."))
# Create the target PowerSpace instance:
# If the associated signal-space field was real, we extract the
......
......@@ -27,6 +27,8 @@ from diagonal_operator import DiagonalOperator
from endomorphic_operator import EndomorphicOperator
from smoothing_operator import SmoothingOperator
from fft_operator import *
from propagator_operator import PropagatorOperator
......@@ -50,4 +52,4 @@ from nifty_probing import prober,\
inverse_diagonal_prober
from nifty_minimization import conjugate_gradient,\
-steepest_descent
\ No newline at end of file
+steepest_descent
......@@ -44,9 +44,8 @@ class HPLMTransformation(SlicingTransformation):
"ERROR: domain needs to be a HPSpace"))
lmax = 3 * domain.nside - 1
-mmax = lmax
-result = LMSpace(lmax=lmax, mmax=mmax, dtype=np.dtype('float64'))
+result = LMSpace(lmax=lmax, dtype=np.dtype('float64'))
cls.check_codomain(domain, result)
return result
......@@ -62,21 +61,16 @@ class HPLMTransformation(SlicingTransformation):
nside = domain.nside
lmax = codomain.lmax
-mmax = codomain.mmax
if 3 * nside - 1 != lmax:
raise ValueError(about._errors.cstring(
'ERROR: codomain has 3*nside-1 != lmax.'))
-if lmax != mmax:
-    raise ValueError(about._errors.cstring(
-        'ERROR: codomain has lmax != mmax.'))
return None
def _transformation_of_slice(self, inp, **kwargs):
lmax = self.codomain.lmax
-mmax = self.codomain.mmax
+mmax = lmax
if issubclass(inp.dtype.type, np.complexfloating):
[resultReal, resultImag] = [hp.map2alm(x.astype(np.float64,
......
......@@ -64,11 +64,6 @@ class LMHPTransformation(SlicingTransformation):
nside = codomain.nside
lmax = domain.lmax
-mmax = domain.mmax
-if lmax != mmax:
-    raise ValueError(about._errors.cstring(
-        'ERROR: domain has lmax != mmax.'))
if 3*nside - 1 != lmax:
raise ValueError(about._errors.cstring(
......@@ -79,7 +74,7 @@ class LMHPTransformation(SlicingTransformation):
def _transformation_of_slice(self, inp, **kwargs):
nside = self.codomain.nside
lmax = self.domain.lmax
-mmax = self.domain.mmax
+mmax = lmax
if issubclass(inp.dtype.type, np.complexfloating):
[resultReal, resultImag] = [ltf.buildLm(x, lmax=lmax)
......
......@@ -339,7 +339,10 @@ class FFTW(Transform):
inp = local_val
else:
if temp_val is None:
-temp_val = np.empty_like(local_val)
+temp_val = np.empty_like(
+    local_val,
+    dtype=self.codomain.dtype
+)
inp = local_val[slice_list]
# This is in order to make FFTW behave properly when slicing input
......
from smoothing_operator import SmoothingOperator
import numpy as np

from nifty.config import about
import nifty.nifty_utilities as utilities
from nifty import RGSpace, LMSpace

from nifty.operators.endomorphic_operator import EndomorphicOperator
from nifty.operators.fft_operator import FFTOperator


class SmoothingOperator(EndomorphicOperator):

    # ---Overwritten properties and methods---

    def __init__(self, domain=(), field_type=(), sigma=None):
        self._domain = self._parse_domain(domain)
        self._field_type = self._parse_field_type(field_type)

        if len(self.domain) != 1:
            raise ValueError(
                about._errors.cstring(
                    'ERROR: SmoothOperator accepts only exactly one '
                    'space as input domain.')
            )

        if self.field_type != ():
            raise ValueError(about._errors.cstring(
                'ERROR: SmoothOperator field-type must be an '
                'empty tuple.'
            ))

        self._sigma = sigma

    def _inverse_times(self, x, spaces, types):
        return self._smooth_helper(x, spaces, types, inverse=True)

    def _times(self, x, spaces, types):
        return self._smooth_helper(x, spaces, types)

    # ---Mandatory properties and methods---

    @property
    def domain(self):
        return self._domain

    @property
    def field_type(self):
        return self._field_type

    @property
    def implemented(self):
        return True

    @property
    def symmetric(self):
        return False

    @property
    def unitary(self):
        return False

    # ---Added properties and methods---

    @property
    def sigma(self):
        return self._sigma

    def _smooth_helper(self, x, spaces, types, inverse=False):
        if self.sigma == 0:
            return x.copy()

        # the domain of the smoothing operator contains exactly one space.
        # Hence, if spaces is None, but we passed LinearOperator's
        # _check_input_compatibility, we know that x is also solely defined
        # on that space
        if spaces is None:
            spaces = (0,)
        else:
            spaces = utilities.cast_axis_to_tuple(spaces, len(x.domain))

        Transformator = FFTOperator(x.domain[spaces[0]])

        # transform to the (global-)default codomain and perform all remaining
        # steps therein
        transformed_x = Transformator(x, spaces=spaces)
        codomain = transformed_x.domain[spaces[0]]
        coaxes = transformed_x.domain_axes[spaces[0]]

        # create the kernel using the knowledge of codomain about itself
        axes_local_distribution_strategy = \
            transformed_x.val.get_axes_local_distribution_strategy(axes=coaxes)

        kernel = codomain.distance_array(
            distribution_strategy=axes_local_distribution_strategy)
        kernel.apply_scalar_function(
            codomain.get_smoothing_kernel_function(self.sigma),
            inplace=True)

        # now, apply the kernel to transformed_x
        # this is done node-locally utilizing numpy's reshaping in order to
        # apply the kernel to the correct axes
        local_transformed_x = transformed_x.val.get_local_data(copy=False)
        local_kernel = kernel.get_local_data(copy=False)

        reshaper = [transformed_x.shape[i] if i in coaxes else 1
                    for i in xrange(len(transformed_x.shape))]
        local_kernel = np.reshape(local_kernel, reshaper)

        # apply the kernel
        if inverse:
            local_transformed_x /= local_kernel
        else:
            local_transformed_x *= local_kernel

        transformed_x.val.set_local_data(local_transformed_x, copy=False)

        result = Transformator.inverse_times(transformed_x, spaces=spaces)

        return result
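The operator transforms the field to its harmonic codomain, multiplies it there with a kernel built from the codomain's distance array, and transforms back. Below is a minimal sketch of that recipe on a periodic 1-D grid, in plain NumPy rather than the NIFTY API; smooth_periodic is purely illustrative.

```python
# Sketch of the smoothing recipe used by _smooth_helper, reduced to plain
# NumPy on a periodic 1-D grid: FFT, multiply by a Gaussian kernel over the
# k-values ("distance array"), inverse FFT.
import numpy as np

def smooth_periodic(signal, sigma, distance=1.0):
    k = np.fft.fftfreq(signal.size, d=distance)          # k-value of each mode
    kernel = np.exp(-2. * np.pi**2 * k**2 * sigma**2)    # RGSpace-style kernel
    return np.fft.ifft(np.fft.fft(signal) * kernel).real

x = np.zeros(128)
x[64] = 1.0                        # delta spike ...
y = smooth_periodic(x, sigma=3.0)  # ... becomes a Gaussian bump of width ~sigma
```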
......@@ -3,7 +3,7 @@ from __future__ import division
import itertools
import numpy as np
-from d2o import STRATEGIES as DISTRIBUTION_STRATEGIES
+from d2o import arange, STRATEGIES as DISTRIBUTION_STRATEGIES
from nifty.spaces.space import Space
from nifty.config import about, nifty_configuration as gc,\
......@@ -135,9 +135,8 @@ class GLSpace(Space):
nlat = self.nlat
weight = np.array(list(itertools.chain.from_iterable(
-    itertools.repeat(x ** power, nlon)
-    for x in gl.vol(nlat))
-    ))
+    itertools.repeat(x ** power, nlon)
+    for x in gl.vol(nlat))))
if axes is not None:
# reshape the weight array to match the input shape
......@@ -154,6 +153,31 @@ class GLSpace(Space):
return result_x
    def get_distance_array(self, distribution_strategy):
        dists = arange(start=0, stop=self.shape[0],
                       distribution_strategy=distribution_strategy)

        dists = dists.apply_scalar_function(
            lambda x: self._distance_array_helper(divmod(x, self.nlon)),
            dtype=np.float)

        return dists

    def _distance_array_helper(self, qr_tuple):
        lat = qr_tuple[0]*(np.pi/(self.nlat-1))
        lon = qr_tuple[1]*(2*np.pi/(self.nlon-1))
        numerator = np.sqrt(np.sin(lat)**2 +
                            (np.sin(lon) * np.cos(lat))**2)
        denominator = np.cos(lon) * np.cos(lat)

        return np.arctan(numerator / denominator)

    def get_smoothing_kernel_function(self, sigma):
        if sigma is None:
            sigma = np.sqrt(2) * np.pi

        return lambda x: np.exp((-0.5 * x**2) / sigma**2)
# ---Added properties and methods---
@property
......@@ -184,11 +208,3 @@ class GLSpace(Space):
"WARNING: nlon was set to an unrecommended value: "
"nlon <> 2*nlat-1.")
return nlon
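In the GLSpace methods above, the flat pixel index is split by divmod into a latitude-ring index and a longitude index before the angular distance and the kernel weight are evaluated. A rough sketch of that decomposition with made-up nlat/nlon values, in plain NumPy (no libsharp, no d2o):

```python
# Sketch only: how a flat GL pixel index decomposes into (ring, longitude)
# indices, and how the Gaussian kernel above weights the resulting angles.
import numpy as np

nlat, nlon = 4, 7                      # hypothetical grid size
idx = np.arange(nlat * nlon)
ring, lon_idx = idx // nlon, idx % nlon    # same split as divmod(x, self.nlon)

lat = ring * (np.pi / (nlat - 1))
lon = lon_idx * (2 * np.pi / (nlon - 1))
num = np.sqrt(np.sin(lat)**2 + (np.sin(lon) * np.cos(lat))**2)
den = np.cos(lon) * np.cos(lat)
dists = np.arctan(num / den)           # same formula as _distance_array_helper

sigma = np.sqrt(2) * np.pi             # default used when sigma is None
weights = np.exp((-0.5 * dists**2) / sigma**2)
```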
......@@ -35,6 +35,7 @@ from __future__ import division
import numpy as np
from d2o import arange, STRATEGIES as DISTRIBUTION_STRATEGIES
from nifty.config import about
from nifty.spaces.space import Space
from nifty.config import nifty_configuration as gc, \
......@@ -158,6 +159,38 @@ class HPSpace(Space):
return result_x
    def get_distance_array(self, distribution_strategy):
        """
        Calculates distance from center to all the points on the sphere

        Parameters
        ----------
        distribution_strategy :
            the distribution strategy of the resulting d2o

        Returns
        -------
        dists : distributed_data_object
        """
        dists = arange(
            start=0, stop=self.shape[0],
            distribution_strategy=distribution_strategy
        )

        # translate distances to 3D unit vectors on a sphere,
        # extract the first entry (simulates the scalar product with (1,0,0))
        # and apply arccos
        dists = dists.apply_scalar_function(
            lambda z: np.arccos(hp.pix2vec(self.nside, z)[0]),
            dtype=np.float)

        return dists

    def get_smoothing_kernel_function(self, sigma):
        if sigma is None:
            sigma = np.sqrt(2) * np.pi

        return lambda x: np.exp((-0.5 * x**2) / sigma**2)
# ---Added properties and methods---
@property
......
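A small standalone check of the HEALPix distance computation above, assuming healpy is installed; it only uses hp.pix2vec and np.arccos:

```python
# Sketch (requires healpy): angular distance of every HEALPix pixel centre
# from the reference direction (1, 0, 0), as used in get_distance_array above.
import numpy as np
import healpy as hp

nside = 2                              # small map for illustration
npix = hp.nside2npix(nside)            # 12 * nside**2 = 48 pixels
x, y, z = hp.pix2vec(nside, np.arange(npix))
dists = np.arccos(x)                   # scalar product with (1, 0, 0), then arccos
assert dists.min() >= 0 and dists.max() <= np.pi
```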
import numpy as np
cimport numpy as np


cpdef _distance_array_helper(np.ndarray[np.int_t] index_array, np.int_t lmax):
    cdef np.int size = (lmax + 1) * (lmax + 1)
    cdef np.ndarray res = np.zeros([len(index_array)], dtype=np.float128)

    for idx, index_full in enumerate(index_array):
        if index_full <= lmax:
            index_half = index_full
        else:
            if (index_full - lmax) % 2 == 0:
                index_half = (index_full + lmax) / 2
            else:
                index_half = (index_full + lmax + 1) / 2

        m = (np.ceil(((2 * lmax + 1) -
                      np.sqrt((2 * lmax + 1)**2 -
                              8 * (index_half - lmax))) / 2)).astype(int)

        res[idx] = index_half - m * (2 * lmax + 1 - m) // 2

    return res
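The Cython helper above maps each flat LMSpace pixel index (m = 0 modes stored once, higher-m modes split into real and imaginary parts) back to its multipole l. A pure-Python restatement of the same mapping — the function name l_of_index is only illustrative — reproduces the l-per-pixel layout for lmax = 2, namely [0, 1, 2, 1, 1, 2, 2, 2, 2]:

```python
# Pure-Python sketch of the index -> l mapping implemented in Cython above.
import numpy as np

def l_of_index(index_full, lmax):
    if index_full <= lmax:                      # m = 0 part, stored once
        index_half = index_full
    elif (index_full - lmax) % 2 == 0:          # real/imag parts of m > 0 modes
        index_half = (index_full + lmax) // 2
    else:
        index_half = (index_full + lmax + 1) // 2
    m = int(np.ceil(((2 * lmax + 1) -
                     np.sqrt((2 * lmax + 1)**2 -
                             8 * (index_half - lmax))) / 2))
    return index_half - m * (2 * lmax + 1 - m) // 2

lmax = 2
print([l_of_index(i, lmax) for i in range((lmax + 1)**2)])
# -> [0, 1, 2, 1, 1, 2, 2, 2, 2]
```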
from __future__ import division
import numpy as np
......@@ -9,6 +8,10 @@ from nifty.config import about,\
nifty_configuration as gc,\
dependency_injector as gdi
+from lm_helper import _distance_array_helper
+from d2o import arange
gl = gdi.get('libsharp_wrapper_gl')
hp = gdi.get('healpy')
......@@ -72,7 +75,7 @@ class LMSpace(Space):
Pixel volume of the :py:class:`lm_space`, which is always 1.
"""
-def __init__(self, lmax, mmax=None, dtype=np.dtype('complex128')):
+def __init__(self, lmax, dtype=np.dtype('complex128')):
"""
Sets the attributes for an lm_space class instance.
......@@ -108,15 +111,22 @@ class LMSpace(Space):
        super(LMSpace, self).__init__(dtype)
        self._lmax = self._parse_lmax(lmax)
-        self._mmax = self._parse_mmax(mmax)

    def hermitian_decomposition(self, x, axes=None):
        return (x.real, x.imag)

-    def compute_k_array(self, distribution_strategy):
-        # return a d2o with the l-value at the individual pixels
-        # e.g. for 0<=l<=2: [0,1,2, 1,1,2,2, 2,2]
-        pass
+    def distance_array(self, distribution_strategy):
+        dists = arange(start=0, stop=self.shape[0],
+                       distribution_strategy=distribution_strategy)
+        dists = dists.apply_scalar_function(
+            lambda x: _distance_array_helper(x, self.lmax),
+            dtype=np.float)
+        return dists
+
+    def get_smoothing_kernel_function(self, sigma):
+        if sigma is None:
+            sigma = np.sqrt(2) * np.pi / (self.lmax + 1)
+        return lambda x: np.exp(-0.5 * x * (x + 1) * sigma**2)
# ---Mandatory properties and methods---
......@@ -161,7 +171,7 @@ class LMSpace(Space):
@property
def mmax(self):
-return self._mmax
+return self._lmax
def _parse_lmax(self, lmax):
lmax = np.int(lmax)
......@@ -173,20 +183,3 @@ class LMSpace(Space):
about.warnings.cprint(
"WARNING: unrecommended parameter (lmax <> 2*n+1).")
return lmax
-    def _parse_mmax(self, mmax):
-        if mmax is None:
-            mmax = self.lmax
-        else:
-            mmax = int(mmax)
-        if mmax < 1:
-            raise ValueError(about._errors.cstring(
-                "ERROR: mmax < 1 is not allowed."))
-        if mmax > self.lmax:
-            raise ValueError(about._errors.cstring(
-                "ERROR: mmax > lmax is not allowed."))
-        if mmax != self.lmax:
-            about.warnings.cprint(
-                "WARNING: unrecommended parameter combination (mmax <> lmax).")
-        return mmax
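The LMSpace smoothing kernel exp(-0.5 * l * (l + 1) * sigma**2) introduced above is the usual harmonic-space form of a Gaussian beam. A small cross-check against healpy's beam window, assuming healpy is available and uses the convention I expect (fwhm = sigma * sqrt(8 ln 2)):

```python
# Sketch: the LMSpace kernel should coincide with healpy's Gaussian beam
# window once sigma is converted to a FWHM.
import numpy as np
import healpy as hp

lmax, sigma = 16, 0.05
ell = np.arange(lmax + 1)
kernel = np.exp(-0.5 * ell * (ell + 1) * sigma**2)

fwhm = sigma * np.sqrt(8. * np.log(2.))
print(np.allclose(kernel, hp.gauss_beam(fwhm, lmax=lmax)))  # expected: True
```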
......@@ -45,7 +45,7 @@ class PowerIndices(object):
self.distribution_strategy = distribution_strategy
# Compute the global k_array
-self.k_array = self.domain.compute_k_array(distribution_strategy)
+self.k_array = self.domain.get_distance_array(distribution_strategy)
# Initialize the dictonary which stores all individual index-dicts
self.global_dict = {}
# Set self.default_parameters
......
......@@ -48,10 +48,6 @@ class PowerSpace(Space):
self._pundex = power_index['pundex']
self._k_array = power_index['k_array']
-    def compute_k_array(self, distribution_strategy):
-        raise NotImplementedError(about._errors.cstring(
-            "ERROR: There is no k_array implementation for PowerSpace."))
def pre_cast(self, x, axes=None):
if callable(x):
return x(self.kindex)
......@@ -109,6 +105,15 @@ class PowerSpace(Space):
return result_x
    def get_distance_array(self, distribution_strategy):
        raise NotImplementedError(about._errors.cstring(
            "ERROR: There is no get_distance_array implementation for "
            "PowerSpace."))

    def get_smoothing_kernel_function(self, sigma):
        raise NotImplementedError(about._errors.cstring(
            "ERROR: There is no smoothing function for PowerSpace."))
# ---Added properties and methods---
@property
......
......@@ -152,64 +152,6 @@ class RGSpace(Space):
self._distances = self._parse_distances(distances)
self._zerocenter = self._parse_zerocenter(zerocenter)
-    def compute_k_array(self, distribution_strategy):
-        """
-        Calculates an n-dimensional array with its entries being the
-        lengths of the k-vectors from the zero point of the grid.
-
-        Parameters
-        ----------
-        None : All information is taken from the parent object.
-
-        Returns
-        -------
-        nkdict : distributed_data_object
-        """
-        shape = self.shape
-        # prepare the distributed_data_object
-        nkdict = distributed_data_object(
-            global_shape=shape,
-            dtype=np.float128,
-            distribution_strategy=distribution_strategy)
-        if distribution_strategy in DISTRIBUTION_STRATEGIES['slicing']:
-            # get the node's individual slice of the first dimension
-            slice_of_first_dimension = slice(
-                *nkdict.distributor.local_slice[0:2])
-        elif distribution_strategy in DISTRIBUTION_STRATEGIES['not']:
-            slice_of_first_dimension = slice(0, shape[0])
-        else:
-            raise ValueError(about._errors.cstring(
-                "ERROR: Unsupported distribution strategy"))
-        dists = self._compute_k_array_helper(slice_of_first_dimension)
-        nkdict.set_local_data(dists)
-        return nkdict
-
-    def _compute_k_array_helper(self, slice_of_first_dimension):
-        dk = self.distances
-        shape = self.shape
-        inds = []
-        for a in shape:
-            inds += [slice(0, a)]
-        cords = np.ogrid[inds]
-        dists = ((np.float128(0) + cords[0] - shape[0] // 2) * dk[0])**2
-        # apply zerocenterQ shift
-        if self.zerocenter[0] == False:
-            dists = np.fft.fftshift(dists)
-        # only save the individual slice
-        dists = dists[slice_of_first_dimension]
-        for ii in range(1, len(shape)):
-            temp = ((cords[ii] - shape[ii] // 2) * dk[ii])**2
-            if self.zerocenter[ii] == False:
-                temp = np.fft.fftshift(temp)
-            dists = dists + temp
-        dists = np.sqrt(dists)
-        return dists
def hermitian_decomposition(self, x, axes=None):
# compute the hermitian part
flipped_x = self._hermitianize_inverter(x, axes=axes)
......@@ -284,6 +226,70 @@ class RGSpace(Space):
result_x = x*weight
return result_x
    def get_distance_array(self, distribution_strategy):
        """
        Calculates an n-dimensional array with its entries being the
        lengths of the k-vectors from the zero point of the grid.

        Parameters
        ----------
        None : All information is taken from the parent object.

        Returns
        -------
        nkdict : distributed_data_object
        """
        shape = self.shape
        # prepare the distributed_data_object
        nkdict = distributed_data_object(
            global_shape=shape,
            dtype=np.float128,
            distribution_strategy=distribution_strategy)

        if distribution_strategy in DISTRIBUTION_STRATEGIES['slicing']:
            # get the node's individual slice of the first dimension
            slice_of_first_dimension = slice(
                *nkdict.distributor.local_slice[0:2])
        elif distribution_strategy in DISTRIBUTION_STRATEGIES['not']:
            slice_of_first_dimension = slice(0, shape[0])
        else:
            raise ValueError(about._errors.cstring(
                "ERROR: Unsupported distribution strategy"))

        dists = self._distance_array_helper(slice_of_first_dimension)
        nkdict.set_local_data(dists)

        return nkdict

    def _distance_array_helper(self, slice_of_first_dimension):
        dk = self.distances
        shape = self.shape

        inds = []
        for a in shape:
            inds += [slice(0, a)]
        cords = np.ogrid[inds]

        dists = ((np.float128(0) + cords[0] - shape[0] // 2) * dk[0])**2
        # apply zerocenterQ shift
        if self.zerocenter[0] == False:
            dists = np.fft.fftshift(dists)
        # only save the individual slice
        dists = dists[slice_of_first_dimension]
        for ii in range(1, len(shape)):
            temp = ((cords[ii] - shape[ii] // 2) * dk[ii])**2
            if self.zerocenter[ii] == False:
                temp = np.fft.fftshift(temp)
            dists = dists + temp
        dists = np.sqrt(dists)

        return dists

    def get_smoothing_kernel_function(self, sigma):
        if sigma is None:
            sigma = np.sqrt(2) * np.max(self.distances)

        return lambda x: np.exp(-2. * np.pi**2 * x**2 * sigma**2)
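For reference, a plain-NumPy sketch (no d2o, made-up grid parameters) of what the RGSpace _distance_array_helper above produces for a small 1-D grid:

```python
# Sketch: the |k| value of every grid point, fftshift-ed when the zero mode
# is not centred, mirroring _distance_array_helper for a 1-D grid.
import numpy as np

shape, dk, zerocenter = (8,), 0.25, False
coords = np.arange(shape[0])
dists = np.abs((coords - shape[0] // 2) * dk)   # distances from the centre
if not zerocenter:
    dists = np.fft.fftshift(dists)              # zero mode moved to index 0
print(dists)   # [0., 0.25, 0.5, 0.75, 1., 0.75, 0.5, 0.25]
```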