diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 89e54d0607e3e6c8f2fd7919f0b826f79334e527..b5f6331fed26d60c7e7f1080947f0fd8d70b491a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -13,7 +13,7 @@ before_script:
   - apt-get update
   - >
     apt-get install -y build-essential python python-pip python-dev git
-    autoconf gsl-bin libgsl-dev wget python-numpy cython
+    autoconf gsl-bin libgsl-dev wget python-numpy
   - pip install --upgrade -r ci/requirements_base.txt
   - chmod +x ci/*.sh

diff --git a/README.md b/README.md
index 15e0816e541503aa53be22770f2b312f8192f568..70468564b3fca38a29c2034b686f03de3d9d6926 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Summary
 a versatile library designed to enable the development of signal
 inference algorithms that operate regardless of the underlying spatial
 grid and its resolution. Its object-oriented framework is written in
-Python, although it accesses libraries written in Cython, C++, and C for
+Python, although it accesses libraries written in C++ and C for
 efficiency.

 NIFTY offers a toolkit that abstracts discretized representations of
@@ -71,7 +71,6 @@ Installation

 - [Python](http://www.python.org/) (v2.7.x)
 - [NumPy](http://www.numpy.org/)
-- [Cython](http://cython.org/)

 ### Download

@@ -95,7 +94,7 @@ Starting with a fresh Ubuntu installation move to a folder like

 - Using pip install numpy etc...:

-    sudo pip install numpy cython
+    sudo pip install numpy

 - Install pyHealpix:

@@ -147,10 +146,9 @@ MacPorts, missing ones need to be installed manually. It may also be
 mentioned that one should only use one package manager, as multiple ones
 may cause trouble.

-- Install basic packages numpy and cython:
+- Install numpy:

     sudo port install py27-numpy
-    sudo port install py27-cython

 - Install pyHealpix:

diff --git a/ci/requirements.txt b/ci/requirements.txt
index 424f78f5a6757773b236896f08586eead7ed6c8a..638b653efb5f31e6cf852aac378c95015804ca5e 100644
--- a/ci/requirements.txt
+++ b/ci/requirements.txt
@@ -1,5 +1,4 @@
 numpy
-cython
 mpi4py
 matplotlib
 plotly
diff --git a/nifty/operators/smoothing_operator/smooth_util.pyx b/nifty/operators/smoothing_operator/smooth_util.pyx
deleted file mode 100644
index f1e714562c9c907774315bb2ee87cd39d93b1d18..0000000000000000000000000000000000000000
--- a/nifty/operators/smoothing_operator/smooth_util.pyx
+++ /dev/null
@@ -1,222 +0,0 @@
-#cython: nonecheck=False
-#cython: boundscheck=True
-#cython: wraparound=False
-#cython: cdivision=True
-
-import numpy as np
-cimport numpy as np
-#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-
-cdef int SIGMA_RANGE = 3
-
-cpdef np.float32_t gaussianKernel_f(np.ndarray[np.float32_t, ndim=1] mpower,
-                                    np.ndarray[np.float32_t, ndim=1] mk,
-                                    np.float32_t mu,
-                                    np.float32_t smooth_length):
-    cdef np.ndarray[np.float32_t, ndim=1] C = \
-        np.exp(-(mk - mu) ** 2 / (2. * smooth_length ** 2))
-    return np.sum(C * mpower) / np.sum(C)
-
-cpdef np.float64_t gaussianKernel(np.ndarray[np.float64_t, ndim=1] mpower,
-                                  np.ndarray[np.float64_t, ndim=1] mk,
-                                  np.float64_t mu,
-                                  np.float64_t smooth_length):
-    cdef np.ndarray[np.float64_t, ndim=1] C = \
-        np.exp(-(mk - mu) ** 2 / (2. * smooth_length ** 2))
-    return np.sum(C * mpower) / np.sum(C)
-
-
-cpdef np.ndarray[np.float64_t, ndim=1] apply_kernel_along_array(
-        np.ndarray[np.float64_t, ndim=1] power,
-        np.int_t startindex,
-        np.int_t endindex,
-        np.ndarray[np.float64_t, ndim=1] distances,
-        np.float64_t smooth_length,
-        np.float64_t smoothing_width):
-
-    if smooth_length == 0.0:
-        return power[startindex:endindex]
-
-    if (smooth_length is None) or (smooth_length < 0):
-        smooth_length = distances[1]-distances[0]
-
-    cdef np.ndarray[np.float64_t, ndim=1] p_smooth = \
-        np.empty(endindex-startindex, dtype=np.float64)
-    cdef np.int64_t i, l, u
-    for i in xrange(startindex, endindex):
-        current_location = distances[i]
-        l = np.searchsorted(distances,
-                            current_location - smoothing_width*smooth_length)
-        u = np.searchsorted(distances,
-                            current_location + smoothing_width*smooth_length)
-        p_smooth[i-startindex] = gaussianKernel(power[l:u],
-                                                distances[l:u],
-                                                distances[i],
-                                                smooth_length)
-
-    return p_smooth
-
-cpdef np.ndarray[np.float32_t, ndim=1] apply_kernel_along_array_f(
-        np.ndarray[np.float32_t, ndim=1] power,
-        np.int_t startindex,
-        np.int_t endindex,
-        np.ndarray[np.float32_t, ndim=1] distances,
-        np.float64_t smooth_length,
-        np.float64_t smoothing_width):
-
-    if smooth_length == 0.0:
-        return power[startindex:endindex]
-
-    if (smooth_length is None) or (smooth_length < 0):
-        smooth_length = distances[1]-distances[0]
-
-    cdef np.ndarray[np.float32_t, ndim=1] p_smooth = \
-        np.empty(endindex-startindex, dtype=np.float64)
-    cdef np.int64_t i, l, u
-    for i in xrange(startindex, endindex):
-        current_location = distances[i]
-        l = np.searchsorted(distances,
-                            current_location - smoothing_width*smooth_length)
-        u = np.searchsorted(distances,
-                            current_location + smoothing_width*smooth_length)
-        p_smooth[i-startindex] = gaussianKernel(power[l:u],
-                                                distances[l:u],
-                                                distances[i],
-                                                smooth_length)
-
-    return p_smooth
-
-def getShape(a):
-    return tuple(a.shape)
-
-cpdef np.ndarray[np.float64_t, ndim=1] apply_along_axis(
-        np.int_t axis,
-        np.ndarray arr,
-        np.int_t startindex,
-        np.int_t endindex,
-        np.ndarray[np.float64_t, ndim=1] distances,
-        np.float64_t smooth_length,
-        np.float64_t smoothing_width):
-    cdef np.int_t nd = arr.ndim
-    if axis < 0:
-        axis += nd
-    if (axis >= nd):
-        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
-                         % (axis, nd))
-    cdef np.ndarray[np.int_t, ndim=1] ind = np.zeros(nd-1, dtype=np.int)
-    cdef np.ndarray i = np.zeros(nd, dtype=object)
-    cdef tuple shape = getShape(arr)
-    cdef np.ndarray[np.int_t, ndim=1] indlist = np.asarray(range(nd))
-    indlist = np.delete(indlist,axis)
-    i[axis] = slice(None, None)
-    cdef np.ndarray[np.int_t, ndim=1] outshape = \
-        np.asarray(shape).take(indlist)
-
-    i.put(indlist, ind)
-
-    cdef np.ndarray[np.float64_t, ndim=1] slicedArr
-    cdef np.ndarray[np.float64_t, ndim=1] res
-
-    cdef np.int_t Ntot = np.product(outshape)
-    cdef np.ndarray[np.int_t, ndim=1] holdshape = outshape
-    slicedArr = arr[tuple(i.tolist())]
-
-    res = apply_kernel_along_array(slicedArr,
-                                   startindex,
-                                   endindex,
-                                   distances,
-                                   smooth_length,
-                                   smoothing_width)
-
-    outshape = np.asarray(getShape(arr))
-    outshape[axis] = endindex - startindex
-    outarr = np.zeros(outshape, dtype=np.float64)
-    outarr[tuple(i.tolist())] = res
-    cdef np.int_t n, k = 1
-    while k < Ntot:
-        # increment the index
-        ind[nd-1] += 1
-        n = -1
-        while (ind[n] >= holdshape[n]) and (n > (1-nd)):
-            ind[n-1] += 1
-            ind[n] = 0
-            n -= 1
-        i.put(indlist, ind)
-        slicedArr = arr[tuple(i.tolist())]
-        res = apply_kernel_along_array(slicedArr,
-                                       startindex,
-                                       endindex,
-                                       distances,
-                                       smooth_length,
-                                       smoothing_width)
-        outarr[tuple(i.tolist())] = res
-        k += 1
-
-    return outarr
-
-
-cpdef np.ndarray[np.float32_t, ndim=1] apply_along_axis_f(
-        np.int_t axis,
-        np.ndarray arr,
-        np.int_t startindex,
-        np.int_t endindex,
-        np.ndarray[np.float32_t, ndim=1] distances,
-        np.float64_t smooth_length,
-        np.float64_t smoothing_width):
-    cdef np.int_t nd = arr.ndim
-    if axis < 0:
-        axis += nd
-    if (axis >= nd):
-        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
-                         % (axis, nd))
-    cdef np.ndarray[np.int_t, ndim=1] ind = np.zeros(nd-1, dtype=np.int)
-    cdef np.ndarray i = np.zeros(nd, dtype=object)
-    cdef tuple shape = getShape(arr)
-    cdef np.ndarray[np.int_t, ndim=1] indlist = np.asarray(range(nd))
-    indlist = np.delete(indlist,axis)
-    i[axis] = slice(None, None)
-    cdef np.ndarray[np.int_t, ndim=1] outshape = \
-        np.asarray(shape).take(indlist)
-
-    i.put(indlist, ind)
-
-    cdef np.ndarray[np.float32_t, ndim=1] slicedArr
-    cdef np.ndarray[np.float32_t, ndim=1] res
-
-    cdef np.int_t Ntot = np.product(outshape)
-    cdef np.ndarray[np.int_t, ndim=1] holdshape = outshape
-    slicedArr = arr[tuple(i.tolist())]
-
-    res = apply_kernel_along_array_f(slicedArr,
-                                     startindex,
-                                     endindex,
-                                     distances,
-                                     smooth_length,
-                                     smoothing_width)
-
-    outshape = np.asarray(getShape(arr))
-    outshape[axis] = endindex - startindex
-    outarr = np.zeros(outshape, dtype=np.float32)
-    outarr[tuple(i.tolist())] = res
-    cdef np.int_t n, k = 1
-    while k < Ntot:
-        # increment the index
-        ind[nd-1] += 1
-        n = -1
-        while (ind[n] >= holdshape[n]) and (n > (1-nd)):
-            ind[n-1] += 1
-            ind[n] = 0
-            n -= 1
-        i.put(indlist, ind)
-        slicedArr = arr[tuple(i.tolist())]
-        res = apply_kernel_along_array(slicedArr,
-                                       startindex,
-                                       endindex,
-                                       distances,
-                                       smooth_length,
-                                       smoothing_width)
-        outarr[tuple(i.tolist())] = res
-        k += 1
-
-    return outarr
-
diff --git a/nifty/operators/smoothing_operator/smoothing_operator.py b/nifty/operators/smoothing_operator/smoothing_operator.py
index 6d37277a1d94fe0b1d3b9a739a925a5380d7a693..f1f4e625a2edcfd09b4f4de96712bed0cf63885f 100644
--- a/nifty/operators/smoothing_operator/smoothing_operator.py
+++ b/nifty/operators/smoothing_operator/smoothing_operator.py
@@ -21,13 +21,136 @@
 import numpy as np
 import nifty.nifty_utilities as utilities
 from nifty.operators.endomorphic_operator import EndomorphicOperator
 from nifty.operators.fft_operator import FFTOperator
-from nifty.operators.smoothing_operator import smooth_util as su
+#from nifty.operators.smoothing_operator import smooth_util as su
 from d2o import STRATEGIES
+def smoothingHelper (x,sigma,dxmax=None):
+    """ Performs the precomputations for Gaussian smoothing on a 1D irregular grid.
+
+    Parameters
+    ----------
+    x: 1D floating point array or list containing the individual grid positions.
+        Points must be given in ascending order.
+
+    sigma: The sigma of the Gaussian with which the function living on x should
+        be smoothed, in the same units as x.
+    dxmax: (optional) The maximum distance up to which smoothing is performed,
+        in the same units as x. Default is 3.01*sigma.
+
+    Returns
+    -------
+    ibegin: integer array of the same size as x
+        ibegin[i] is the minimum grid index to consider when computing the
+        smoothed value at grid index i
+    nval: integer array of the same size as x
+        nval[i] is the number of indices to consider when computing the
+        smoothed value at grid index i.
+        The indices go from ibegin[i] (included) to ibegin[i]+nval[i](excluded).
+    wgt0: integer array of the same size as x
+        cumulative sum of nval
+    wgt: float array with sum(nval) entries containing the normalized smoothing
+        weights.
+        The weights for grid point i start at wgt[wgt0[i]].
+    """
+
+    if dxmax==None:
+        dxmax=3.01*sigma
+
+    x=np.asarray(x)
+
+    ibegin=np.searchsorted(x,x-dxmax)
+    nval=np.searchsorted(x,x+dxmax)-ibegin
+    wgt0=np.empty(x.size,dtype=np.int)
+    wgt0[0]=0
+    wgt0[1:x.size]=np.cumsum(nval[0:x.size-1])
+
+    wgt=np.empty(nval.sum(),dtype=np.float64)
+    expfac=1./(2.*sigma*sigma)
+    for i in range(x.size):
+        t=x[ibegin[i]:ibegin[i]+nval[i]]-x[i]
+        t=np.exp(-t*t*expfac)
+        t*=1./np.sum(t)
+        wgt[wgt0[i]:wgt0[i]+nval[i]]=t
+
+    return ibegin,nval,wgt0,wgt
+
+def apply_kernel_along_array(power, startindex, endindex, distances,
+                             smooth_length, smoothing_width,ibegin,nval,wgt0,wgt):
+
+    if smooth_length == 0.0:
+        return power[startindex:endindex]
+
+    p_smooth = np.empty(endindex-startindex, dtype=np.float64)
+
+    for i in xrange(startindex, endindex):
+        p_smooth[i-startindex]=np.sum(power[ibegin[i]:ibegin[i]+nval[i]]*wgt[wgt0[i]:wgt0[i]+nval[i]])
+
+    return p_smooth
+
+def getShape(a):
+    return tuple(a.shape)
+
+def apply_along_axis(axis, arr, startindex, endindex, distances,
+                     smooth_length, smoothing_width):
+
+    nd = arr.ndim
+    if axis < 0:
+        axis += nd
+    if (axis >= nd):
+        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
+                         % (axis, nd))
+    ibegin,nval,wgt0,wgt=smoothingHelper(distances,smooth_length,smooth_length*smoothing_width)
+
+    ind = np.zeros(nd-1, dtype=np.int)
+    i = np.zeros(nd, dtype=object)
+    shape = getShape(arr)
+    indlist = np.asarray(range(nd))
+    indlist = np.delete(indlist,axis)
+    i[axis] = slice(None, None)
+    outshape = np.asarray(shape).take(indlist)
+
+    i.put(indlist, ind)
+
+    Ntot =np.product(outshape)
+    holdshape = outshape
+    slicedArr = arr[tuple(i.tolist())]
+
+    res = apply_kernel_along_array(slicedArr,
+                                   startindex,
+                                   endindex,
+                                   distances,
+                                   smooth_length,
+                                   smoothing_width, ibegin,nval,wgt0,wgt)
+
+    outshape = np.asarray(getShape(arr))
+    outshape[axis] = endindex - startindex
+    outarr = np.zeros(outshape, dtype=np.float64)
+    outarr[tuple(i.tolist())] = res
+    k = 1
+    while k < Ntot:
+        # increment the index
+        ind[nd-1] += 1
+        n = -1
+        while (ind[n] >= holdshape[n]) and (n > (1-nd)):
+            ind[n-1] += 1
+            ind[n] = 0
+            n -= 1
+        i.put(indlist, ind)
+        slicedArr = arr[tuple(i.tolist())]
+        res = apply_kernel_along_array(slicedArr,
+                                       startindex,
+                                       endindex,
+                                       distances,
+                                       smooth_length,
+                                       smoothing_width,ibegin,nval,wgt0,wgt)
+        outarr[tuple(i.tolist())] = res
+        k += 1
+
+    return outarr


 class SmoothingOperator(EndomorphicOperator):
     # ---Overwritten properties and methods---
-    def __init__(self, domain=(), sigma=0, log_distances=False,
+    def __init__(self, domain, sigma, log_distances=False,
                  default_spaces=None):
         super(SmoothingOperator, self).__init__(default_spaces)
@@ -234,9 +357,10 @@ class SmoothingOperator(EndomorphicOperator):
         else:
             true_sigma = self.sigma

+        #MR: do not split for complex arrays!
         if data.dtype is np.dtype('float32'):
             distances = distances.astype(np.float32, copy=False)
-            smoothed_data = su.apply_along_axis_f(
+            smoothed_data = apply_along_axis(
                               data_axis, data,
                               startindex=true_start,
                               endindex=true_end,
@@ -245,7 +369,7 @@ class SmoothingOperator(EndomorphicOperator):
                               smoothing_width=self._direct_smoothing_width)
         elif data.dtype is np.dtype('float64'):
             distances = distances.astype(np.float64, copy=False)
-            smoothed_data = su.apply_along_axis(
+            smoothed_data = apply_along_axis(
                               data_axis, data,
                               startindex=true_start,
                               endindex=true_end,
diff --git a/nifty/spaces/power_space/power_space.py b/nifty/spaces/power_space/power_space.py
index 3b90d5d8513796ed404c938357e18c5bbcf38c3f..047adbc5f4c2d15e5aa5f66ec05814fc126d6a6a 100644
--- a/nifty/spaces/power_space/power_space.py
+++ b/nifty/spaces/power_space/power_space.py
@@ -30,7 +30,7 @@ class PowerSpace(Space):

     # ---Overwritten properties and methods---

-    def __init__(self, harmonic_domain=RGSpace((1,)),
+    def __init__(self, harmonic_domain,
                  distribution_strategy='not',
                  log=False, nbin=None, binbounds=None):
diff --git a/setup.py b/setup.py
index 592c530859a8af1e618459aec21750522dfe3e21..54dfa4ed1fe87ae803fe2a2a19e7e03d8dd15f6e 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,6 @@

 from setuptools import setup, find_packages
-from Cython.Build import cythonize
 import numpy

 exec(open('nifty/version.py').read())

@@ -33,9 +32,6 @@ setup(name="ift_nifty",
       packages=find_packages(),
       package_dir={"nifty": "nifty"},
       zip_safe=False,
-      ext_modules=cythonize([
-          "nifty/operators/smoothing_operator/smooth_util.pyx"
-      ]),
       include_dirs=[numpy.get_include()],
       dependency_links=[
        'git+https://gitlab.mpcdf.mpg.de/ift/keepers.git#egg=keepers-0.3.7',
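For orientation: the core of this patch replaces the compiled smooth_util kernels with the pure-NumPy helpers smoothingHelper and apply_kernel_along_array in smoothing_operator.py. The idea is precompute-then-apply: for every point of the irregular 1D grid, the index window of its neighbours and their normalized Gaussian weights are computed once, after which smoothing reduces to plain weighted sums. The sketch below illustrates that scheme with NumPy only; the names smoothing_weights and smooth_1d are illustrative stand-ins rather than part of the patch, and the multi-dimensional slice bookkeeping done by apply_along_axis is omitted.

```python
import numpy as np


def smoothing_weights(x, sigma, dxmax=None):
    """Precompute neighbour windows and normalized Gaussian weights on an
    irregular, ascending 1D grid (mirrors the patch's smoothingHelper)."""
    if dxmax is None:
        dxmax = 3.01 * sigma                             # same cutoff as in the patch
    x = np.asarray(x, dtype=np.float64)
    ibegin = np.searchsorted(x, x - dxmax)               # first neighbour of each point
    nval = np.searchsorted(x, x + dxmax) - ibegin        # number of neighbours
    wgt0 = np.concatenate(([0], np.cumsum(nval[:-1])))   # offset of each window in wgt
    wgt = np.empty(nval.sum(), dtype=np.float64)
    expfac = 1. / (2. * sigma * sigma)
    for i in range(x.size):
        t = x[ibegin[i]:ibegin[i] + nval[i]] - x[i]
        t = np.exp(-t * t * expfac)                      # unnormalized Gaussian weights
        wgt[wgt0[i]:wgt0[i] + nval[i]] = t / t.sum()     # normalize each window to unit sum
    return ibegin, nval, wgt0, wgt


def smooth_1d(power, x, sigma):
    """Weighted sums over the precomputed windows (mirrors apply_kernel_along_array)."""
    ibegin, nval, wgt0, wgt = smoothing_weights(x, sigma)
    return np.array([np.dot(power[ibegin[i]:ibegin[i] + nval[i]],
                            wgt[wgt0[i]:wgt0[i] + nval[i]])
                     for i in range(len(x))])


if __name__ == "__main__":
    x = np.cumsum(np.random.rand(50))                    # irregular, ascending grid
    noisy = np.sin(x) + 0.3 * np.random.randn(50)        # toy data living on that grid
    print(smooth_1d(noisy, x, sigma=0.5))
```

The precomputation is what makes the pure-Python route viable: ibegin, nval, wgt0 and wgt depend only on the grid and sigma, so apply_along_axis computes them once per call and reuses them for every 1D slice it smooths.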
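The patch also drops the default arguments from SmoothingOperator.__init__ (previously domain=(), sigma=0) and from PowerSpace.__init__ (previously harmonic_domain=RGSpace((1,))), so these arguments now have to be supplied explicitly. A minimal, hypothetical usage sketch after the patch; it assumes RGSpace and SmoothingOperator are importable from the top-level nifty package, and the grid size and sigma are arbitrary illustration values:

```python
from nifty import RGSpace, SmoothingOperator  # import path assumed, not shown in the patch

x_space = RGSpace((1024,))                    # some signal domain
# domain and sigma are now mandatory; constructing SmoothingOperator() without them no longer works
smoother = SmoothingOperator(domain=x_space, sigma=0.05)
```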