Commit 0b5464f3 authored by Martin Reinecke's avatar Martin Reinecke

first working version

parent e7bc9096
......@@ -19,5 +19,3 @@
from .nifty_config import dependency_injector,\
nifty_configuration
from .d2o_config import d2o_configuration
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2017 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
import os
import keepers
# pre-create the D2O configuration instance and set its path explicitly
# NOTE(review): keepers.get_Configuration presumably returns a named
# configuration object persisted to 'D2O.conf'; the search paths appear to
# be probed in order (user nifty config dir, generic user config dir, then
# the current working directory) -- confirm against the keepers docs.
d2o_configuration = keepers.get_Configuration(
    name='D2O',
    file_name='D2O.conf',
    search_paths=[os.path.expanduser('~') + "/.config/nifty/",
                  os.path.expanduser('~') + "/.config/",
                  './'])
......@@ -34,8 +34,6 @@ dependency_injector = keepers.DependencyInjector(
def _fft_module_checker(z):
if z == 'fftw_mpi':
return hasattr(dependency_injector.get('fftw'), 'FFTW_MPI')
if z == 'fftw':
return ('fftw' in dependency_injector)
if z == 'numpy':
......@@ -45,7 +43,7 @@ def _fft_module_checker(z):
# Initialize the variables
variable_fft_module = keepers.Variable(
'fft_module',
['fftw_mpi', 'fftw', 'numpy'],
['fftw', 'numpy'],
_fft_module_checker)
......
......@@ -370,7 +370,7 @@ class Field(Loggable, Versionable, object):
target_shape=field_val.shape,
axes=axes)
power_spectrum = pindex.bincount(weights=field_val,
power_spectrum = utilities.bincount_axis(pindex, weights=field_val,
axis=axes)
rho = pdomain.rho
if axes is not None:
......@@ -382,14 +382,14 @@ class Field(Loggable, Versionable, object):
return power_spectrum
@staticmethod
def _shape_up_pindex(pindex, target_shape, target_strategy, axes):
def _shape_up_pindex(pindex, target_shape, axes):
semiscaled_local_shape = [1, ] * len(target_shape)
for i in range(len(axes)):
semiscaled_local_shape[axes[i]] = pindex.local_shape[i]
local_data = pindex.get_local_data(copy=False)
semiscaled_local_shape[axes[i]] = pindex.shape[i]
local_data = pindex
semiscaled_local_data = local_data.reshape(semiscaled_local_shape)
result_obj = pindex.copy_empty(global_shape=target_shape)
result_obj.data[:] = semiscaled_local_data
result_obj = np.empty(target_shape, dtype=pindex.dtype)
result_obj[:] = semiscaled_local_data
return result_obj
......@@ -492,14 +492,10 @@ class Field(Loggable, Versionable, object):
result_val_list = [x.val for x in result_list]
# apply the rescaler to the random fields
result_val_list[0].apply_scalar_function(
lambda x: x * local_rescaler.real,
inplace=True)
result_val_list[0] *= local_rescaler.real
if not real_power:
result_val_list[1].apply_scalar_function(
lambda x: x * local_rescaler.imag,
inplace=True)
result_val_list[1] *= local_rescaler.imag
if real_signal:
result_val_list = [self._hermitian_decomposition(
......@@ -538,8 +534,8 @@ class Field(Loggable, Versionable, object):
# no flips are applied, one can use `is` to infer this case.
if flipped_val is val:
h = flipped_val.real
a = 1j * flipped_val.imag
h = flipped_val.real.copy()
a = 1j * flipped_val.imag.copy()
else:
flipped_val = flipped_val.conjugate()
h = (val + flipped_val)/2.
......@@ -600,7 +596,7 @@ class Field(Loggable, Versionable, object):
# Now use numpy advanced indexing in order to put the entries of the
# power spectrum into the appropriate places of the pindex array.
# Do this for every 'pindex-slice' in parallel using the 'slice(None)'s
local_pindex = pindex.get_local_data(copy=False)
local_pindex = pindex
local_blow_up = [slice(None)]*len(spec.shape)
# it is important to count from behind, since spec potentially grows
......
......@@ -20,7 +20,7 @@ from builtins import next
from builtins import range
import numpy as np
from itertools import product
import itertools
def get_slice_list(shape, axes):
"""
......@@ -110,3 +110,118 @@ def parse_domain(domain):
"Given object contains something that is not an "
"instance of DomainObject-class.")
return domain
def slicing_generator(shape, axes):
    """
    Helper function which generates slice list(s) to traverse over all
    combinations of axes, other than the selected axes.

    Parameters
    ----------
    shape: tuple
        Shape of the data array to traverse over.
    axes: tuple
        Axes which should not be iterated over.

    Yields
    -------
    list
        The next list of indices and/or slice objects for each dimension.

    Raises
    ------
    ValueError
        If shape is empty (or None).
    ValueError
        If axes(axis) does not match shape.
    """
    # the old message claimed "shape cannot be None", but an empty shape
    # triggers this branch as well
    if not shape:
        raise ValueError("ERROR: shape must be a non-empty tuple.")
    if not axes:
        # nothing to iterate over: one full slice covers the whole array
        yield [slice(None, None)]
        return
    if not all(axis < len(shape) for axis in axes):
        raise ValueError("ERROR: axes(axis) does not match shape.")
    # 1 -> iterate over this dimension, 0 -> keep it as a full slice
    axes_select = [0 if dim in axes else 1 for dim in range(len(shape))]
    # ranges of the dimensions that ARE iterated over (itertools.product
    # accepts range objects directly; no need to materialize lists)
    iterables = [range(extent)
                 for dim, extent in enumerate(shape) if dim not in axes]
    for index_tuple in itertools.product(*iterables):
        index_iter = iter(index_tuple)
        yield [next(index_iter) if selected else slice(None, None)
               for selected in axes_select]
def bincount_axis(obj, minlength=None, weights=None, axis=None):
    """Count occurrences (optionally weighted) of the non-negative integer
    entries of `obj`, binned along the axes given in `axis`.

    Parameters
    ----------
    obj : np.ndarray of non-negative integers
        The values to be counted. Must not be zero-dimensional.
    minlength : int, optional
        Minimum number of bins in the output; the actual number is
        max(obj.max() + 1, minlength), like np.bincount's `minlength`.
    weights : np.ndarray, optional
        Array of the same shape as `obj`; when given, the bins accumulate
        the weights instead of plain counts and the result is float.
    axis : int or tuple of int, optional
        The axes along which to bin. When None, the whole array is
        flattened and a single 1-d bincount is returned. Otherwise the
        counted axes are replaced by one bin axis located at the position
        of the first (sorted) entry of `axis`.

    Returns
    -------
    np.ndarray
        The bincount result; integral dtype without weights, float with.

    Raises
    ------
    ValueError
        If `obj` is zero-dimensional.
    """
    # number of bins: large enough for the data and at least `minlength`
    if minlength is not None:
        length = max(np.amax(obj) + 1, minlength)
    else:
        length = np.amax(obj) + 1

    if obj.shape == ():
        raise ValueError("object of too small depth for desired array")
    data = obj

    # if present, parse the axis keyword and transpose/reorder the data
    # such that all affected axes follow each other; only then can they
    # be flattened into one trailing axis
    if axis is not None:
        ndim = len(obj.shape)
        axis = sorted(cast_axis_to_tuple(axis, length=ndim))
        reordering = [x for x in range(ndim) if x not in axis]
        reordering += axis
        data = np.transpose(data, reordering)
        if weights is not None:
            weights = np.transpose(weights, reordering)
        reord_axis = list(range(ndim - len(axis), ndim))
        # semi-flatten the dimensions in `axis`, i.e. (after reordering)
        # the last ones; np.prod replaces the bare `reduce`, which was
        # never imported (NameError on Python 3)
        semi_flat_dim = int(np.prod(data.shape[ndim - len(reord_axis):]))
        flat_shape = data.shape[:ndim - len(reord_axis)] + (semi_flat_dim, )
    else:
        flat_shape = (data.size, )

    data = np.ascontiguousarray(data.reshape(flat_shape))
    if weights is not None:
        weights = np.ascontiguousarray(weights.reshape(flat_shape))

    # prepare the result array; plain counts are integral, weighted counts
    # are floating point (the np.int/np.float aliases were removed from
    # NumPy, hence the builtin types)
    result_dtype = int if weights is None else float
    local_counts = np.empty(flat_shape[:-1] + (length, ),
                            dtype=result_dtype)

    # iterate over all entries of the surviving axes and bincount the
    # trailing (flattened) axis; np.ndindex yields a single empty index
    # when flat_shape is 1-d, covering the axis=None case
    for idx in np.ndindex(*flat_shape[:-1]):
        current_weights = weights[idx] if weights is not None else None
        local_counts[idx] = np.bincount(data[idx],
                                        weights=current_weights,
                                        minlength=length)

    # restore the original ordering: move the bin axis from the end to
    # the location of the first (sorted) `axis` entry
    if axis is not None:
        insert_position = axis[0]
        new_ndim = len(local_counts.shape)
        return_order = (list(range(0, insert_position)) +
                        [new_ndim - 1, ] +
                        list(range(insert_position, new_ndim - 1)))
        local_counts = np.ascontiguousarray(
            local_counts.transpose(return_order))
    return local_counts
......@@ -37,7 +37,7 @@ class DiagonalOperator(EndomorphicOperator):
----------
domain : tuple of DomainObjects, i.e. Spaces and FieldTypes
The domain on which the Operator's input Field lives.
diagonal : {scalar, list, array, Field, d2o-object}
diagonal : {scalar, list, array, Field}
The diagonal entries of the operator.
bare : boolean
Indicates whether the input for the diagonal is bare or not
......@@ -181,7 +181,7 @@ class DiagonalOperator(EndomorphicOperator):
Parameters
----------
diagonal : {scalar, list, array, Field, d2o-object}
diagonal : {scalar, list, array, Field}
The diagonal entries of the operator.
bare : boolean
Indicates whether the input for the diagonal is bare or not
......@@ -226,16 +226,15 @@ class DiagonalOperator(EndomorphicOperator):
for space_index in spaces:
active_axes += x.domain_axes[space_index]
local_diagonal = self._diagonal.val.get_local_data(copy=False)
local_diagonal = self._diagonal.val
reshaper = [x.val.data.shape[i] if i in active_axes else 1
reshaper = [x.val.shape[i] if i in active_axes else 1
for i in range(len(x.shape))]
reshaped_local_diagonal = np.reshape(local_diagonal, reshaper)
# here the actual multiplication takes place
local_result = operation(reshaped_local_diagonal)(
x.val.get_local_data(copy=False))
local_result = operation(reshaped_local_diagonal)(x.val)
result_field = x.copy_empty(dtype=local_result.dtype)
result_field.val.set_local_data(local_result, copy=False)
result_field.val=local_result
return result_field
......@@ -19,7 +19,7 @@
from __future__ import division
import numpy as np
from .transformation import Transformation
from .rg_transforms import MPIFFT, SerialFFT
from .rg_transforms import SerialFFT
from .... import RGSpace, nifty_configuration
......@@ -33,9 +33,7 @@ class RGRGTransformation(Transformation):
if module is None:
module = nifty_configuration['fft_module']
if module == 'fftw_mpi':
self._transform = MPIFFT(self.domain, self.codomain)
elif module == 'fftw':
if module == 'fftw':
self._transform = SerialFFT(self.domain, self.codomain,
use_fftw=True)
elif module == 'numpy':
......
......@@ -25,8 +25,6 @@ from ...field import Field
from ... import nifty_utilities as utilities
from ... import nifty_configuration as nc
from d2o import STRATEGIES as DISTRIBUTION_STRATEGIES
class Prober(object):
"""
......@@ -38,13 +36,11 @@ class Prober(object):
"""
def __init__(self, domain=None, distribution_strategy=None, probe_count=8,
def __init__(self, domain=None, probe_count=8,
random_type='pm1', probe_dtype=np.float,
compute_variance=False):
self._domain = utilities.parse_domain(domain)
self._distribution_strategy = \
self._parse_distribution_strategy(distribution_strategy)
self._probe_count = self._parse_probe_count(probe_count)
self._random_type = self._parse_random_type(random_type)
self.compute_variance = bool(compute_variance)
......@@ -57,20 +53,6 @@ class Prober(object):
def domain(self):
return self._domain
@property
def distribution_strategy(self):
return self._distribution_strategy
def _parse_distribution_strategy(self, distribution_strategy):
if distribution_strategy is None:
distribution_strategy = nc['default_distribution_strategy']
else:
distribution_strategy = str(distribution_strategy)
if distribution_strategy not in DISTRIBUTION_STRATEGIES['global']:
raise ValueError("distribution_strategy must be a global-type "
"strategy.")
self._distribution_strategy = distribution_strategy
@property
def probe_count(self):
return self._probe_count
......@@ -109,8 +91,7 @@ class Prober(object):
""" a random-probe generator """
f = Field.from_random(random_type=self.random_type,
domain=self.domain,
dtype=self.probe_dtype,
distribution_strategy=self.distribution_strategy)
dtype=self.probe_dtype)
uid = self._uid_counter
self._uid_counter += 1
return (uid, f)
......
......@@ -34,11 +34,8 @@ setup(name="ift_nifty",
zip_safe=False,
include_dirs=[numpy.get_include()],
dependency_links=[
'git+https://gitlab.mpcdf.mpg.de/ift/keepers.git#egg=keepers-0.3.7',
'git+https://gitlab.mpcdf.mpg.de/ift/d2o.git#egg=d2o-1.1.1'],
install_requires=['keepers>=0.3.7', 'd2o>=1.1.1'],
package_data={'nifty.demos': ['demo_faraday_map.npy'],
},
'git+https://gitlab.mpcdf.mpg.de/ift/keepers.git#egg=keepers-0.3.7'],
install_requires=['keepers>=0.3.7'],
license="GPLv3",
classifiers=[
"Development Status :: 4 - Beta",
......
......@@ -42,6 +42,5 @@ class Test_ConjugateGradient(unittest.TestCase):
(position, convergence) = minimizer(A=covariance, x0=starting_point,
b=required_result)
assert_almost_equal(position.val.get_full_data(),
1./covariance_diagonal.val.get_full_data(),
decimal=3)
assert_almost_equal(position.val,
1./covariance_diagonal.val, decimal=3)
......@@ -49,4 +49,4 @@ class Test_DescentMinimizers(unittest.TestCase):
(energy, convergence) = minimizer(energy)
assert_almost_equal(energy.value, 0, decimal=5)
assert_almost_equal(energy.position.val.get_full_data(), 0., decimal=5)
assert_almost_equal(energy.position.val, 0., decimal=5)
......@@ -56,6 +56,5 @@ class ComposedOperator_Tests(unittest.TestCase):
rand1 = Field.from_random('normal', domain=(space1, space2))
tt1 = op.inverse_times(op.times(rand1))
assert_allclose(tt1.val.get_full_data(),
rand1.val.get_full_data())
assert_allclose(tt1.val, rand1.val)
......@@ -44,7 +44,7 @@ class DiagonalOperator_Tests(unittest.TestCase):
diag = Field.from_random('normal', domain=space)
D = DiagonalOperator(space, diagonal=diag, bare=bare, copy=copy)
tt1 = D.times(D.inverse_times(rand1))
assert_allclose(rand1.val.get_full_data(), tt1.val.get_full_data())
assert_allclose(rand1.val, tt1.val)
@expand(product(spaces, [True, False], [True, False]))
def test_times(self, space, bare, copy):
......@@ -83,12 +83,11 @@ class DiagonalOperator_Tests(unittest.TestCase):
diag = Field.from_random('normal', domain=space)
D = DiagonalOperator(space, diagonal=diag, copy=copy)
diag_op = D.diagonal()
assert_allclose(diag.val.get_full_data(), diag_op.val.get_full_data())
assert_allclose(diag.val, diag_op.val)
@expand(product(spaces, [True, False]))
def test_inverse(self, space, copy):
diag = Field.from_random('normal', domain=space)
D = DiagonalOperator(space, diagonal=diag, copy=copy)
diag_op = D.inverse_diagonal()
assert_allclose(1./diag.val.get_full_data(),
diag_op.val.get_full_data())
assert_allclose(1./diag.val, diag_op.val)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2017 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
import unittest
from numpy.testing import assert_equal
from keepers import Repository
from test.common import expand, generate_spaces
from nifty import Field
from nose.plugins.skip import SkipTest
import os
class SpaceSerializationTests(unittest.TestCase):
    """Round-trip serialization tests: a space and a field written to an
    HDF5-backed keepers Repository must compare equal after read-back."""

    @expand([[space] for space in generate_spaces()])
    def test_serialization(self, space):
        # h5py is an optional dependency; skip instead of failing when
        # it is not installed
        try:
            import h5py
        except ImportError:
            raise SkipTest

        # start from a clean slate in case a previous run left the file
        # behind; only swallow OS-level errors (the previous bare
        # `except:` would also have hidden SystemExit/KeyboardInterrupt)
        try:
            os.remove('test.h5')
        except OSError:
            pass
        repo = Repository('test.h5')
        repo.add(space, 'space')
        field = Field(space, val=42.)
        repo.add(field, 'field')
        repo.commit()
        assert_equal(space, repo.get('space'))
        assert_equal(field, repo.get('field'))
        # clean up the temporary HDF5 file
        try:
            os.remove('test.h5')
        except OSError:
            pass
......@@ -24,7 +24,6 @@ from itertools import product
from types import LambdaType
from test.common import expand, generate_spaces, generate_harmonic_spaces
from d2o import distributed_data_object
from nifty.spaces import *
......@@ -39,7 +38,7 @@ class SpaceInterfaceTests(unittest.TestCase):
attr_expected_type[1]))
@expand(product(generate_harmonic_spaces(), [
['get_distance_array', 'not', distributed_data_object],
['get_distance_array', np.ndarray],
['get_fft_smoothing_kernel_function', 2.0, LambdaType],
]))
def test_method_ret_type(self, space, method_expected_type):
......
......@@ -22,7 +22,6 @@ import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises,\
assert_almost_equal, assert_array_almost_equal
from d2o import distributed_data_object
from nifty import LMSpace
from test.common import expand
......@@ -111,10 +110,10 @@ class LMSpaceFunctionalityTests(unittest.TestCase):
def test_hermitianize_inverter(self):
l = LMSpace(5)
v = distributed_data_object(global_shape=l.shape, dtype=np.complex128)
v = np.empty(l.shape, dtype=np.complex128)
v[:] = np.random.random(l.shape) + 1j*np.random.random(l.shape)
inverted = l.hermitianize_inverter(v, axes=(0,))
assert_array_almost_equal(inverted.get_full_data(), v.get_full_data())
assert_array_almost_equal(inverted, v)
@expand(get_weight_configs())
def test_weight(self, x, power, axes, inplace, expected):
......@@ -127,4 +126,4 @@ class LMSpaceFunctionalityTests(unittest.TestCase):
@expand(get_distance_array_configs())
def test_distance_array(self, lmax, expected):
l = LMSpace(lmax)
assert_almost_equal(l.get_distance_array().data, expected)
assert_almost_equal(l.get_distance_array(), expected)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment