Commit 3d9281f9 authored by Ultima

'Finished' the Cython line-integrator.

Started working on the los response operator.
Switched off a sanity check in the d2o _slicing_distributor's advanced indexing routine, because it was extremely slow (the assertion that the first dimension of the key is sorted).
parent 1cf1e763
......@@ -2020,8 +2020,10 @@ class field(object):
"""
if new_val is not None:
if copy:
new_val = self.domain.unary_operation(new_val, 'copy')
self.val = self.domain.cast(new_val)
new_val = self._map(
lambda z: self.domain.unary_operation(z, 'copy'),
new_val)
self.val = self._map(lambda z: self.domain.cast(z), new_val)
return self.val
def get_val(self):
......
......@@ -1835,11 +1835,11 @@ class _slicing_distributor(distributor):
get_full_data(target_rank=j)
if j == rank:
result = temp_result
if not all(result[i] <= result[i + 1]
for i in xrange(len(result) - 1)):
raise ValueError(about._errors.cstring(
"ERROR: The first dimemnsion of list_key must be sorted!"))
# TODO: Implement fast check!
# if not all(result[i] <= result[i + 1]
# for i in xrange(len(result) - 1)):
# raise ValueError(about._errors.cstring(
# "ERROR: The first dimemnsion of list_key must be sorted!"))
result = [result]
for ii in xrange(1, len(from_list_key)):
......@@ -1877,10 +1877,11 @@ class _slicing_distributor(distributor):
local_selection = greater_than_lower * less_than_upper
result = [local_zeroth_key[local_selection]]
if not all(result[0][i] <= result[0][i + 1]
for i in xrange(len(result[0]) - 1)):
raise ValueError(about._errors.cstring(
"ERROR: The first dimemnsion of list_key must be sorted!"))
# TODO: Implement fast check!
# if not all(result[0][i] <= result[0][i + 1]
# for i in xrange(len(result[0]) - 1)):
# raise ValueError(about._errors.cstring(
# "ERROR: The first dimemnsion of list_key must be sorted!"))
for ii in xrange(1, len(from_list_key)):
current = from_list_key[ii]
......
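The disabled check above walks the key element by element in pure Python, which is what made it so slow. As a sketch of the fast check the TODO asks for, a vectorized comparison could perform the same test at C speed; the helper below is purely illustrative, not part of the d2o API, and assumes the zeroth key can be cast to a 1d numpy array.

import numpy as np
def first_dimension_is_sorted(zeroth_key):
    # vectorized replacement for the element-wise Python loop:
    # a 1d key is sorted iff no neighbouring difference is negative
    zeroth_key = np.asarray(zeroth_key)
    return bool(np.all(np.diff(zeroth_key) >= 0))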
This diff is collapsed.
# -*- coding: utf-8 -*-
import pyximport; pyximport.install()
from nifty_los_implementation import los_integrator
import numpy as np
from line_integrator import line_integrator
from nifty.keepers import about
from nifty.rg import rg_space
from nifty.operators import operator
class los_response(operator):
def __init__(self, domain, starts, ends, sigmas_low=None, sigmas_up=None,
zero_point=None, error_function=lambda x: 0.5):
if not isinstance(domain, rg_space):
raise TypeError(about._errors.cstring(
"ERROR: The domain must be a rg_space instance."))
self.domain = domain
if not callable(error_function):
raise ValueError(about._errors.cstring(
"ERROR: error_function must be callable."))
(self.starts,
self.ends,
self.sigmas_low,
self.sigmas_up,
self.zero_point) = self._parse_coordinates(self.domain,
starts, ends, sigmas_low,
sigmas_up, zero_point)
self.imp = True
self.uni = False
self.sym = False
def _parse_coordinates(self, domain, starts, ends, sigmas_low, sigmas_up,
zero_point):
# basic sanity checks
if not isinstance(starts, list):
raise TypeError(about._errors.cstring(
"ERROR: starts must be a list instance."))
if not isinstance(ends, list):
raise TypeError(about._errors.cstring(
"ERROR: ends must be a list instance."))
if not (len(domain.get_shape()) == len(starts) == len(ends)):
raise ValueError(about._errors.cstring(
"ERROR: The length of starts and ends must " +
"be the same as the number of dimension of the domain."))
number_of_dimensions = len(starts)
if zero_point is None:
zero_point = [0.] * number_of_dimensions
if np.shape(zero_point) != (number_of_dimensions,):
raise ValueError(about._errors.cstring(
"ERROR: The shape of zero_point must match the length of " +
"the starts and ends list"))
parsed_zero_point = list(zero_point)
# extract the number of lines of sight and, along the way, check that
# all entries of starts and ends have the right shape
number_of_los = None
for i in xrange(2*number_of_dimensions):
if i < number_of_dimensions:
temp_entry = starts[i]
else:
temp_entry = ends[i-number_of_dimensions]
if isinstance(temp_entry, np.ndarray):
if len(np.shape(temp_entry)) != 1:
raise ValueError(about._errors.cstring(
"ERROR: The numpy ndarrays in starts " +
"and ends must be flat."))
if number_of_los is None:
number_of_los = len(temp_entry)
elif number_of_los != len(temp_entry):
raise ValueError(about._errors.cstring(
"ERROR: The length of all numpy ndarrays in starts " +
"and ends must be the same."))
elif np.isscalar(temp_entry):
pass
else:
raise TypeError(about._errors.cstring(
"ERROR: The entries of starts and ends must be either " +
"scalar or numpy ndarrays."))
if number_of_los is None:
number_of_los = 1
starts = [np.array([x]) for x in starts]
ends = [np.array([x]) for x in ends]
# Parse the coordinate arrays/scalars in the starts and ends list
parsed_starts = self._parse_startsends(starts, number_of_los)
parsed_ends = self._parse_startsends(ends, number_of_los)
# check that sigmas_up/lows have the right shape and parse scalars
parsed_sigmas_up = self._parse_sigmas_uplows(sigmas_up, number_of_los)
parsed_sigmas_low = self._parse_sigmas_uplows(sigmas_low,
number_of_los)
# note: the order must match the unpacking in __init__
return (parsed_starts, parsed_ends, parsed_sigmas_low,
        parsed_sigmas_up, parsed_zero_point)
def _parse_startsends(self, coords, number_of_los):
result_coords = [None]*len(coords)
for i in xrange(len(coords)):
temp_array = np.empty(number_of_los, dtype=np.float)
temp_array[:] = coords[i]
result_coords[i] = temp_array
return result_coords
def _parse_sigmas_uplows(self, sig, number_of_los):
if sig is None:
parsed_sig = np.zeros(number_of_los, dtype=np.float)
elif isinstance(sig, np.ndarray):
if np.shape(sig) != (number_of_los,):
raise ValueError(about._errors.cstring(
"ERROR: The length of sigmas_up/sigmas_low must be " +
" the same as the number of line-of-sights."))
parsed_sig = sig.astype(np.float)
elif np.isscalar(sig):
parsed_sig = np.empty(number_of_los, dtype=np.float)
parsed_sig[:] = sig
else:
raise TypeError(about._errors.cstring(
"ERROR: sigmas_up/sigmas_low must either be a scalar or a " +
"numpy ndarray."))
return parsed_sig
def convert_indices_to_physical(self, pixel_coordinates):
# first of all, compute the physical distance of the given pixel
# from the zeroth pixel
physical_distance = np.array(pixel_coordinates) * \
    np.array(self.domain.distances)
# add the offset of the zeroth pixel with respect to the coordinate
# system
physical_position = physical_distance + np.array(self.zero_point)
return physical_position.tolist()
def convert_physical_to_indices(self, physical_position):
# compute the distance to the zeroth pixel
relative_position = np.array(physical_position) - \
np.array(self.zero_point)
# rescale the coordinates to the uniform grid
pixel_coordinates = relative_position / np.array(self.domain.distances)
return pixel_coordinates.tolist()
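The two conversion methods above realise an affine map between pixel indices and physical coordinates: physical = index * distances + zero_point, and the inverse divides the offset-corrected position by the pixel sizes. Below is a minimal numpy-only sketch of that round trip; the distances and zero_point values are made up for illustration.

import numpy as np
distances = np.array([0.5, 0.5])     # assumed pixel sizes of the rg_space
zero_point = np.array([-1.0, 2.0])   # assumed physical position of pixel (0, 0)
pixel = np.array([4., 6.])
physical = pixel * distances + zero_point         # convert_indices_to_physical
recovered = (physical - zero_point) / distances   # convert_physical_to_indices
assert np.allclose(recovered, pixel)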
# -*- coding: utf-8 -*-
import numpy as np
cimport numpy as np
cimport cython
class los_integrator(object):
def __init__(self, shape, start, end):
self.dtype = np.dtype('float')
self.shape = tuple(shape)
self.start = np.array(start, dtype=self.dtype)
self.end = np.array(end, dtype=self.dtype)
assert(np.all(np.array(self.shape) != 0))
assert(len(self.shape) == len(self.start) == len(self.end))
assert(len(self.start.shape) == len(self.end.shape) == 1)
def integrate(self):
if np.all(self.start == self.end):
return self._empty_results()
try:
projected_start = self._project_to_cuboid(mode='start')
projected_end = self._project_to_cuboid(mode='end')
except ValueError:
return self._empty_results()
(indices, weights) = self._integrate_through_cuboid(projected_start,
projected_end)
return (indices, weights)
def _empty_results(self):
return ([np.array([], dtype=np.dtype('int'))] * len(self.shape),
np.array([], dtype=np.dtype('float')))
def _project_to_cuboid(self, mode):
if mode == 'start':
a = self.start
b = self.end
elif mode == 'end':
a = self.end
b = self.start
else:
raise ValueError
if np.all(np.zeros_like(a) <= a) and np.all(a <= np.array(self.shape)):
return a
c = b - a
surface_list = []
for i in xrange(len(self.shape)):
surface_list += [[i, 0]]
surface_list += [[i, self.shape[i]]]
translator_list = map(lambda z:
self._get_translator_to_surface(a, c, *z),
surface_list)
# sort the translators according to their norm, save the sorted indices
translator_index_list = np.argsort(map(np.linalg.norm,
translator_list))
# iterate through the indices -from short to long translators- and
# take the first translator which brings a to the actual surface of
# the cuboid and not just to one of the parallel planes
found = False
for i in translator_index_list:
p = a + translator_list[i]
if np.all(np.zeros_like(p) <= p) and \
np.all(p <= np.array(self.shape)):
found = True
break
if not found:
raise ValueError(
"ERROR: Line-of-sight does not go through cuboid.")
return p
def _get_translator_to_surface(self, point, full_direction,
dimension_index, surface):
"""
translates 'point' along the vector 'direction' such that the
dimension with index 'dimension_index' has the value 'surface'
"""
direction_scaler = np.divide((surface - point[dimension_index]),
full_direction[dimension_index])
if direction_scaler < 0 or direction_scaler > 1:
return point * np.nan
scaled_direction = full_direction * direction_scaler
return scaled_direction
def _integrate_through_cuboid(self, start, end):
# estimate the maximum number of cells that could be hit
# the current estimator is: norm of the vector times number of dims
num_estimate = int(np.ceil(np.linalg.norm(end - start)) * len(start))
index_list = np.empty((num_estimate, len(start)),
                      dtype=np.dtype('int'))
weight_list = np.empty(num_estimate, self.dtype)
current_position = start
i = 0
while True:
next_position, weight = self._get_next_position(current_position,
end)
floor_current_position = np.floor(current_position)
index_list[i] = floor_current_position
weight_list[i] = weight
if np.all(np.floor(current_position) == np.floor(end)):
break
current_position = next_position
i += 1
return list(index_list[:i + 1].T), weight_list[:i + 1]
def _get_next_position(self, position, end_position):
full_direction = end_position - position
surface_list = []
for i in xrange(len(position)):
surface_list += [[i, strong_floor(position[i])]]
surface_list += [[i, strong_ceil(position[i])]]
translator_list = map(lambda z: self._get_translator_to_surface(
position,
full_direction,
*z),
surface_list)
translator_list = np.array(translator_list)
# pick the shortest translator; argsort puts nan-translators last
translator_norms = np.linalg.norm(translator_list, axis=1)
index_of_best_translator = np.argsort(translator_norms)[0]
best_translator = translator_list[index_of_best_translator]
# if the surrounding surfaces are not reachable, it must be the case
# that the current position is in the same cell as the endpoint
if np.isnan(np.linalg.norm(best_translator)):
floor_position = np.floor(position)
# check if position is in the same cell as the endpoint
assert(np.all(floor_position == np.floor(end_position)))
weight = np.linalg.norm(end_position - position)
next_position = None
else:
next_position = position + best_translator
weight = np.linalg.norm(best_translator)
return (next_position, weight)
def strong_floor(x):
floor_x = np.floor(x)
if floor_x == x:
return floor_x-1
else:
return floor_x
def strong_ceil(x):
ceil_x = np.ceil(x)
if ceil_x == x:
return ceil_x+1
else:
return ceil_x
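A minimal usage sketch for the integrator above, assuming the .pyx module is compiled on the fly via pyximport (as in the operator file) and that the class is imported under the name los_integrator it is defined with; the grid shape and pixel coordinates are made up for illustration.

import pyximport; pyximport.install()
import numpy as np
from line_integrator import los_integrator
# a line of sight crossing a 4x4 grid, given in pixel coordinates
integrator = los_integrator((4, 4), [0.5, 0.5], [3.5, 2.5])
indices, weights = integrator.integrate()
# indices holds one integer array per axis, weights the path length per cell;
# the cell weights should add up to roughly the length of the clipped segment
total_weight = np.sum(weights)
chord_length = np.linalg.norm(np.array([3.5, 2.5]) - np.array([0.5, 0.5]))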
......@@ -2845,7 +2845,8 @@ class response_operator(operator):
None
"""
if not isinstance(domain, space):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
raise TypeError(about._errors.cstring(
"ERROR: The domain must be a space instance."))
self.domain = domain
if self.domain.check_codomain(codomain):
......
......@@ -20,8 +20,17 @@
## along with this program. If not, see <http://www.gnu.org/licenses/>.
from distutils.core import setup
#from Cython.Build import cythonize
from distutils.extension import Extension
from Cython.Distutils import build_ext
import sys
import os
ext_modules=[Extension(
"line_integrator_vector",
["operators/line_integrator.pyx"])]
setup(name="ift_nifty",
version="1.0.6",
author="Marco Selig",
......@@ -32,6 +41,13 @@ setup(name="ift_nifty",
url="http://www.mpa-garching.mpg.de/ift/nifty/",
packages=["nifty", "nifty.demos", "nifty.rg", "nifty.lm",
"nifty.operators", "nifty.dummys", "nifty.keepers"],
cmdclass={'build_ext': build_ext},
ext_modules=ext_modules,
#ext_modules=cythonize(["operators/line_integrator_vector.pyx"]),
package_dir={"nifty": ""},
data_files=[(os.path.expanduser('~') + "/.nifty", ["nifty_config"])],
license="GPLv3")