diff --git a/lm/nifty_lm.py b/lm/nifty_lm.py
index d1923aed87fbe662faaee6d5bd0431abc5c052d3..447f9804164345830d6ca820d7e601430b317109 100644
--- a/lm/nifty_lm.py
+++ b/lm/nifty_lm.py
@@ -213,6 +213,11 @@ class lm_space(point_space):
         """
         return self.paradict['mmax']
 
+    def shape(self):
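+        ## There are sum_{m=0}^{mmax} (lmax - m + 1) spherical harmonic
+        ## coefficients with l <= lmax and m <= mmax, which evaluates to
+        ## (mmax+1)*(lmax+1) - (mmax+1)*mmax/2.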
+        mmax = self.paradict['mmax']
+        lmax = self.paradict['lmax']
+        return np.array([(mmax+1)*(lmax+1) - (mmax+1)*mmax//2], dtype=np.int)
+
     def dim(self,split=False):
         """
             Computes the dimension of the space, i.e.\  the number of spherical
@@ -237,9 +242,11 @@ class lm_space(point_space):
         """
         ## dim = (mmax+1)*(lmax-mmax/2+1)
         if(split):
-            return np.array([(self.para[0]+1)*(self.para[1]+1)-(self.para[1]+1)*self.para[1]//2],dtype=np.int)
+            return self.shape()
+            #return np.array([(self.para[0]+1)*(self.para[1]+1)-(self.para[1]+1)*self.para[1]//2],dtype=np.int)
         else:
-            return (self.para[0]+1)*(self.para[1]+1)-(self.para[1]+1)*self.para[1]//2
+            return np.prod(self.shape())
+            #return (self.para[0]+1)*(self.para[1]+1)-(self.para[1]+1)*self.para[1]//2
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
@@ -1035,7 +1042,7 @@ class gl_space(point_space):
         self.datatype = datatype
 
         self.discrete = False
-        self.vol = gl.vol(self.para[0],nlon=self.para[1]).astype(self.datatype)
+        self.vol = gl.vol(self.paradict['nlat'],nlon=self.paradict['nlon']).astype(self.datatype)
 
 
     @property
@@ -1074,6 +1081,9 @@ class gl_space(point_space):
         """
         return self.paradict['nlon']
 
+    def shape(self):
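+        ## a Gauss-Legendre grid with nlat latitudinal and nlon longitudinal
+        ## rings consists of nlat*nlon pixels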
+        return np.array([self.paradict['nlat']*self.paradict['nlon']], dtype=np.int)
+
     def dim(self,split=False):
         """
             Computes the dimension of the space, i.e.\  the number of pixels.
@@ -1091,9 +1101,11 @@ class gl_space(point_space):
         """
         ## dim = nlat*nlon
         if(split):
-            return np.array([self.para[0]*self.para[1]],dtype=np.int)
+            return self.shape()
+            #return np.array([self.para[0]*self.para[1]],dtype=np.int)
         else:
-            return self.para[0]*self.para[1]
+            return np.prod(self.shape())
+            #return self.para[0]*self.para[1]
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
@@ -1699,7 +1711,7 @@ class hp_space(point_space):
 
         self.datatype = np.float64
         self.discrete = False
-        self.vol = np.array([4*pi/(12*self.para[0]**2)],dtype=self.datatype)
+        self.vol = np.array([4*pi/(12*self.paradict['nside']**2)],dtype=self.datatype)
 
     @property
     def para(self):
@@ -1725,6 +1737,8 @@ class hp_space(point_space):
         """
         return self.paradict['nside']
 
+    def shape(self):
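+        ## a HEALPix map with resolution parameter nside consists of
+        ## 12*nside**2 pixels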
+        return np.array([12*self.paradict['nside']**2], dtype=np.int)
 
     def dim(self,split=False):
         """
@@ -1743,9 +1757,11 @@ class hp_space(point_space):
         """
         ## dim = 12*nside**2
         if(split):
-            return np.array([12*self.para[0]**2],dtype=np.int)
+            return self.shape()
+            #return np.array([12*self.para[0]**2],dtype=np.int)
         else:
-            return 12*self.para[0]**2
+            return np.prod(self.shape())
+            #return 12*self.para[0]**2
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
diff --git a/nifty_core.py b/nifty_core.py
index 00e0c38e3280b078410874b8ce4e5229736ca71c..799d99db8ab0513d87a619b9a3529bcb8af69f73 100644
--- a/nifty_core.py
+++ b/nifty_core.py
@@ -1011,12 +1011,9 @@ class space(object):
         self.discrete = True
         self.vol = np.real(np.array([1],dtype=self.datatype))
         
-        self.shape = None
-
     @property
     def para(self):
         return self.paradict['default']
-        #return self.distributed_val
     
     @para.setter
     def para(self, x):
@@ -1033,12 +1030,12 @@ class space(object):
 
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-    def getitem(self, key):
+    def getitem(self, data, key):
         raise NotImplementedError(about._errors.cstring("ERROR: no generic instance method 'getitem'."))
         
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-    def setitem(self, key):
+    def setitem(self, data, update, key):
         raise NotImplementedError(about._errors.cstring("ERROR: no generic instance method 'setitem'."))
         
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++            
@@ -1053,6 +1050,8 @@ class space(object):
     def norm(self, x, q):
         raise NotImplementedError(about._errors.cstring("ERROR: no generic instance method 'norm'."))
 
+    def shape(self):
+        raise NotImplementedError(about._errors.cstring("ERROR: no generic instance method 'shape'."))
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
     def dim(self,split=False):
@@ -1217,8 +1216,30 @@ class space(object):
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
     
-    def cast(self, x):
-        return self.enforce_values(x)
+    def cast(self, x, verbose=False):
+        """
+            Computes valid field values from a given object, translating
+            the given data into a valid form as leniently as possible.
+
+            Parameters
+            ----------
+            x : {float, numpy.ndarray, nifty.field}
+                Object to be transformed into an array of valid field values.
+
+            Returns
+            -------
+            x : numpy.ndarray, distributed_data_object
+                Array containing the field values, which are compatible to the
+                space.
+
+            Other parameters
+            ----------------
+            verbose : bool, *optional*
+                Whether the method should raise a warning if information is 
+                lost during casting (default: False).
+        """
+        return self.enforce_values(x, extend=True)
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
@@ -1827,13 +1848,24 @@ class point_space(space):
     def para(self):
         temp = np.array([self.paradict['num']], dtype=int)
         return temp
-        #return self.distributed_val
     
     @para.setter
     def para(self, x):
         self.paradict['num'] = x
         
-    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++      
+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+    def getitem(self, data, key):
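+        ## the data of a point_space is a plain array, so generic
+        ## indexing applies directly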
+        return data[key]
+        
+
+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+    def setitem(self, data, update, key):
+        data[key] = update
+
+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
     def unary_operation(self, x, op='None', **kwargs):
         """
         x must be a numpy array which is compatible with the space!
@@ -1947,6 +1979,10 @@ class point_space(space):
         """
         return self.para[0]
 
+    def shape(self):
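+        ## a point_space is one-dimensional and carries 'num' points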
+        return np.array([self.paradict['num']], dtype=np.int)
+
     def dim(self,split=False):
         """
             Computes the dimension of the space, i.e.\  the number of points.
@@ -1964,9 +2000,11 @@ class point_space(space):
         """
         ## dim = num
         if(split):
-            return np.array([self.para[0]],dtype=np.int)
+            return self.shape()
+            #return np.array([self.para[0]],dtype=np.int)
         else:
-            return self.para[0]
+            return np.prod(self.shape())
+            #return self.para[0]
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
@@ -4711,6 +4749,12 @@ class nested_space(space):
                 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
+    def shape(self):
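+        ## concatenate the dimensions of all nested sub-spaces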
+        temp = []
+        for i in range(self.paradict.ndim):
+            temp = np.append(temp, self.paradict[i])
+        return temp.astype(np.int)
+        
     def dim(self,split=False):
         """
             Computes the dimension of the product space.
@@ -4729,9 +4773,11 @@ class nested_space(space):
                 Dimension(s) of the space.
         """
         if(split):
-            return self.para
+            return self.shape()
+            #return self.para
         else:
-            return np.prod(self.para,axis=0,dtype=None,out=None)
+            return np.prod(self.shape())
+            #return np.prod(self.para,axis=0,dtype=None,out=None)
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
@@ -5436,7 +5482,10 @@ class field(object):
         else:
             self.domain.check_codomain(target)
         self.target = target
-
+        
+        self.val = self.domain.cast(val)
+        
+        """
         self.distributed_val = distributed_data_object(global_shape=domain.dim(split=True), dtype=domain.datatype)                
 
         ## check values
@@ -5454,6 +5503,8 @@ class field(object):
     @val.setter
     def val(self, x):
         return self.distributed_val.set_full_data(x)
+        
+    """
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
     def dim(self,split=False):
@@ -5792,7 +5843,7 @@ class field(object):
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
-    def transform(self,target=None,overwrite=False,**kwargs):
+    def transform(self, target=None, overwrite=False, **kwargs):
         """
             Computes the transform of the field using the appropriate conjugate
             transformation.
@@ -5821,12 +5872,16 @@ class field(object):
             target = self.target
         else:
             self.domain.check_codomain(target) ## a bit pointless
+        
+        new_val = self.domain.calc_transform(self.val,
+                                             codomain=target,
+                                             **kwargs)
         if(overwrite):
-            self.val = self.domain.calc_transform(self.val,codomain=target,field_val=self.distributed_val, **kwargs)
+            self.val = new_val
             self.target = self.domain
             self.domain = target
         else:
-            return field(target,val=self.domain.calc_transform(self.val,codomain=target, field_val=self.distributed_val, **kwargs),target=self.domain)
+            return field(target, val=new_val, target=self.domain)
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
diff --git a/nifty_mpi_data.py b/nifty_mpi_data.py
index 14c3a4f89547003c2eea3d8c071673bab5ea9622..ddf898181327ecfee35931a703b8df9260222513 100644
--- a/nifty_mpi_data.py
+++ b/nifty_mpi_data.py
@@ -98,32 +98,53 @@ class distributed_data_object(object):
     """
     def __init__(self,  global_data=None, global_shape=None, dtype=None, distribution_strategy='fftw', *args, **kwargs):
         if global_data != None:
-            global_data_input = np.array(global_data, copy=True, order='C')
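+            ## a scalar input carries no shape information; remember only
+            ## its dtype here and fill in the actual value at the end of
+            ## __init__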
+            if np.array(global_data).shape == ():
+                global_data_input = None
+                if dtype == None:
+                    dtype = np.array(global_data).dtype.type
+            else:
+                global_data_input = np.array(global_data, copy=True, order='C')
         else:
             global_data_input = None
-            
+
         self.distributor = self._get_distributor(distribution_strategy)(global_data=global_data_input, global_shape=global_shape, dtype=dtype, **kwargs)
         self.set_full_data(data=global_data_input, **kwargs)
         
+            
         self.distribution_strategy = distribution_strategy
         self.dtype = self.distributor.dtype
         self.shape = self.distributor.global_shape
+        self.hermitian = False        
         
         self.init_args = args 
         self.init_kwargs = kwargs
-    
+        
+        ## If the input data was a scalar, set the whole array to this value
+        if global_data != None and np.array(global_data).shape == ():
+            self.set_local_data(self.get_local_data() + np.array(global_data))
+            ## a constant array is hermitian iff the constant value is real
+            self.hermitian = bool(np.all(np.isreal(global_data)))
         
     def copy(self):
         temp_d2o = self.copy_empty()        
         temp_d2o.set_local_data(self.get_local_data(), copy=True)
+        temp_d2o.hermitian = self.hermitian
         return temp_d2o
     
-    def copy_empty(self):
-        temp_d2o = distributed_data_object(global_shape=self.shape,
-                                           dtype=self.dtype,
-                                           distribution_strategy=self.distribution_strategy,
+    def copy_empty(self, global_shape=None, dtype=None, 
+                   distribution_strategy=None, **kwargs):
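+        ## fall back to this instance's own attributes for every parameter
+        ## that is not explicitly overridden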
+        if global_shape == None:
+            global_shape = self.shape
+        if dtype == None:
+            dtype = self.dtype
+        if distribution_strategy == None:
+            distribution_strategy = self.distribution_strategy
+
+        ## merge in the stored construction kwargs without overriding
+        ## explicitly passed ones
+        for key in self.init_kwargs:
+            kwargs.setdefault(key, self.init_kwargs[key])
+        
+        temp_d2o = distributed_data_object(global_shape=global_shape,
+                                           dtype=dtype,
+                                           distribution_strategy=distribution_strategy,
                                            *self.init_args,
-                                           **self.init_kwargs)
+                                           **kwargs)
         return temp_d2o
     
     def apply_function(self, function):
@@ -207,6 +228,9 @@ class distributed_data_object(object):
     def __len__(self):
         return self.shape[0]
     
+    def dim(self):
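+        ## total number of elements in the global array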
+        return np.prod(self.shape)
+        
     def vdot(self, other):
         if isinstance(other, distributed_data_object):        
             other = other.get_local_data()
@@ -331,7 +355,21 @@ class distributed_data_object(object):
         median = np.median(self.get_full_data())
         return median
         
-
+    def iscomplex(self):
+        temp_d2o = self.copy_empty(dtype=bool)
+        temp_d2o.set_local_data(np.iscomplex(self.data))
+        return temp_d2o
+    
+    def isreal(self):
+        temp_d2o = self.copy_empty(dtype=bool)
+        temp_d2o.set_local_data(np.isreal(self.data))
+        return temp_d2o
+    
+    def is_completely_real(self):
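+        ## check the realness of the local data, then combine the
+        ## results from all nodes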
+        local_realiness = np.all(self.isreal().get_local_data())
+        global_realiness = self.distributor._allgather(local_realiness)
+        return np.all(global_realiness)
+    
     def set_local_data(self, data, copy=False):
         """
             Stores data directly in the local data attribute. No distribution 
@@ -348,6 +386,7 @@ class distributed_data_object(object):
             None
         
         """
+        self.hermitian = False
         self.data = np.array(data).astype(self.dtype, copy=copy)
     
     def set_data(self, data, key, *args, **kwargs):
@@ -370,6 +409,7 @@ class distributed_data_object(object):
             None
         
         """
+        self.hermitian = False
         (slices, sliceified) = self.__sliceify__(key)        
         self.distributor.disperse_data(self.data, self.__enfold__(data, sliceified), slices, *args, **kwargs)        
     
@@ -393,6 +433,7 @@ class distributed_data_object(object):
             None
         
         """
+        self.hermitian = False
         self.data = self.distributor.distribute_data(data=data, **kwargs)
     
 
@@ -579,7 +620,7 @@ class _fftw_distributor(object):
             if alias != None:
                 self.global_shape = dset.shape
             else:                
-                if global_data == None:
+                if global_data == None or np.array(global_data).shape == ():
                     if global_shape == None:
                         raise TypeError(nifty_core.about._errors.\
                         cstring("ERROR: Neither data nor shape supplied!"))
@@ -606,11 +647,10 @@ class _fftw_distributor(object):
                     self.dtype = np.array(global_data).dtype.type
                 else:
                     raise TypeError(nifty_core.about._errors.\
-                    cstring("ERROR: Failed setting datatype. Neither data,\
-                     nor datatype supplied."))
+                    cstring("ERROR: Failed setting datatype. Neither data, "+\
+                     "nor datatype supplied."))
         else:
             self.dtype=None
-        
         self.dtype = comm.bcast(self.dtype, root=0)
         if alias != None:        
             f.close()        
@@ -618,7 +658,8 @@ class _fftw_distributor(object):
         self._my_dtype_converter = dtype_converter()
         
         if not self._my_dtype_converter.known_np_Q(self.dtype):
-            raise TypeError(nifty_core.about._errors.cstring("ERROR: The data type is not known to mpi4py."))
+            raise TypeError(nifty_core.about._errors.cstring(\
+            "ERROR: The datatype "+str(self.dtype)+" is not known to mpi4py."))
 
         self.mpi_dtype  = self._my_dtype_converter.to_mpi(self.dtype)
         
@@ -629,14 +670,18 @@ class _fftw_distributor(object):
         self.local_shape = (self.local_length,) + tuple(self.global_shape[1:])
         self.local_dim = np.product(self.local_shape)
         self.local_dim_list = np.empty(comm.size, dtype=np.int)
-        comm.Allgather([np.array(self.local_dim,dtype=np.int), MPI.INT], [self.local_dim_list, MPI.INT])
+        comm.Allgather([np.array(self.local_dim,dtype=np.int), MPI.INT],\
+            [self.local_dim_list, MPI.INT])
         self.local_dim_offset = np.sum(self.local_dim_list[0:comm.rank])
         
-        self.local_slice = np.array([self.local_start, self.local_end, self.local_length, self.local_dim, self.local_dim_offset], dtype=np.int)
+        self.local_slice = np.array([self.local_start, self.local_end,\
+            self.local_length, self.local_dim, self.local_dim_offset],\
+            dtype=np.int)
         ## collect all local_slices 
         ## [start, stop, length=stop-start, dimension, dimension_offset]
         self.all_local_slices = np.empty((comm.size,5),dtype=np.int)
-        comm.Allgather([np.array((self.local_slice,),dtype=np.int), MPI.INT], [self.all_local_slices, MPI.INT])
+        comm.Allgather([np.array((self.local_slice,),dtype=np.int), MPI.INT],\
+            [self.all_local_slices, MPI.INT])
         
         self.comm = comm
         
@@ -707,7 +752,8 @@ class _fftw_distributor(object):
         ## store it individually. If not, take the data on node 0 and scatter 
         ## it...
         if np.all(data_available_Q):
-            return data[self.local_start:self.local_end].astype(self.dtype, copy=False)    
+            return data[self.local_start:self.local_end].astype(self.dtype,\
+                copy=False)    
         ## ... but only if node 0 has actually data!
         elif data_available_Q[0] == False:# or np.all(data_available_Q==False):
             return np.zeros(self.local_shape, dtype=self.dtype)
@@ -717,30 +763,38 @@ class _fftw_distributor(object):
                 data = np.empty(self.global_shape)            
             if rank == 0:
                 if np.all(data.shape != self.global_shape):
-                    raise TypeError(nifty_core.about._errors.cstring("ERROR: Input data has the wrong shape!"))
+                    raise TypeError(nifty_core.about._errors.cstring(\
+                        "ERROR: Input data has the wrong shape!"))
             ## Scatter the data!            
             _scattered_data = np.zeros(self.local_shape, dtype = self.dtype)
             _dim_list = self.all_local_slices[:,3]
             _dim_offset_list = self.all_local_slices[:,4]
-            #comm.Scatterv([data.astype(np.float64, copy=False), _dim_list, _dim_offset_list, MPI.DOUBLE], [_scattered_data, MPI.DOUBLE], root=0)
-            comm.Scatterv([data, _dim_list, _dim_offset_list, self.mpi_dtype], [_scattered_data, self.mpi_dtype], root=0)
+            comm.Scatterv([data, _dim_list, _dim_offset_list, self.mpi_dtype],\
+                [_scattered_data, self.mpi_dtype], root=0)
             return _scattered_data
         return None
     
-    def _disperse_data_primitive(self, data, data_update, slice_objects, source_rank='all', comm=None):
+    def _disperse_data_primitive(self, data, data_update, slice_objects,\
+                                source_rank='all', comm=None):
         if comm == None:
             comm = self.comm            
-        ## compute the part of the slice which is relevant for the individual node      
+        ## compute the part of the slice which is relevant for the 
+        ## individual node      
         localized_start, localized_stop = self._backshift_and_decycle(
-            slice_objects[0], self.local_start)
-        local_slice = (slice(localized_start,localized_stop,slice_objects[0].step),) + slice_objects[1:]
+            slice_objects[0], self.local_start, self.local_end,\
+                self.global_shape[0])
+        local_slice = (slice(localized_start, localized_stop,\
+                        slice_objects[0].step),) + slice_objects[1:]
         
         ## compute the parameter sets and list for the data splitting
         local_slice_shape = data[local_slice].shape        
         local_affected_data_length = local_slice_shape[0]
         local_affected_data_length_list=np.empty(comm.size, dtype=np.int)        
-        comm.Allgather([np.array(local_affected_data_length, dtype=np.int), MPI.INT], [local_affected_data_length_list, MPI.INT])        
-        local_affected_data_length_offset_list = np.append([0],np.cumsum(local_affected_data_length_list)[:-1])
+        comm.Allgather(\
+            [np.array(local_affected_data_length, dtype=np.int), MPI.INT],\
+            [local_affected_data_length_list, MPI.INT])        
+        local_affected_data_length_offset_list = np.append([0],\
+                            np.cumsum(local_affected_data_length_list)[:-1])
         
         
         if source_rank == 'all':
@@ -750,15 +804,34 @@ class _fftw_distributor(object):
             o = local_affected_data_length_offset_list
             l = local_affected_data_length
             update_slice = (slice(o[r], o[r]+l),) 
-            data[local_slice] = np.array(data_update[update_slice], copy=False).astype(self.dtype)
+            data[local_slice] = np.array(data_update[update_slice],\
+                                    copy=False).astype(self.dtype)
             
         else:
             ## Scatterv the relevant part from the source_rank to the others 
             ## and plug it into data[local_slice]
-            local_affected_data_dim_list= np.array(local_affected_data_length_list) * np.product(local_slice_shape[1:])                    
-            local_affected_data_dim_offset_list = np.append([0],np.cumsum(local_affected_data_dim_list)[:-1])
-            local_dispersed_data = np.zeros(local_slice_shape, dtype=self.dtype)
-            comm.Scatterv([np.array(data_update, copy=False).astype(self.dtype), local_affected_data_dim_list, local_affected_data_dim_offset_list, self.mpi_dtype],
+            
+            ## if the first slice object has a negative step size, the ordering 
+            ## of the Scatterv function must be reversed         
+            order = slice_objects[0].step
+            if order == None:
+                order = 1
+            else:
+                order = np.sign(order)
+
+            local_affected_data_dim_list = \
+                np.array(local_affected_data_length_list) *\
+                    np.product(local_slice_shape[1:])                    
+
+            local_affected_data_dim_offset_list = np.append([0],\
+                np.cumsum(local_affected_data_dim_list[::order])[:-1])[::order]
+                
+            local_dispersed_data = np.zeros(local_slice_shape,\
+                dtype=self.dtype)
+            comm.Scatterv(\
+                [np.array(data_update, copy=False).astype(self.dtype),\
+                    local_affected_data_dim_list,\
+                    local_affected_data_dim_offset_list, self.mpi_dtype],
                           [local_dispersed_data, self.mpi_dtype], 
                           root=source_rank)                            
             data[local_slice] = local_dispersed_data
@@ -766,7 +839,8 @@ class _fftw_distributor(object):
         
     
     
-    def disperse_data(self, data, data_update, slice_objects, comm=None, **kwargs):
+    def disperse_data(self, data, data_update, slice_objects, comm=None,\
+                        **kwargs):
         if comm == None:
             comm = self.comm            
         slice_objects_list = comm.allgather(slice_objects)
@@ -774,13 +848,17 @@ class _fftw_distributor(object):
         if all(x == slice_objects_list[0] for x in slice_objects_list):
             ## in this case, the _disperse_data_primitive can simply be called 
             ##with target_rank = 'all'
-            self._disperse_data_primitive(data=data, data_update=data_update, slice_objects=slice_objects, source_rank='all', comm=comm)
-        ## if the different nodes got different slices, disperse the data individually
+            self._disperse_data_primitive(data=data, data_update=data_update,\
+                slice_objects=slice_objects, source_rank='all', comm=comm)
+        ## if the different nodes got different slices, disperse the data 
+        ## individually
         else:
             i = 0        
             for temp_slices in slice_objects_list:
                 ## make the collect_data call on all nodes            
-                self._disperse_data_primitive(data=data, data_update=data_update, slice_objects=temp_slices, source_rank=i, comm=comm)
+                self._disperse_data_primitive(data=data,\
+                    data_update=data_update, slice_objects=temp_slices,\
+                    source_rank=i, comm=comm)
                 i += 1
                  
         
@@ -789,7 +867,7 @@ class _fftw_distributor(object):
             comm = self.comm            
             
         localized_start, localized_stop = self._backshift_and_decycle(
-            slice_objects[0], self.local_start)
+            slice_objects[0], self.local_start, self.local_end, self.global_shape[0])
         local_slice = (slice(localized_start,localized_stop,slice_objects[0].step),)+slice_objects[1:]
         local_collected_data = np.ascontiguousarray(data[local_slice])
 
@@ -801,8 +879,19 @@ class _fftw_distributor(object):
         collected_data_shape = (collected_data_length,)+local_collected_data.shape[1:]
         local_collected_data_dim_list= np.array(local_collected_data_length_list) * np.product(local_collected_data.shape[1:])        
         
-        local_collected_data_dim_offset_list = np.append([0],np.cumsum(local_collected_data_dim_list)[:-1])
+        ## if the first slice object has a negative step size, the ordering 
+        ## of the Gatherv functions must be reversed         
+        order = slice_objects[0].step
+        if order == None:
+            order = 1
+        else:
+            order = np.sign(order)
+            
+        local_collected_data_dim_offset_list = np.append([0],np.cumsum(local_collected_data_dim_list[::order])[:-1])[::order]
+
         collected_data = np.empty(collected_data_shape, dtype=self.dtype)
+        
 
         if target_rank == 'all':
             comm.Allgatherv([local_collected_data, self.mpi_dtype], 
@@ -834,33 +923,82 @@ class _fftw_distributor(object):
         return individual_data
         
     
-    def _backshift_and_decycle(self, slice_object, shift):
-        ## initialize the step and step_size_compensation
+    def _backshift_and_decycle(self, slice_object, shifted_start, shifted_stop, global_length):
+        ## Crop the start value
+        if slice_object.start != None and slice_object.start > global_length-1:
+            slice_object = slice(global_length-1, slice_object.stop,
+                                 slice_object.step)
+
+        ## Reformulate negative indices
+        if slice_object.start != None and slice_object.start < 0:
+            temp_start = slice_object.start + global_length
+            if temp_start < 0:
+                raise ValueError(nifty_core.about._errors.cstring(\
+                "ERROR: Index is out of bounds!"))
+            slice_object = slice(temp_start, slice_object.stop,\
+            slice_object.step) 
+
+        if slice_object.stop != None and slice_object.stop < 0:
+            temp_stop = slice_object.stop + global_length
+            if temp_stop < 0:
+                raise ValueError(nifty_core.about._errors.cstring(\
+                "ERROR: Index is out of bounds!"))
+            slice_object = slice(slice_object.start, temp_stop,\
+            slice_object.step) 
+                
+        ## initialize the step
         if slice_object.step == None:
             step = 1
         else:
             step = slice_object.step
-        if step < 1:
-            raise ValueError(nifty_core.about._errors.cstring("ERROR: Negative step-size is not supported!")) 
-        ## calculate the start index
-        if slice_object.start == None:
-            local_start = (-shift)%step ## step size compensation
-        else:
-            local_start = slice_object.start - shift
-            ## if the local_start is negative, pull it up to zero
-            local_start = local_start%step if local_start < 0 else local_start
-        ## calculate the stop index
-        if slice_object.stop == None:
-            local_stop = None
-        else:
-            local_stop = slice_object.stop - shift
-            ## if local_stop is negative, pull it up to zero
-            local_stop = 0 if local_stop < 0 else local_stop
+        
+        if step > 0:
+            shift = shifted_start
+            ## calculate the start index
+            if slice_object.start == None:
+                local_start = (-shift)%step ## step size compensation
+            else:
+                local_start = slice_object.start - shift
+                ## if the local_start is negative, pull it up to zero
+                local_start = local_start%step if local_start < 0 else local_start
+            ## calculate the stop index
+            if slice_object.stop == None:
+                local_stop = None
+            else:
+                local_stop = slice_object.stop - shift
+                ## if local_stop is negative, pull it up to zero
+                local_stop = 0 if local_stop < 0 else local_stop
+                
+        else: # if step < 0
+            step = -step
+            local_length = shifted_stop - shifted_start
+            ## calculate the start index. (Here, local_start > local_stop!)
+            if slice_object.start == None:
+                local_start = (local_length-1) -\
+                    (global_length-shifted_stop)%step #stepsize compensation
+            else:
+                local_start = slice_object.start - shifted_start
+                ## if the local_start is negative, pull it up to zero
+                local_start = 0 if local_start < 0 else local_start                
+                ## if the local_start is greater than the local length, pull
+                ## it down 
+                if local_start > local_length-1:
+                    overhead = local_start - (local_length-1)
+                    overhead = overhead - overhead%(-step)
+                    local_start = local_start - overhead
+            ## calculate the stop index
+            if slice_object.stop == None:
+                local_stop = None
+            else:
+                local_stop = slice_object.stop - shifted_start
+                ## if local_stop is negative, pull it up to zero
+                local_stop = 0 if local_stop < 0 else local_stop    
         ## Note: if start or stop are greater than the array length,
         ## numpy will automatically cut the index value down into the 
         ## array's range 
         return local_start, local_stop        
-        
+    
+    
     def consolidate_data(self, data, target_rank='all', comm = None):
         if comm == None:
             comm = self.comm            
@@ -942,7 +1080,7 @@ class _not_distributor(object):
         elif global_data != None:
             self.dtype = np.array(global_data).dtype.type
             
-        if global_data != None:
+        if global_data != None and np.array(global_data).shape != ():
             self.global_shape = np.array(global_data).shape
         elif global_shape != None:
             self.global_shape = global_shape
@@ -959,13 +1097,13 @@ class _not_distributor(object):
         return [thing,]
         
     def distribute_data(self, data, **kwargs):
-        return np.array(data).astype(self.dtype, copy=False)
+        return np.array(data).astype(self.dtype, copy=False).reshape(self.global_shape)
     
     def disperse_data(self, data, data_update, key, **kwargs):
         data[key] = np.array(data_update, copy=False).astype(self.dtype)
                      
-    def collect_data(self, data, slice_object,  **kwargs):
-        return data[slice_object]
+    def collect_data(self, data, slice_objects,  **kwargs):
+        return data[slice_objects]
         
     def consolidate_data(self, data, **kwargs):
         return data
@@ -984,7 +1122,8 @@ class dtype_converter:
         pre_dict = [
                     #[, MPI_CHAR],
                     #[, MPI_SIGNED_CHAR],
-                    #[, MPI_UNSIGNED_CHAR],                                        
+                    #[, MPI_UNSIGNED_CHAR],
+                    [np.bool, MPI.BYTE],
                     [np.int16, MPI.SHORT],
                     [np.uint16, MPI.UNSIGNED_SHORT],
                     [np.uint32, MPI.UNSIGNED_INT],
@@ -1048,10 +1187,13 @@ if __name__ == '__main__':
     if True:
     #if rank == 0:
         x = np.arange(100).reshape((10,10)).astype(np.int)
-        x = x**2
-        x = x[::-1,::-1] + x
+        #x = x**2
+        #x = x[::-1,::-1] + x
+        
         #print x
         #x = np.arange(3)
+
+
     else:
         x = None
     obj = distributed_data_object(global_data=x, distribution_strategy='fftw')
@@ -1070,12 +1212,13 @@ if __name__ == '__main__':
     #temp_index= (80,80,666)    
     #print ('rank', rank, ' local index: ', temp_index, ' globalized: ', obj.distributor.globalize_index(temp_index)) 
     
-    print obj.argmax_flat()
-    print obj.argmin_flat()
-    """
+
     MPI.COMM_WORLD.Barrier()
+    sl = slice(13,1,-3)
     if rank == 0:    
-        print ('erwuenscht', x[slice(1,10,2)])
+        print ('erwuenscht', x[sl])
+    print obj[sl]
+    """
     sl = slice(1,2+rank,1)
     print ('slice', rank, sl, obj[sl,2])
     print obj[1:5:2,1:3]
@@ -1087,5 +1230,4 @@ if __name__ == '__main__':
         d = [[555, 666],[777,888]]
     obj[sl] = d
     print obj.get_full_data()    
-    """
-   
+   """
diff --git a/nifty_paradict.py b/nifty_paradict.py
index adc1688bad2e4e053b9545a33d440dceb9d4d166..8a17b4dc79b1eb03c2b64203b9c75dc9ad8d45f5 100644
--- a/nifty_paradict.py
+++ b/nifty_paradict.py
@@ -29,7 +29,14 @@ class space_paradict(object):
         self.parameters = {}
         for key in kwargs:
             self[key] = kwargs[key]
-    
+            
+    def __eq__(self, other):
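+        ## two paradicts are equal iff they are of the same class and
+        ## carry identical parameter entries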
+        return (isinstance(other, self.__class__)
+            and self.__dict__ == other.__dict__)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)    
+
     def __repr__(self):
         return self.parameters.__repr__()
         
diff --git a/rg/fft_rg.py b/rg/fft_rg.py
index d4e6ebd32e73348637da0700aa3f1e5f54c4d096..f4b107b5307d00b47b3d03be1a2737fa6a20b360 100644
--- a/rg/fft_rg.py
+++ b/rg/fft_rg.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 
 import numpy as np
-from nifty import nifty_mpi_data
+from nifty.nifty_mpi_data import distributed_data_object
 
 
 # Try to import pyfftw. If this fails fall back to gfft. If this fails fall back to local gfft_rg
@@ -50,7 +50,7 @@ class fft(object):
         ----------
         None
     """
-    def transform(self,field_val,domain,codomain,**kwargs):
+    def transform(self, val, domain, codomain, **kwargs):
         """
             A generic ff-transform function. 
             
@@ -71,9 +71,10 @@ class fft(object):
 if fft_machine == 'pyfftw':
     ## The instances of plan_and_info store the fftw plan and all 
     ## other information needed in order to perform a mpi-fftw transformation            
-    class _fftw_plan_and_info(object):
+    class _fftw_plan_and_info(fft):
         def __init__(self,domain,codomain,fft_fftw_context,**kwargs):
-            self.compute_plan_and_info(domain,codomain,fft_fftw_context,**kwargs)
+            self.compute_plan_and_info(domain, codomain, fft_fftw_context, 
+                                       **kwargs)
             
         def set_plan(self, x):
             self.plan=x                    
@@ -90,19 +91,21 @@ if fft_machine == 'pyfftw':
         def get_codomain_centering_mask(self):
             return self.codomain_centering_mask
         
-        def compute_plan_and_info(self, domain, codomain,fft_fftw_context,**kwargs):
+        def compute_plan_and_info(self, domain, codomain, fft_fftw_context, 
+                                  **kwargs):
             
             self.input_dtype = 'complex128'
             self.output_dtype = 'complex128'
             
-            self.global_input_shape = domain.dim(split=True)   
-            self.global_output_shape = codomain.dim(split=True)
+            self.global_input_shape = domain.shape()   
+            self.global_output_shape = codomain.shape()
             self.fftw_local_size = pyfftw.local_size(self.global_input_shape)
 
-            self.in_zero_centered_dimensions = domain.zerocenter()[::-1]
-            self.out_zero_centered_dimensions = codomain.zerocenter()[::-1]
+            self.in_zero_centered_dimensions = domain.paradict['zerocenter']
+            self.out_zero_centered_dimensions = codomain.paradict['zerocenter']
 
-            self.local_node_dimensions = np.append((self.fftw_local_size[1],),self.global_input_shape[1:])
+            self.local_node_dimensions = np.append((self.fftw_local_size[1],), 
+                                                   self.global_input_shape[1:])
             self.offsetQ = self.fftw_local_size[2]%2
             
             if codomain.fourier == True:
@@ -147,10 +150,12 @@ if fft_machine == 'pyfftw':
             ## to a certain set of (field_val, domain, codomain) sets.
             self.plan_dict = {}
             
-            ## initialize the dictionary which stores the values from get_centering_mask
+            ## initialize the dictionary which stores the values from 
+            ## get_centering_mask
             self.centering_mask_dict = {}      
          
-        def get_centering_mask(self, to_center_input, dimensions_input, offset_input=0):
+        def get_centering_mask(self, to_center_input, dimensions_input, 
+                               offset_input=0):
             """
                 Computes the mask, used to (de-)zerocenter domain and target 
                 fields.
@@ -177,7 +182,8 @@ if fft_machine == 'pyfftw':
             to_center = np.array(to_center_input)
             dimensions = np.array(dimensions_input)  
             
-            if np.all(dimensions == np.array(1)) or np.all(dimensions == np.array([1])):
+            if np.all(dimensions == np.array(1)) or \
+                np.all(dimensions == np.array([1])):
                 return dimensions
             ## The dimensions of size 1 must be sorted out for computing the
             ## centering_mask. The depth of the array will be restored in the 
@@ -199,7 +205,8 @@ if fft_machine == 'pyfftw':
             offset[0] = int(offset_input)
             ## check for dimension match
             if to_center.size != dimensions.size:
-                raise TypeError('The length of the supplied lists does not match.')
+                raise TypeError(\
+                        'The length of the supplied lists does not match.')
 
             ## build up the value memory
             ## compute an identifier for the parameter set
@@ -207,7 +214,13 @@ if fft_machine == 'pyfftw':
             if not temp_id in self.centering_mask_dict:
                 ## use np.tile in order to stack the core alternation scheme 
                 ## until the desired format is constructed. 
-                core = np.fromfunction(lambda *args : (-1)**(np.tensordot(to_center,args+offset.reshape(offset.shape+(1,)*(np.array(args).ndim-1)),1)), (2,)*to_center.size)
+                core = np.fromfunction(
+                    lambda *args : (-1)**\
+                        (np.tensordot(to_center,args + \
+                        offset.reshape(offset.shape + \
+                            (1,)*(np.array(args).ndim - 1)),1)),\
+                    (2,)*to_center.size)
+                    
                 centering_mask = np.tile(core,dimensions//2)           
                 ## for the dimensions of odd size corresponding slices must be added
                 for i in range(centering_mask.ndim):
@@ -215,9 +228,11 @@ if fft_machine == 'pyfftw':
                     if (dimensions%2)[i]==0:
                         continue
                     ## prepare the slice object
-                    temp_slice=(slice(None),)*i + (slice(-2,-1,1),) + (slice(None),)*(centering_mask.ndim -1 - i)
+                    temp_slice = (slice(None),)*i + (slice(-2,-1,1),) +\
+                                 (slice(None),)*(centering_mask.ndim -1 - i)
                     ## append the slice to the centering_mask                    
-                    centering_mask = np.append(centering_mask,centering_mask[temp_slice],axis=i)                
+                    centering_mask = np.append(centering_mask, 
+                                            centering_mask[temp_slice],axis=i)                
                 ## Add depth to the centering_mask where the length of a 
                 ## dimension was one
                 temp_slice = ()
@@ -236,16 +251,17 @@ if fft_machine == 'pyfftw':
             temp_id = (domain.__identifier__(), codomain.__identifier__())
             ## generate the plan_and_info object if not already there                
             if not temp_id in self.plan_dict:
-                self.plan_dict[temp_id]=_fftw_plan_and_info(domain,codomain,self,**kwargs)
+                self.plan_dict[temp_id]=_fftw_plan_and_info(domain, codomain,
+                                                            self, **kwargs)
             return self.plan_dict[temp_id]
         
-        def transform(self,val,domain,codomain, field_val, **kwargs):
+        def transform(self, val, domain, codomain, **kwargs):
             """
                 The pyfftw transform function. 
                 
                 Parameters
                 ----------
-                field_val : distributed_data_object
+                val : distributed_data_object or numpy.ndarray
                     The value-array of the field which is supposed to 
                     be transformed.
                 
@@ -263,32 +279,72 @@ if fft_machine == 'pyfftw':
                 result : np.ndarray
                     Fourier-transformed pendant of the input field.
             """
-            current_plan_and_info=self._get_plan_and_info(domain,codomain,**kwargs)
-            ## Prepare the input data
-            
+            current_plan_and_info=self._get_plan_and_info(domain, codomain, 
+                                                          **kwargs)
+            ## Prepare the environment variables
             local_size = current_plan_and_info.fftw_local_size
             local_start = local_size[2]
             local_end = local_start + local_size[1]
-            val = field_val.get_data(slice(local_start,local_end))
-            val *= current_plan_and_info.get_codomain_centering_mask()
+            
+            ## Prepare the input data
+            ## Case 1: val is a distributed_data_object
+            if isinstance(val, distributed_data_object):
+                return_val = val.copy_empty(global_shape =\
+                    tuple(current_plan_and_info.global_output_shape),
+                    dtype = np.complex128)
+                ## If the distribution strategy of the d2o is fftw, extract 
+                ## the data directly
+                if val.distribution_strategy == 'fftw':
+                    local_val = val.get_local_data()
+                else:
+                    local_val = val.get_data(slice(local_start, local_end))
+            ## Case 2: val is a numpy array carrying the full data
+            else:
+                local_val = val[slice(local_start, local_end)]
+
+            local_val *= current_plan_and_info.get_codomain_centering_mask()
             
             ## Define a abbreviation for the fftw plan                
             p = current_plan_and_info.get_plan()
             ## load the field into the plan
             if p.has_input:
-                p.input_array[:] = val
+                p.input_array[:] = local_val
             ## execute the plan
             p()
-            result = p.output_array*current_plan_and_info.get_domain_centering_mask()
+            result = p.output_array * current_plan_and_info.\
+                                get_domain_centering_mask()
+
             ## renorm the result according to the convention of gfft            
             if current_plan_and_info.direction == 'FFTW_FORWARD':
                 result = result/float(result.size)
             else:
                 result *= float(result.size)
-            ## build a distributed_data_object
-            data_object = nifty_mpi_data.distributed_data_object(global_shape = tuple(current_plan_and_info.global_output_shape), dtype = np.complex128, distribution_strategy='fftw')
-            data_object.set_local_data(data=result)            
-            return data_object.get_full_data()
+                    
+            ## build the return object according to the input val
+            try:
+                if return_val.distribution_strategy == 'fftw':
+                    return_val.set_local_data(data = result)
+                else:
+                    return_val.set_data(data = result, 
+                                        key = slice(local_start, local_end)) 
+                
+                ## If the values living in domain are purely real, the 
+                ## result of the fft is hermitian
+                if domain.paradict['complexity'] == 0:
+                    return_val.hermitian = True
+                    
+            ## In case the input val was not a distributed_data_object, the
+            ## try block will raise a NameError
+            except(NameError): 
+                return_val = distributed_data_object(
+                    global_shape =\
+                        tuple(current_plan_and_info.global_output_shape), 
+                    dtype = np.complex128,
+                    distribution_strategy='fftw')
+                return_val.set_local_data(data = result)
+                return_val = return_val.get_full_data()
+                        
+            return return_val
             
             
         
@@ -309,7 +365,7 @@ elif fft_machine == 'gfft' or 'gfft_fallback':
                 
                 Parameters
                 ----------
-                val : numpy.ndarray 
+                val : numpy.ndarray or distributed_data_object
                     The value-array of the field which is supposed to 
                     be transformed.
                 
@@ -332,9 +388,43 @@ elif fft_machine == 'gfft' or 'gfft_fallback':
                 ftmachine = "fft"
             else:
                 ftmachine = "ifft"       
+            ## if the input is a distributed_data_object, extract the data
+            d2oQ = isinstance(val, distributed_data_object)
+            if d2oQ:
+                ## keep a reference so the result can be written back later
+                return_d2o = val
+                val = val.get_full_data()
             ## transform and return
             if(domain.datatype==np.float64):
-                return gfft.gfft(val.astype(np.complex128),in_ax=[],out_ax=[],ftmachine=ftmachine,in_zero_center=domain.para[-naxes:].astype(np.bool).tolist(),out_zero_center=codomain.para[-naxes:].astype(np.bool).tolist(),enforce_hermitian_symmetry=bool(codomain.para[naxes]==1),W=-1,alpha=-1,verbose=False)
+                temp = gfft.gfft(val.astype(np.complex128), 
+                                in_ax=[], 
+                                out_ax=[], 
+                                ftmachine=ftmachine, 
+                                in_zero_center=domain.para[-naxes:].\
+                                    astype(np.bool).tolist(), 
+                                out_zero_center=codomain.para[-naxes:].\
+                                    astype(np.bool).tolist(), 
+                                enforce_hermitian_symmetry = \
+                                    bool(codomain.para[naxes]==1),
+                                W=-1,
+                                alpha=-1,
+                                verbose=False)
             else:
-                return gfft.gfft(val,in_ax=[],out_ax=[],ftmachine=ftmachine,in_zero_center=domain.para[-naxes:].astype(np.bool).tolist(),out_zero_center=codomain.para[-naxes:].astype(np.bool).tolist(),enforce_hermitian_symmetry=bool(codomain.para[naxes]==1),W=-1,alpha=-1,verbose=False)
-    
\ No newline at end of file
+                temp = gfft.gfft(val,
+                                in_ax=[],
+                                out_ax=[],
+                                ftmachine=ftmachine,
+                                in_zero_center=domain.para[-naxes:].\
+                                    astype(np.bool).tolist(),
+                                out_zero_center=codomain.para[-naxes:].\
+                                    astype(np.bool).tolist(),
+                                enforce_hermitian_symmetry = \
+                                    bool(codomain.para[naxes]==1),
+                                W=-1,
+                                alpha=-1,
+                                verbose=False)
+            if d2oQ:
+                return_d2o.set_full_data(temp)
+                val = return_d2o
+            else:
+                val = temp
+
+            return val
+                
\ No newline at end of file
diff --git a/rg/nifty_rg.py b/rg/nifty_rg.py
index c1b51b9e4360be9ed1f2abecd1c38709eaec8ed4..7a964aa6c305df7a4bcdedd2056337e6473d73ed 100644
--- a/rg/nifty_rg.py
+++ b/rg/nifty_rg.py
@@ -43,7 +43,7 @@ from nifty.nifty_core import about,                                          \
                              space,                                          \
                              point_space,                                    \
                              field
-import nifty.nifty_mpi_data
+from nifty.nifty_mpi_data import distributed_data_object
 import nifty.smoothing as gs
 import powerspectrum as gp
 '''
@@ -55,7 +55,6 @@ except(ImportError):
 '''
 import fft_rg
 from nifty_paradict import rg_space_paradict
-  
 
 
 ##-----------------------------------------------------------------------------
@@ -123,7 +122,8 @@ class rg_space(point_space):
     """
     epsilon = 0.0001 ## relative precision for comparisons
 
-    def __init__(self,num,naxes=None,zerocenter=True,hermitian=True,purelyreal=True,dist=None,fourier=False):
+    def __init__(self, num, naxes=None, zerocenter=True, hermitian=True,\
+                purelyreal=True, dist=None, fourier=False):
         """
             Sets the attributes for an rg_space class instance.
 
@@ -153,35 +153,7 @@ class rg_space(point_space):
             -------
             None
         """
-        ## check parameters
-        '''
-        para = np.array([],dtype=np.int)
-        if(np.isscalar(num)):
-            num = np.array([num],dtype=np.int)
-        else:
-            num = np.array(num,dtype=np.int)
-        if(np.any(num%2)): ## module restriction
-            raise ValueError(about._errors.cstring("ERROR: unsupported odd number of grid points."))
-        if(naxes is None):
-            naxes = np.size(num)
-        elif(np.size(num)==1):
-            num = num*np.ones(naxes,dtype=np.int,order='C')
-        elif(np.size(num)!=naxes):
-            raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(num))+" <> "+str(naxes)+" )."))
-        para = np.append(para,num[::-1],axis=None)
-        para = np.append(para,2-(bool(hermitian) or bool(purelyreal))-bool(purelyreal),axis=None) ## {0,1,2}
-        if(np.isscalar(zerocenter)):
-            zerocenter = bool(zerocenter)*np.ones(naxes,dtype=np.int,order='C')
-        else:
-            zerocenter = np.array(zerocenter,dtype=np.bool)
-            if(np.size(zerocenter)==1):
-                zerocenter = zerocenter*np.ones(naxes,dtype=np.int,order='C')
-            elif(np.size(zerocenter)!=naxes):
-                raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(zerocenter))+" <> "+str(naxes)+" )."))
-        para = np.append(para,zerocenter[::-1]*-1,axis=None) ## -1 XOR 0 (centered XOR not)
-
-        self.para = para
-        '''
+
         complexity = 2-(bool(hermitian) or bool(purelyreal))-bool(purelyreal)
         self.paradict = rg_space_paradict(num=num, complexity=complexity, 
                                           zerocenter=zerocenter)        
@@ -201,22 +173,34 @@ class rg_space(point_space):
         if(dist is None):
             dist = 1/np.array(self.paradict['num'], dtype=self.datatype)
         elif(np.isscalar(dist)):
-            dist = self.datatype(dist)*np.ones(naxes,dtype=self.datatype,order='C')
+            dist = self.datatype(dist)*np.ones(naxes,dtype=self.datatype,\
+                                                order='C')
         else:
             dist = np.array(dist,dtype=self.datatype)
-            if(np.size(dist)==1):
+            if(np.size(dist) == 1):
                 dist = dist*np.ones(naxes,dtype=self.datatype,order='C')
             if(np.size(dist)!=naxes):
-                raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(dist))+" <> "+str(naxes)+" )."))
+                raise ValueError(about._errors.cstring(\
+                    "ERROR: size mismatch ( "+str(np.size(dist))+" <> "+\
+                    str(naxes)+" )."))
         if(np.any(dist<=0)):
-            raise ValueError(about._errors.cstring("ERROR: nonpositive distance(s)."))
-        self.vol = np.real(dist)[::-1]
+            raise ValueError(about._errors.cstring(\
+                "ERROR: nonpositive distance(s)."))
+        self.vol = np.real(dist)
 
         self.fourier = bool(fourier)
         
         ## Initializes the fast-fourier-transform machine, which will be used 
         ## to transform the space
         self.fft_machine = fft_rg.fft_factory()
+        
+        ## Initialize the power_indices object which takes care of kindex,
+        ## pindex, rho and the pundex for a given set of parameters
+        if self.fourier:
+            self.power_indices = gp.power_indices(
+                                        shape=self.shape(),
+                                        dgrid=dist,
+                                        zerocentered=self.paradict['zerocenter'])
 
     @property
     def para(self):
@@ -272,7 +256,8 @@ class rg_space(point_space):
             naxes : int
                 Number of axes of the regular grid.
         """
-        return (np.size(self.para)-1)//2
+#        return (np.size(self.para)-1)//2
+        return len(self.shape())
 
     def zerocenter(self):
         """
@@ -283,7 +268,8 @@ class rg_space(point_space):
             zerocenter : numpy.ndarray
                 Whether the grid is centered on zero for each axis or not.
         """
-        return self.para[-(np.size(self.para)-1)//2:][::-1].astype(np.bool)
+        #return self.para[-(np.size(self.para)-1)//2:][::-1].astype(np.bool)
+        return self.paradict['zerocenter']
 
     def dist(self):
         """
@@ -294,7 +280,10 @@ class rg_space(point_space):
             dist : np.ndarray
                 Distances between two grid points on each axis.
         """
-        return self.vol[::-1]
+        return self.vol
+ 
+    def shape(self):
+        return np.array(self.paradict['num'])
 
     def dim(self,split=False):
         """
@@ -313,10 +302,10 @@ class rg_space(point_space):
                 one-dimensional array with an entry for each axis is returned.
         """
         ## dim = product(n)
-        if(split):
-            return self.para[:(np.size(self.para)-1)//2]
+        if split:
+            return self.shape()
         else:
-            return np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
+            return np.prod(self.shape())
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
@@ -332,16 +321,22 @@ class rg_space(point_space):
                 Number of degrees of freedom of the space.
         """
         ## dof ~ dim
-        if(self.para[(np.size(self.para)-1)//2]<2):
-            return np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
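+        ## (complexity: 0 == purely real, 1 == hermitian symmetry,
+        ## 2 == complex; cf. the complexity formula in __init__)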
+        if self.paradict['complexity'] < 2:
+            return np.prod(self.paradict['num'])
         else:
-            return 2*np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
+            return 2*np.prod(self.paradict['num'])
+
+#        if(self.para[(np.size(self.para)-1)//2]<2):
+#            return np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
+#        else:
+#            return 2*np.prod(self.para[:(np.size(self.para)-1)//2],axis=0,dtype=None,out=None)
 
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
 
-    def enforce_power(self,spec,size=None,**kwargs):
+    def enforce_power(self, spec, size=None, kindex=None, codomain=None,
+                      log=False, nbin=None, binbounds=None):
         """
             Provides a valid power spectrum array from a given object.
 
@@ -366,79 +361,116 @@ class rg_space(point_space):
             codomain : nifty.space, *optional*
                 A compatible codomain for power indexing (default: None).
             log : bool, *optional*
-                Flag specifying if the spectral binning is performed on logarithmic
-                scale or not; if set, the number of used bins is set
-                automatically (if not given otherwise); by default no binning
-                is done (default: None).
+                Flag specifying if the spectral binning is performed on 
+                logarithmic scale or not; if set, the number of used bins is 
+                set automatically (if not given otherwise); by default no 
+                binning is done (default: False).
             nbin : integer, *optional*
-                Number of used spectral bins; if given `log` is set to ``False``;
-                integers below the minimum of 3 induce an automatic setting;
-                by default no binning is done (default: None).
+                Number of used spectral bins; if given `log` is set to 
+                ``False``; integers below the minimum of 3 induce an automatic
+                setting; by default no binning is done (default: None).
             binbounds : {list, array}, *optional*
                 User specific inner boundaries of the bins, which are preferred
                 over the above parameters; by default no binning is done
-                (default: None).            vmin : {scalar, list, ndarray, field}, *optional*
-                Lower limit of the uniform distribution if ``random == "uni"``
-                (default: 0).
-
+                (default: None).
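+
+            Examples
+            --------
+            A hypothetical sketch; ``k_space`` is assumed to be a Fourier
+            rg_space::
+
+                ## a callable is evaluated on the kindex
+                spec = k_space.enforce_power(lambda k: 42/(k+1)**2)
+                ## a scalar is expanded to a constant array of matching size
+                spec = k_space.enforce_power(13)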
         """
-        if(size is None)or(callable(spec)):
-            ## explicit kindex
-            kindex = kwargs.get("kindex",None)
-            if(kindex is None):
-                ## quick kindex
-                if(self.fourier)and(not hasattr(self,"power_indices"))and(len(kwargs)==0):
-                    kindex = gp.nklength(gp.nkdict_fast(self.para[:(np.size(self.para)-1)//2],self.vol,fourier=True))
-                ## implicit kindex
-                else:
-                    try:
-                        self.set_power_indices(**kwargs)
-                    except:
-                        codomain = kwargs.get("codomain",self.get_codomain())
-                        codomain.set_power_indices(**kwargs)
-                        kindex = codomain.power_indices.get("kindex")
-                    else:
-                        kindex = self.power_indices.get("kindex")
-            size = len(kindex)
+
+        ## Set up the local variable kindex.
+        ## The kindex is only needed if spec is a function or if
+        ## the size is not set explicitly.
+        if kindex is None and (size is None or callable(spec)):
+            ## Determine which space should be used to get the kindex
+            if self.fourier:
+                kindex_supply_space = self
+            else:
+                ## Check if the given codomain is compatible with the space
+                ## (an explicit if-clause instead of an assert, so the check
+                ## also survives python -O)
+                if self.check_codomain(codomain):
+                    kindex_supply_space = codomain
+                else:
+                    about.warnings.cprint("WARNING: Supplied codomain is "+\
+                    "incompatible. Generating a generic codomain. This can "+\
+                    "be expensive!")
+                    kindex_supply_space = self.get_codomain()
+            kindex = kindex_supply_space.power_indices.get_index_dict(
+                            log=log, nbin=nbin, binbounds=binbounds)['kindex']
+        
 
-        if(isinstance(spec,field)):
-            spec = spec.val.astype(self.datatype)
-        elif(callable(spec)):
+        
+        ## Now it's about to extract a powerspectrum from spec
+        ## First of all just extract a numpy array. The shape is cared about
+        ## later.
+                    
+        ## Case 1: spec is a function
+        if callable(spec):
+            ## Try to plug in the kindex array in the function directly            
             try:
-                spec = np.array(spec(kindex),dtype=self.datatype)
+                spec = np.array(spec(kindex), dtype=self.datatype)
             except:
-                raise TypeError(about._errors.cstring("ERROR: invalid power spectra function.")) ## exception in ``spec(kindex)``
-        elif(np.isscalar(spec)):
-            spec = np.array([spec],dtype=self.datatype)
+                ## Second try: Use a vectorized version of the function.
+                ## This is slower, but better than nothing
+                try:
+                    spec = np.vectorize(spec)(kindex)
+                except:
+                    raise TypeError(about._errors.cstring(
+                        "ERROR: invalid power spectra function.")) 
+    
+        ## Case 2: spec is a field:
+        elif isinstance(spec, field):
+            spec = spec[:]
+            spec = np.array(spec, dtype = self.datatype).flatten()
+            
+        ## Case 3: spec is a scalar or something else:
         else:
-            spec = np.array(spec,dtype=self.datatype)
-
-        ## drop imaginary part
-        spec = np.real(spec)
+            spec = np.array(spec, dtype = self.datatype).flatten()
+        
+            
+        ## Make some sanity checks
+        ## Drop imaginary part
+        temp_spec = np.real(spec)
+        try:
+            np.testing.assert_allclose(spec, temp_spec)
+        except(AssertionError):
+            about.warnings.cprint("WARNING: Dropping imaginary part.")
+        spec = temp_spec
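+        ## (note: assert_allclose tolerates a vanishing imaginary part, so
+        ## the warning is only issued if actual information gets dropped)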
+        
         ## check finiteness
-        if(not np.all(np.isfinite(spec))):
+        if not np.all(np.isfinite(spec)):
             about.warnings.cprint("WARNING: infinite value(s).")
+        
         ## check positivity (excluding null)
-        if(np.any(spec<0)):
-            raise ValueError(about._errors.cstring("ERROR: nonpositive value(s)."))
-        elif(np.any(spec==0)):
-            about.warnings.cprint("WARNING: nonpositive value(s).")
-
-        ## extend
-        if(np.size(spec)==1):
-            spec = spec*np.ones(size,dtype=spec.dtype,order='C')
-        ## size check
-        elif(np.size(spec)<size):
-            raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+str(np.size(spec))+" < "+str(size)+" )."))
-        elif(np.size(spec)>size):
-            about.warnings.cprint("WARNING: power spectrum cut to size ( == "+str(size)+" ).")
+        if np.any(spec<0):
+            raise ValueError(about._errors.cstring(
+                                "ERROR: nonpositive value(s)."))
+        if np.any(spec==0):
+            about.warnings.cprint("WARNING: zero value(s).")
+        
+        ## Set the size parameter; a computed or explicitly given kindex
+        ## takes precedence over an explicitly given size
+        if kindex is not None:
+            size = len(kindex)
+        
+        ## Fix the size of the spectrum
+        ## If spec is singlevalued, expand it
+        if np.size(spec) == 1:
+            spec = spec*np.ones(size, dtype=spec.dtype, order='C')
+        ## If the size does not fit at all, throw an exception
+        elif np.size(spec) < size:
+            raise ValueError(about._errors.cstring("ERROR: size mismatch ( "+\
+                             str(np.size(spec))+" < "+str(size)+" )."))
+        elif np.size(spec) > size:
+            about.warnings.cprint("WARNING: power spectrum cut to size ( == "+\
+                                str(size)+" ).")
             spec = spec[:size]
-
+        
         return spec
 
+
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
-    def set_power_indices(self,**kwargs):
+    def set_power_indices(self, log=False, nbin=None, binbounds=None):
         """
             Sets the (un)indexing objects for spectral indexing internally.
 
@@ -474,48 +506,106 @@ class rg_space(point_space):
                 If the binning leaves one or more bins empty.
 
         """
-        if(not self.fourier):
-            raise AttributeError(about._errors.cstring("ERROR: power spectra indexing ill-defined."))
-        ## check storage
-        if(hasattr(self,"power_indices")):
-            config = self.power_indices.get("config")
-            ## check configuration
-            redo = False
-            if(config.get("log")!=kwargs.get("log",config.get("log"))):
-                config["log"] = kwargs.get("log")
-                redo = True
-            if(config.get("nbin")!=kwargs.get("nbin",config.get("nbin"))):
-                config["nbin"] = kwargs.get("nbin")
-                redo = True
-            if(np.any(config.get("binbounds")!=kwargs.get("binbounds",config.get("binbounds")))):
-                config["binbounds"] = kwargs.get("binbounds")
-                redo = True
-            if(not redo):
-                return None
-        else:
-            config = {"binbounds":kwargs.get("binbounds",None),"log":kwargs.get("log",None),"nbin":kwargs.get("nbin",None)}
-        ## power indices
-        about.infos.cflush("INFO: setting power indices ...")
-        pindex,kindex,rho = gp.get_power_indices2(self.para[:(np.size(self.para)-1)//2],self.vol,self.para[-((np.size(self.para)-1)//2):].astype(np.bool),fourier=True)
-        ## bin if ...
-        if(config.get("log") is not None)or(config.get("nbin") is not None)or(config.get("binbounds") is not None):
-            pindex,kindex,rho = gp.bin_power_indices(pindex,kindex,rho,**config)
-            ## check binning
-            if(np.any(rho==0)):
-                raise ValueError(about._errors.cstring("ERROR: empty bin(s).")) ## binning too fine
-        ## power undex
-        pundex = np.unique(pindex,return_index=True,return_inverse=False)[1]
-        ## storage
-        self.power_indices = {"config":config,"kindex":kindex,"pindex":pindex,"pundex":pundex,"rho":rho} ## alphabetical
-        about.infos.cprint(" done.")
+
+        about.warnings.cflush("WARNING: set_power_indices is a deprecated"+\
+                                "function. Please use the interface of"+\
+                                "self.power_indices in future!")
+        self.power_indices.set_default(log=log, nbin=nbin, binbounds=binbounds)
         return None
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-    #TODO: Redo casting.
-    def cast(self, x):
-        return self.enforce_values(x)
-        
+
+    def cast(self, x, verbose=False):
+        """
+            Computes valid field values from a given object, trying
+            to translate the given data into a valid form as
+            leniently as possible.
+
+            Parameters
+            ----------
+            x : {float, numpy.ndarray, nifty.field}
+                Object to be transformed into an array of valid field values.
+
+            Returns
+            -------
+            x : numpy.ndarray, distributed_data_object
+                Array containing the field values, which are compatible to the
+                space.
+
+            Other parameters
+            ----------------
+            verbose : bool, *optional*
+                Whether the method should raise a warning if information is 
+                lost during casting (default: False).
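+
+            Examples
+            --------
+            A hypothetical sketch; the concrete space is assumed for
+            illustration::
+
+                x_space = rg_space(8)
+                ## arrays are wrapped into a distributed_data_object of
+                ## matching shape; datatype mismatches are cast with a warning
+                val = x_space.cast(np.arange(8))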
+        """
+        ## Case 1: x is a field
+        if isinstance(x, field):
+            if verbose:
+                ## Check if the domain matches
+                if(self != x.domain):
+                    about.warnings.cflush(\
+                    "WARNING: Getting data from foreign domain!")
+            ## Extract the data, whatever it is, and cast it again
+            return self.cast(x.val)
         
+        ## Case 2: x is a distributed_data_object
+        if isinstance(x, distributed_data_object):
+            ## Check the shape
+            if np.any(x.shape != self.shape()):           
+                ## Check if at least the number of degrees of freedom is equal
+                if x.dim() == self.dim():
+                    ## If the total number of entries matches, use np.reshape...
+                    about.warnings.cflush(\
+                    "WARNING: Trying to reshape the data. This operation is "+\
+                    "expensive as it consolidates the full data!\n")
+                    temp = x.get_full_data()
+                    temp = np.reshape(temp, self.shape())             
+                    ## ... and cast again
+                    return self.cast(temp)
+              
+                else:
+                    raise ValueError(about._errors.cstring(\
+                    "ERROR: Data has incompatible shape!"))
+                    
+            ## Check the datatype
+            if x.dtype != self.datatype:
+                about.warnings.cflush(\
+                "WARNING: Datatypes are uneqal und will be casted! "+\
+                "Potential loss of precision!\n")
+                temp = x.copy_empty(dtype=self.datatype)
+                temp.set_local_data(x.get_local_data())
+                temp.hermitian = x.hermitian
+                x = temp
+            
+            ## Check hermitianity/reality
+            if self.paradict['complexity'] == 0:
+                if not x.is_completely_real:
+                    about.warnings.cflush(\
+                    "WARNING: Data is not completely real. Imaginary part "+\
+                    "will be discarded!\n")
+                    temp = x.copy_empty()            
+                    temp.set_local_data(np.real(x.get_local_data()))
+                    x = temp
+            
+            elif self.paradict['complexity'] == 1:
+                if not x.hermitian and about.hermitianize.status:
+                    about.warnings.cflush(\
+                    "WARNING: Data gets hermitianized. This operation is "+\
+                    "extremely expensive!\n")
+                    temp = x.copy_empty()            
+                    temp.set_full_data(gp.nhermitianize_fast(x.get_full_data(), 
+                        (False, )*len(x.shape)))
+                    x = temp
+                
+            return x
+                
+        ## Case 3: x is something else
+        ## Use general d2o casting 
+        x = distributed_data_object(x, global_shape=self.shape(),\
+            dtype=self.datatype)        
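+        ## (the recursive call then takes the d2o branch above, where shape
+        ## and datatype are guaranteed to match already)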
+        ## Cast the d2o
+        return self.cast(x)
+            
     def enforce_values(self,x,extend=True):
         """
             Computes valid field values from a given object, taking care of
@@ -537,6 +627,8 @@ class rg_space(point_space):
                 Whether a scalar is extented to a constant array or not
                 (default: True).
         """
+        about.warnings.cflush(\
+            "WARNING: enforce_values is a deprecated function. "+\
+            "Please use self.cast!")
         if(isinstance(x,field)):
             if(self==x.domain):
                 if(self.datatype is not x.domain.datatype):
@@ -677,6 +769,9 @@ class rg_space(point_space):
             check : bool
                 Whether or not the given codomain is compatible to the space.
         """
+        if codomain is None:
+            return False
+            
         if(not isinstance(codomain,space)):
             raise TypeError(about._errors.cstring("ERROR: invalid input."))
 
@@ -811,9 +906,9 @@ class rg_space(point_space):
             y : numpy.ndarray
                 Weighted array.
         """
-        x = self.enforce_shape(np.array(x,dtype=self.datatype))
+        x = self.cast(x)
         ## weight
-        return x*np.prod(self.vol,axis=0,dtype=None,out=None)**power
+        return x * np.prod(self.vol, axis=0, dtype=None, out=None)**power
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
     def calc_dot(self, x, y):
@@ -848,7 +943,7 @@ class rg_space(point_space):
 
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
-    def calc_transform(self,x,codomain=None,**kwargs):
+    def calc_transform(self, x, codomain=None, **kwargs):
         """
             Computes the transform of a given array of field values.
 
@@ -865,6 +960,38 @@ class rg_space(point_space):
             Tx : numpy.ndarray
                 Transformed array
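+
+            Examples
+            --------
+            A hypothetical round trip; ``x_space`` and a Fourier codomain
+            are assumed for illustration::
+
+                x_space = rg_space(8)
+                k_space = x_space.get_codomain()
+                Tx = x_space.calc_transform(np.ones(8), codomain=k_space)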
         """
+        x = self.cast(x)
+        
+        if(codomain is None):
+            return x 
+        
+        ## Check if the given codomain is suitable for the transformation
+        if (not isinstance(codomain, rg_space)) or \
+                (not self.check_codomain(codomain)):
+            raise ValueError(about._errors.cstring(
+                                "ERROR: unsupported codomain."))
+        
+        if codomain.fourier:
+            ## correct for forward fft
+            x = self.calc_weight(x, power=1)
+        else:
+            ## correct for inverse fft
+            x = self.calc_weight(x, power=1)
+            x *= self.dim(split=False)
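+            ## (the extra factor dim() presumably undoes the 1/N
+            ## normalisation the fft backend applies to the inverse transform)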
+        
+        ## Perform the transformation
+        Tx = self.fft_machine.transform(val=x, domain=self, codomain=codomain, 
+                                        **kwargs)
+
+        ## When the target space is purely real, the result of the
+        ## transformation must be corrected accordingly; using the casting
+        ## method of the codomain is sufficient.
+        Tx = codomain.cast(Tx)
+        
+        return Tx
+
+        """
+
         x = self.enforce_shape(np.array(x,dtype=self.datatype))
 
         if(codomain is None):
@@ -903,7 +1030,7 @@ class rg_space(point_space):
             raise ValueError(about._errors.cstring("ERROR: unsupported transformation."))
 
         return Tx.astype(codomain.datatype)
-
+        """
     ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
     def calc_smooth(self,x,sigma=0,**kwargs):
diff --git a/rg/powerspectrum.py b/rg/powerspectrum.py
index e72485dd0592e60168ddd564fa0de8255ce9309e..1201a3b9117c50ca2b970fe2e2a2944ae38ecae2 100644
--- a/rg/powerspectrum.py
+++ b/rg/powerspectrum.py
@@ -475,7 +478,7 @@ if __name__ == '__main__':
     comm = MPI.COMM_WORLD
     rank = comm.rank
     size = comm.size
-    p = power_indices((4,4),(1,1), zerocentered=(True,True), nbin = 4)
+    p = power_indices((4,4),(1,1), zerocentered=(True,True), nbin = 5)
     """
     obj = p.default_indices['nkdict']
     for i in np.arange(size):
@@ -1062,13 +1065,14 @@ def nhermitianize_fast(field,zerocentered,special=False):
             index = tuple(ii*maxindex)
             field[index] *= np.sqrt(0.5)
     else: ## regular case
-        field = 0.5*(field+dummy)
+        #field = 0.5*(field+dummy)
+        field = dummy
     ## reshift zerocentered axes
     if(np.any(zerocentered==True)):
         field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
     return field
 
 
 def random_hermitian_pm1(datatype,zerocentered,shape):
 
     """