diff --git a/lm/nifty_lm.py b/lm/nifty_lm.py
index ce2c52ac55052e2cf498b7802814f3cb3de22641..218a48488a5621299bcaf36bb6987ea255b9192c 100644
--- a/lm/nifty_lm.py
+++ b/lm/nifty_lm.py
@@ -60,6 +60,8 @@ if gl is None and gc['lm2gl']:
     gc['lm2gl'] = False
 
 LM_DISTRIBUTION_STRATEGIES = []
+GL_DISTRIBUTION_STRATEGIES = []
+HP_DISTRIBUTION_STRATEGIES = []
 
 
 class lm_space(point_space):
@@ -195,6 +197,17 @@ class lm_space(point_space):
         self.paradict['lmax'] = x[0]
         self.paradict['mmax'] = x[1]
 
+    def _identifier(self):
+        # Extract the identifying parts from the vars(self) dict.
+        temp = [(ii[0],
+                 ((lambda x: tuple(x) if
+                  isinstance(x, np.ndarray) else x)(ii[1])))
+                for ii in vars(self).iteritems()
+                if ii[0] not in ['power_indices', 'comm']]
+        temp.append(('comm', self.comm.__hash__()))
+        # Return the sorted identifiers as a tuple.
+        return tuple(sorted(temp))
+
     def copy(self):
         return lm_space(lmax=self.paradict['lmax'],
                         mmax=self.paradict['mmax'],
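
Note: the new _identifier() helper reduces the instance attributes to a hashable, order-independent tuple (ndarrays become tuples, the MPI communicator is replaced by its hash). Below is a minimal, hypothetical sketch of how such an identifier can back structural equality and hashing of space objects; the toy_space class and its attributes are illustrative only and not part of nifty:

```python
import numpy as np

class toy_space(object):
    """Illustrative stand-in for a space carrying ndarray attributes."""
    def __init__(self, lmax, mmax, distances):
        self.lmax = lmax
        self.mmax = mmax
        self.distances = np.array(distances)

    def _identifier(self):
        # ndarrays are unhashable; convert them to tuples so the result
        # can be sorted, compared, and used as a dict key.
        items = [(key, tuple(val) if isinstance(val, np.ndarray) else val)
                 for key, val in vars(self).items()]
        return tuple(sorted(items))

    def __eq__(self, other):
        return isinstance(other, toy_space) and \
            self._identifier() == other._identifier()

    def __hash__(self):
        return hash(self._identifier())

assert toy_space(10, 10, [1.0]) == toy_space(10, 10, [1.0])
assert len({toy_space(10, 10, [1.0]), toy_space(10, 10, [1.0])}) == 1
```

Because the tuple is sorted by attribute name, two instances constructed independently but with the same parameters compare equal.
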
@@ -891,7 +904,7 @@ class gl_space(point_space):
             An array containing the pixel sizes.
     """
 
-    def __init__(self, nlat, nlon=None, dtype=np.dtype('float'),
+    def __init__(self, nlat, nlon=None, dtype=np.dtype('float64'),
                  datamodel='np', comm=gc['default_comm']):
         """
             Sets the attributes for a gl_space class instance.
@@ -978,8 +991,10 @@ class gl_space(point_space):
             Since the :py:class:`gl_space` class only supports real-valued
             fields, the number of degrees of freedom is the number of pixels.
         """
-        # dof = dim
-        return self.get_dim(split=split)
+        if split:
+            return self.get_shape()
+        else:
+            return self.get_dim()
 
     def get_meta_volume(self, split=False):
         """
@@ -1100,7 +1115,8 @@ class gl_space(point_space):
                 Supported distributions are:
 
                 - "pm1" (uniform distribution over {+1,-1} or {+1,+i,-1,-i}
-                - "gau" (normal distribution with zero-mean and a given standard
+                - "gau" (normal distribution with zero-mean and a given
+                standard
                     deviation or variance)
                 - "syn" (synthesizes from a given power spectrum)
                 - "uni" (uniform distribution over [vmin,vmax[)
@@ -1111,7 +1127,8 @@ class gl_space(point_space):
             var : float, *optional*
                 Variance, overriding `dev` if both are specified
                 (default: 1).
-            spec : {scalar, list, numpy.array, nifty.field, function}, *optional*
+            spec : {scalar, list, numpy.array, nifty.field, function},
+            *optional*
                 Power spectrum (default: 1).
             codomain : nifty.lm_space, *optional*
                 A compatible codomain for power indexing (default: None).
@@ -1477,10 +1494,6 @@ class gl_space(point_space):
         return "nifty_lm.gl_space instance\n- nlat     = " + str(self.para[0]) + "\n- nlon     = " + str(self.para[1]) + "\n- dtype = numpy." + str(np.result_type(self.dtype))
 
 
-
-
-
-
 class hp_space(point_space):
     """
         ..        __
@@ -1604,8 +1617,10 @@ class hp_space(point_space):
             Since the :py:class:`hp_space` class only supports real-valued
             fields, the number of degrees of freedom is the number of pixels.
         """
-        # dof = dim
-        return self.get_dim(split=split)
+        if split:
+            return self.get_shape()
+        else:
+            return self.get_dim()
 
     def get_meta_volume(self, split=False):
         """
diff --git a/nifty_core.py b/nifty_core.py
index 276816a19b916b4f5305e4eca07e55610f37b4fd..56c7e7149d9794e048bb36df29e3182e2da62808 100644
--- a/nifty_core.py
+++ b/nifty_core.py
@@ -159,10 +159,6 @@ import nifty.nifty_utilities as utilities
 
 POINT_DISTRIBUTION_STRATEGIES = DISTRIBUTION_STRATEGIES['global']
 
-#pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679
-
-
-
 
 class space(object):
     """
@@ -220,7 +216,7 @@ class space(object):
             have the same volume.
     """
 
-    def __init__(self, dtype=np.dtype('float'), datamodel='np'):
+    def __init__(self):
         """
             Sets the attributes for a space class instance.
 
@@ -236,11 +232,6 @@ class space(object):
             None
         """
         self.paradict = space_paradict()
-#        self.datamodel = str(datamodel)
-#        self.dtype = np.dtype(dtype)
-#        self.discrete = False
-#        self.harmonic = False
-#        self.distances = np.real(np.array([1], dtype=self.dtype))
 
     @property
     def para(self):
@@ -830,11 +821,9 @@ class point_space(space):
         # parse dtype
         dtype = np.dtype(dtype)
         if dtype not in [np.dtype('bool'),
-                         np.dtype('int8'),
                          np.dtype('int16'),
                          np.dtype('int32'),
                          np.dtype('int64'),
-                         np.dtype('float16'),
                          np.dtype('float32'),
                          np.dtype('float64'),
                          np.dtype('complex64'),
@@ -946,62 +935,68 @@ class point_space(space):
                         return ind[0]
                 return ind
 
-            translation = {"pos": lambda y: getattr(y, '__pos__')(),
-                           "neg": lambda y: getattr(y, '__neg__')(),
-                           "abs": lambda y: getattr(y, '__abs__')(),
-                           "real": lambda y: getattr(y, 'real'),
-                           "imag": lambda y: getattr(y, 'imag'),
-                           "nanmin": np.nanmin,
-                           "amin": np.amin,
-                           "nanmax": np.nanmax,
-                           "amax": np.amax,
-                           "med": np.median,
-                           "mean": np.mean,
-                           "std": np.std,
-                           "var": np.var,
-                           "argmin": _argmin,
-                           "argmin_flat": np.argmin,
-                           "argmax": _argmax,
-                           "argmax_flat": np.argmax,
-                           "conjugate": np.conjugate,
-                           "sum": np.sum,
-                           "prod": np.prod,
-                           "unique": np.unique,
-                           "copy": np.copy,
-                           "isnan": np.isnan,
-                           "isinf": np.isinf,
-                           "isfinite": np.isfinite,
-                           "nan_to_num": np.nan_to_num,
-                           "None": lambda y: y}
+            translation = {'pos': lambda y: getattr(y, '__pos__')(),
+                           'neg': lambda y: getattr(y, '__neg__')(),
+                           'abs': lambda y: getattr(y, '__abs__')(),
+                           'real': lambda y: getattr(y, 'real'),
+                           'imag': lambda y: getattr(y, 'imag'),
+                           'nanmin': np.nanmin,
+                           'amin': np.amin,
+                           'nanmax': np.nanmax,
+                           'amax': np.amax,
+                           'median': np.median,
+                           'mean': np.mean,
+                           'std': np.std,
+                           'var': np.var,
+                           'argmin': _argmin,
+                           'argmin_flat': np.argmin,
+                           'argmax': _argmax,
+                           'argmax_flat': np.argmax,
+                           'conjugate': np.conjugate,
+                           'sum': np.sum,
+                           'prod': np.prod,
+                           'unique': np.unique,
+                           'copy': np.copy,
+                           'copy_empty': np.empty_like,
+                           'isnan': np.isnan,
+                           'isinf': np.isinf,
+                           'isfinite': np.isfinite,
+                           'nan_to_num': np.nan_to_num,
+                           'all': np.all,
+                           'any': np.any,
+                           'None': lambda y: y}
 
         elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
-            translation = {"pos": lambda y: getattr(y, '__pos__')(),
-                           "neg": lambda y: getattr(y, '__neg__')(),
-                           "abs": lambda y: getattr(y, '__abs__')(),
-                           "real": lambda y: getattr(y, 'real'),
-                           "imag": lambda y: getattr(y, 'imag'),
-                           "nanmin": lambda y: getattr(y, 'nanmin')(),
-                           "amin": lambda y: getattr(y, 'amin')(),
-                           "nanmax": lambda y: getattr(y, 'nanmax')(),
-                           "amax": lambda y: getattr(y, 'amax')(),
-                           "median": lambda y: getattr(y, 'median')(),
-                           "mean": lambda y: getattr(y, 'mean')(),
-                           "std": lambda y: getattr(y, 'std')(),
-                           "var": lambda y: getattr(y, 'var')(),
-                           "argmin": lambda y: getattr(y, 'argmin_nonflat')(),
-                           "argmin_flat": lambda y: getattr(y, 'argmin')(),
-                           "argmax": lambda y: getattr(y, 'argmax_nonflat')(),
-                           "argmax_flat": lambda y: getattr(y, 'argmax')(),
-                           "conjugate": lambda y: getattr(y, 'conjugate')(),
-                           "sum": lambda y: getattr(y, 'sum')(),
-                           "prod": lambda y: getattr(y, 'prod')(),
-                           "unique": lambda y: getattr(y, 'unique')(),
-                           "copy": lambda y: getattr(y, 'copy')(),
-                           "isnan": lambda y: getattr(y, 'isnan')(),
-                           "isinf": lambda y: getattr(y, 'isinf')(),
-                           "isfinite": lambda y: getattr(y, 'isfinite')(),
-                           "nan_to_num": lambda y: getattr(y, 'nan_to_num')(),
-                           "None": lambda y: y}
+            translation = {'pos': lambda y: getattr(y, '__pos__')(),
+                           'neg': lambda y: getattr(y, '__neg__')(),
+                           'abs': lambda y: getattr(y, '__abs__')(),
+                           'real': lambda y: getattr(y, 'real'),
+                           'imag': lambda y: getattr(y, 'imag'),
+                           'nanmin': lambda y: getattr(y, 'nanmin')(),
+                           'amin': lambda y: getattr(y, 'amin')(),
+                           'nanmax': lambda y: getattr(y, 'nanmax')(),
+                           'amax': lambda y: getattr(y, 'amax')(),
+                           'median': lambda y: getattr(y, 'median')(),
+                           'mean': lambda y: getattr(y, 'mean')(),
+                           'std': lambda y: getattr(y, 'std')(),
+                           'var': lambda y: getattr(y, 'var')(),
+                           'argmin': lambda y: getattr(y, 'argmin_nonflat')(),
+                           'argmin_flat': lambda y: getattr(y, 'argmin')(),
+                           'argmax': lambda y: getattr(y, 'argmax_nonflat')(),
+                           'argmax_flat': lambda y: getattr(y, 'argmax')(),
+                           'conjugate': lambda y: getattr(y, 'conjugate')(),
+                           'sum': lambda y: getattr(y, 'sum')(),
+                           'prod': lambda y: getattr(y, 'prod')(),
+                           'unique': lambda y: getattr(y, 'unique')(),
+                           'copy': lambda y: getattr(y, 'copy')(),
+                           'copy_empty': lambda y: getattr(y, 'copy_empty')(),
+                           'isnan': lambda y: getattr(y, 'isnan')(),
+                           'isinf': lambda y: getattr(y, 'isinf')(),
+                           'isfinite': lambda y: getattr(y, 'isfinite')(),
+                           'nan_to_num': lambda y: getattr(y, 'nan_to_num')(),
+                           'all': lambda y: getattr(y, 'all')(),
+                           'any': lambda y: getattr(y, 'any')(),
+                           'None': lambda y: y}
         else:
             raise NotImplementedError(about._errors.cstring(
                 "ERROR: function is not implemented for given datamodel."))
@@ -1010,28 +1005,28 @@ class point_space(space):
 
     def binary_operation(self, x, y, op='None', cast=0):
 
-        translation = {"add": lambda z: getattr(z, '__add__'),
-                       "radd": lambda z: getattr(z, '__radd__'),
-                       "iadd": lambda z: getattr(z, '__iadd__'),
-                       "sub": lambda z: getattr(z, '__sub__'),
-                       "rsub": lambda z: getattr(z, '__rsub__'),
-                       "isub": lambda z: getattr(z, '__isub__'),
-                       "mul": lambda z: getattr(z, '__mul__'),
-                       "rmul": lambda z: getattr(z, '__rmul__'),
-                       "imul": lambda z: getattr(z, '__imul__'),
-                       "div": lambda z: getattr(z, '__div__'),
-                       "rdiv": lambda z: getattr(z, '__rdiv__'),
-                       "idiv": lambda z: getattr(z, '__idiv__'),
-                       "pow": lambda z: getattr(z, '__pow__'),
-                       "rpow": lambda z: getattr(z, '__rpow__'),
-                       "ipow": lambda z: getattr(z, '__ipow__'),
-                       "ne": lambda z: getattr(z, '__ne__'),
-                       "lt": lambda z: getattr(z, '__lt__'),
-                       "le": lambda z: getattr(z, '__le__'),
-                       "eq": lambda z: getattr(z, '__eq__'),
-                       "ge": lambda z: getattr(z, '__ge__'),
-                       "gt": lambda z: getattr(z, '__gt__'),
-                       "None": lambda z: lambda u: u}
+        translation = {'add': lambda z: getattr(z, '__add__'),
+                       'radd': lambda z: getattr(z, '__radd__'),
+                       'iadd': lambda z: getattr(z, '__iadd__'),
+                       'sub': lambda z: getattr(z, '__sub__'),
+                       'rsub': lambda z: getattr(z, '__rsub__'),
+                       'isub': lambda z: getattr(z, '__isub__'),
+                       'mul': lambda z: getattr(z, '__mul__'),
+                       'rmul': lambda z: getattr(z, '__rmul__'),
+                       'imul': lambda z: getattr(z, '__imul__'),
+                       'div': lambda z: getattr(z, '__div__'),
+                       'rdiv': lambda z: getattr(z, '__rdiv__'),
+                       'idiv': lambda z: getattr(z, '__idiv__'),
+                       'pow': lambda z: getattr(z, '__pow__'),
+                       'rpow': lambda z: getattr(z, '__rpow__'),
+                       'ipow': lambda z: getattr(z, '__ipow__'),
+                       'ne': lambda z: getattr(z, '__ne__'),
+                       'lt': lambda z: getattr(z, '__lt__'),
+                       'le': lambda z: getattr(z, '__le__'),
+                       'eq': lambda z: getattr(z, '__eq__'),
+                       'ge': lambda z: getattr(z, '__ge__'),
+                       'gt': lambda z: getattr(z, '__gt__'),
+                       'None': lambda z: lambda u: u}
 
         if (cast & 1) != 0:
             x = self.cast(x)
@@ -1069,7 +1064,7 @@ class point_space(space):
     def get_shape(self):
         return (self.paradict['num'],)
 
-    def get_dim(self, split=False):
+    def get_dim(self):
         """
             Computes the dimension of the space, i.e.\  the number of points.
 
@@ -1084,10 +1079,7 @@ class point_space(space):
             dim : {int, numpy.ndarray}
                 Dimension(s) of the space.
         """
-        if split:
-            return self.get_shape()
-        else:
-            return np.prod(self.get_shape())
+        return np.prod(self.get_shape())
 
     def get_dof(self, split=False):
         """
@@ -1100,11 +1092,15 @@ class point_space(space):
             dof : int
                 Number of degrees of freedom of the space.
         """
-        pre_dof = self.get_dim(split=split)
-        if issubclass(self.dtype.type, np.complexfloating):
-            return pre_dof * 2
+        if split:
+            dof = self.get_shape()
+            if issubclass(self.dtype.type, np.complexfloating):
+                dof = tuple(np.array(dof)*2)
         else:
-            return pre_dof
+            dof = self.get_dim()
+            if issubclass(self.dtype.type, np.complexfloating):
+                dof = dof * 2
+        return dof
 
     def get_vol(self, split=False):
         if split:
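
Note: with the split keyword dropped from get_dim(), the splitting now happens inside get_dof() itself: split=True yields a per-axis tuple, and a complex dtype doubles the count because every point carries a real and an imaginary degree of freedom. A minimal plain-numpy sketch of the intended arithmetic; the dof() helper below is illustrative, not the nifty API:

```python
import numpy as np

def dof(shape, dtype, split=False):
    # Complex fields carry two real degrees of freedom per point.
    factor = 2 if issubclass(np.dtype(dtype).type, np.complexfloating) else 1
    if split:
        return tuple(int(n) * factor for n in shape)
    return int(np.prod(shape)) * factor

print(dof((10,), np.float64))                  # 10
print(dof((10,), np.complex128))               # 20
print(dof((10,), np.complex128, split=True))   # (20,)
```
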
@@ -1139,7 +1135,7 @@ class point_space(space):
             mol = self.cast(1, dtype=np.dtype('float'))
             return self.calc_weight(mol, power=1)
 
-    def cast(self, x, dtype=None, **kwargs):
+    def cast(self, x=None, dtype=None, **kwargs):
         if dtype is not None:
             dtype = np.dtype(dtype)
 
@@ -1231,7 +1227,7 @@ class point_space(space):
             if to_copy:
                 temp = x.copy_empty(dtype=dtype,
                                     distribution_strategy=self.datamodel)
-                temp.set_local_data(x.get_local_data())
+                temp.inject((slice(None),), x, (slice(None),))
                 temp.hermitian = x.hermitian
                 x = temp
 
@@ -1518,23 +1514,23 @@ class point_space(space):
             return self.cast(0)
 
         if self.datamodel == 'np':
-            if arg[0] == "pm1":
+            if arg['random'] == "pm1":
                 x = random.pm1(dtype=self.dtype,
                                shape=self.get_shape())
-            elif arg[0] == "gau":
+            elif arg['random'] == "gau":
                 x = random.gau(dtype=self.dtype,
                                shape=self.get_shape(),
-                               mean=None,
-                               dev=arg[2],
-                               var=arg[3])
-            elif arg[0] == "uni":
+                               mean=arg['mean'],
+                               std=arg['std'])
+            elif arg['random'] == "uni":
                 x = random.uni(dtype=self.dtype,
                                shape=self.get_shape(),
-                               vmin=arg[1],
-                               vmax=arg[2])
+                               vmin=arg['vmin'],
+                               vmax=arg['vmax'])
             else:
                 raise KeyError(about._errors.cstring(
-                    "ERROR: unsupported random key '" + str(arg[0]) + "'."))
+                    "ERROR: unsupported random key '" +
+                    str(arg['random']) + "'."))
             return x
 
         elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
@@ -1543,46 +1539,34 @@ class point_space(space):
                                              dtype=self.dtype)
 
             # Case 1: uniform distribution over {-1,+1}/{1,i,-1,-i}
-            if arg[0] == 'pm1':
+            if arg['random'] == 'pm1':
                 sample.apply_generator(lambda s: random.pm1(dtype=self.dtype,
                                                             shape=s))
 
             # Case 2: normal distribution with zero-mean and a given standard
             #         deviation or variance
-            elif arg[0] == 'gau':
-                var = arg[3]
-                if np.isscalar(var) or var is None:
-                    processed_var = var
+            elif arg['random'] == 'gau':
+                std = arg['std']
+                if np.isscalar(std) or std is None:
+                    processed_std = std
                 else:
                     try:
-                        processed_var = sample.distributor.\
-                            extract_local_data(var)
+                        processed_std = sample.distributor.\
+                                                        extract_local_data(std)
                     except(AttributeError):
-                        processed_var = var
+                        processed_std = std
 
                 sample.apply_generator(lambda s: random.gau(dtype=self.dtype,
                                                             shape=s,
-                                                            mean=arg[1],
-                                                            dev=arg[2],
-                                                            var=processed_var))
+                                                            mean=arg['mean'],
+                                                            std=processed_std))
 
             # Case 3: uniform distribution
-            elif arg[0] == 'gau':
-                var = arg[3]
-                if np.isscalar(var) == True or var is None:
-                    processed_var = var
-                else:
-                    try:
-                        processed_var = sample.distributor.extract_local_data(
-                            var)
-                    except(AttributeError):
-                        processed_var = var
-
-                sample.apply_generator(lambda s: random.gau(dtype=self.dtype,
+            elif arg['random'] == 'uni':
+                sample.apply_generator(lambda s: random.uni(dtype=self.dtype,
                                                             shape=s,
-                                                            mean=arg[1],
-                                                            dev=arg[2],
-                                                            var=processed_var))
+                                                            vmin=arg['vmin'],
+                                                            vmax=arg['vmax']))
             return sample
 
         else:
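
Note: since random.parse_arguments() now returns a dict keyed by name, the branches above read arg['random'], arg['mean'], arg['std'], arg['vmin'] and so on instead of positional arg[0]..arg[3]. A self-contained sketch of that dispatch for the plain-numpy path; parse_arguments() and get_random_values() below are simplified stand-ins, not the nifty implementations:

```python
import numpy as np

def parse_arguments(random='gau', **kwargs):
    # Simplified stand-in for random.parse_arguments: keyword access
    # replaces the old positional list [key, mean, dev, var].
    if random == 'gau':
        return {'random': 'gau',
                'mean': kwargs.get('mean', None),
                'std': kwargs.get('std', None)}
    elif random == 'uni':
        return {'random': 'uni',
                'vmin': kwargs.get('vmin', 0),
                'vmax': kwargs.get('vmax', 1)}
    raise KeyError("unsupported random key '%s'" % random)

def get_random_values(shape, **kwargs):
    arg = parse_arguments(**kwargs)
    if arg['random'] == 'gau':
        x = np.random.normal(loc=0, scale=1, size=shape)
        if arg['std'] is not None:
            x *= np.abs(arg['std'])
        if arg['mean'] is not None:
            x += arg['mean']
        return x
    elif arg['random'] == 'uni':
        return np.random.uniform(arg['vmin'], arg['vmax'], size=shape)

sample = get_random_values((4,), random='gau', mean=1.0, std=2.0)
print(sample.shape)   # (4,)
```
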
@@ -2966,7 +2950,7 @@ class field(object):
     def nanmax(self, **kwargs):
         return self._unary_helper(self.get_val(), op='nanmax', **kwargs)
 
-    def med(self, **kwargs):
+    def median(self, **kwargs):
         """
             Returns the median of the field values.
 
diff --git a/nifty_random.py b/nifty_random.py
index 85ca303b3efb2d29825ea37079b6d6018c1750f8..17edc6ef149f0e13267e184a8b9fe453b0a820e9 100644
--- a/nifty_random.py
+++ b/nifty_random.py
@@ -122,13 +122,14 @@ class random(object):
             return None
 
         if key == "pm1":
-            return [key]
+            return {'random': key}
 
         elif key == "gau":
             mean = kwargs.get('mean', None)
-            dev = kwargs.get('dev', None)
-            var = kwargs.get('var', None)
-            return [key, mean, dev, var]
+            std = kwargs.get('std', None)
+            return {'random': key,
+                    'mean': mean,
+                    'std': std}
 
         elif key == "syn":
             pindex = kwargs.get('pindex', None)
@@ -162,12 +163,20 @@ class random(object):
                                                  size=size,
                                                  kindex=kindex)
 
-            return [key, spec, kpack, harmonic_domain, log, nbin, binbounds]
+            return {'random': key,
+                    'spec': spec,
+                    'kpack': kpack,
+                    'harmonic_domain': harmonic_domain,
+                    'log': log,
+                    'nbin': nbin,
+                    'binbounds': binbounds}
 
         elif key == "uni":
-            vmin = domain.dtype(kwargs.get('vmin', 0))
-            vmax = domain.dtype(kwargs.get('vmax', 1))
-            return [key, vmin, vmax]
+            vmin = domain.dtype.type(kwargs.get('vmin', 0))
+            vmax = domain.dtype.type(kwargs.get('vmax', 1))
+            return {'random': key,
+                    'vmin': vmin,
+                    'vmax': vmax}
 
         else:
             raise KeyError(about._errors.cstring(
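
Note: one subtle fix in the 'uni' branch is the change from domain.dtype(...) to domain.dtype.type(...): a numpy.dtype instance is not callable on the numpy versions this code targets, whereas the scalar constructor lives on its .type attribute. A quick demonstration:

```python
import numpy as np

dt = np.dtype('float64')
# Calling dt directly, e.g. dt(0.5), raises TypeError here;
# the scalar constructor is dt.type.
vmin = dt.type(0.5)
print(type(vmin).__name__)   # float64
print(vmin)                  # 0.5
```
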
@@ -196,7 +205,7 @@ class random(object):
         """
         size = np.prod(shape, axis=0, dtype=np.dtype('int'), out=None)
 
-        if(issubclass(dtype.type, np.complexfloating)):
+        if issubclass(dtype.type, np.complexfloating):
             x = np.array([1 + 0j, 0 + 1j, -1 + 0j, 0 - 1j],
                          dtype=dtype)[np.random.randint(4,
                                                         high=None,
@@ -204,12 +213,12 @@ class random(object):
         else:
             x = 2 * np.random.randint(2, high=None, size=size) - 1
 
-        return x.astype(dtype).reshape(shape, order='C')
+        return x.astype(dtype).reshape(shape)
 
     # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
     @staticmethod
-    def gau(dtype=np.dtype('float64'), shape=1, mean=None, dev=None, var=None):
+    def gau(dtype=np.dtype('float64'), shape=(1,), mean=None, std=None):
         """
             Generates random field values according to a normal distribution.
 
@@ -239,44 +248,36 @@ class random(object):
                 `shape`.
 
         """
-        size = np.prod(shape, axis=0, dtype=np.dtype('int'), out=None)
+        size = np.prod(shape)
 
-        if(issubclass(dtype.type, np.complexfloating)):
-            x = np.empty(size, dtype=dtype, order='C')
+        if issubclass(dtype.type, np.complexfloating):
+            x = np.empty(size, dtype=dtype)
             x.real = np.random.normal(loc=0, scale=np.sqrt(0.5), size=size)
             x.imag = np.random.normal(loc=0, scale=np.sqrt(0.5), size=size)
         else:
             x = np.random.normal(loc=0, scale=1, size=size)
 
-        if(var is not None):
-            if(np.size(var) == 1):
-                x *= np.sqrt(np.abs(var))
-            elif(np.size(var) == size):
-                x *= np.sqrt(np.absolute(var).flatten(order='C'))
-            else:
-                raise ValueError(about._errors.cstring(
-                    "ERROR: dimension mismatch ( " + str(np.size(var)) +
-                    " <> " + str(size) + " )."))
-        elif(dev is not None):
-            if(np.size(dev) == 1):
-                x *= np.abs(dev)
-            elif(np.size(dev) == size):
-                x *= np.absolute(dev).flatten(order='C')
+        if std is not None:
+            if np.size(std) == 1:
+                x *= np.abs(std)
+            elif np.size(std) == size:
+                x *= np.absolute(std).flatten()
             else:
                 raise ValueError(about._errors.cstring(
-                    "ERROR: dimension mismatch ( " + str(np.size(dev)) +
+                    "ERROR: dimension mismatch ( " + str(np.size(std)) +
                     " <> " + str(size) + " )."))
-        if(mean is not None):
-            if(np.size(mean) == 1):
+
+        if mean is not None:
+            if np.size(mean) == 1:
                 x += mean
-            elif(np.size(mean) == size):
+            elif np.size(mean) == size:
                 x += np.array(mean).flatten(order='C')
             else:
                 raise ValueError(about._errors.cstring(
                     "ERROR: dimension mismatch ( " + str(np.size(mean)) +
                     " <> " + str(size) + " )."))
 
-        return x.astype(dtype).reshape(shape, order='C')
+        return x.astype(dtype).reshape(shape)
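
Note: the rewritten gau() draws standard-normal values (per-component scale sqrt(0.5) for complex dtypes, so the total variance stays 1), then optionally scales by std and shifts by mean, where std may be a scalar or an array matching the sample size. A plain-numpy sketch of the same recipe outside of nifty; the function below is illustrative:

```python
import numpy as np

def gau(dtype=np.dtype('float64'), shape=(1,), mean=None, std=None):
    size = int(np.prod(shape))
    if issubclass(np.dtype(dtype).type, np.complexfloating):
        # scale sqrt(0.5) per component keeps the total variance at 1
        x = np.empty(size, dtype=dtype)
        x.real = np.random.normal(loc=0, scale=np.sqrt(0.5), size=size)
        x.imag = np.random.normal(loc=0, scale=np.sqrt(0.5), size=size)
    else:
        x = np.random.normal(loc=0, scale=1, size=size)
    if std is not None:
        # std may be a scalar or an array matching the sample size
        scale = np.abs(std) if np.size(std) == 1 \
            else np.absolute(std).flatten()
        x = x * scale
    if mean is not None:
        x = x + mean
    return x.astype(dtype).reshape(shape)

draws = gau(np.dtype('complex128'), shape=(100000,), std=3.0)
print(round(float(np.var(draws).real), 1))   # close to 9.0
```
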
 
     # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 
diff --git a/rg/nifty_rg.py b/rg/nifty_rg.py
index 469d0a3f2975c61ac88d086e65772fc65cb299cf..0bcf9e2faad2bf025fc1f76c9aa7d9e4554e559a 100644
--- a/rg/nifty_rg.py
+++ b/rg/nifty_rg.py
@@ -255,8 +255,8 @@ class rg_space(point_space):
         casted_x = super(rg_space, self)._cast_to_d2o(x=x,
                                                       dtype=dtype,
                                                       **kwargs)
-        if hermitianize and self.paradict['complexity'] == 1 and \
-           not casted_x.hermitian:
+        if x is not None and hermitianize and \
+           self.paradict['complexity'] == 1 and not casted_x.hermitian:
             about.warnings.cflush(
                  "WARNING: Data gets hermitianized. This operation is " +
                  "extremely expensive\n")
@@ -268,7 +268,7 @@ class rg_space(point_space):
         casted_x = super(rg_space, self)._cast_to_np(x=x,
                                                      dtype=dtype,
                                                      **kwargs)
-        if hermitianize and self.paradict['complexity'] == 1:
+        if x is not None and hermitianize and self.paradict['complexity'] == 1:
             about.warnings.cflush(
                  "WARNING: Data gets hermitianized. This operation is " +
                  "extremely expensive\n")
@@ -539,98 +539,76 @@ class rg_space(point_space):
             vmax : float, *optional*
                 Upper limit for a uniform distribution (default: 1).
         """
-        # TODO: Without hermitianization the random-methods from pointspace
-        # could be used.
-
         # Parse the keyword arguments
         arg = random.parse_arguments(self, **kwargs)
 
-        # Prepare the empty distributed_data_object
-        sample = distributed_data_object(global_shape=self.get_shape(),
-                                         dtype=self.dtype)
-
-        # Should the output be hermitianized? This does not depend on the
-        # hermitianize boolean in about, as it would yield in wrong,
-        # not recoverable results
-
-        hermitianizeQ = self.paradict['complexity']
+        # Should the output be hermitianized?
+        hermitianizeQ = (self.paradict['complexity'] == 1)
 
         # Case 1: uniform distribution over {-1,+1}/{1,i,-1,-i}
-        if arg[0] == 'pm1' and not hermitianizeQ:
-            sample.apply_generator(lambda s: random.pm1(dtype=self.dtype,
-                                                        shape=s))
+        if arg['random'] == 'pm1' and not hermitianizeQ:
+            sample = super(rg_space, self).get_random_values(**arg)
 
-        elif arg[0] == 'pm1' and hermitianizeQ:
+        elif arg['random'] == 'pm1' and hermitianizeQ:
             sample = self.get_random_values(random='uni', vmin=-1, vmax=1)
-            local_data = sample.get_local_data()
+
             if issubclass(sample.dtype.type, np.complexfloating):
-                temp_data = local_data.copy()
-                local_data[temp_data.real >= 0.5] = 1
-                local_data[(temp_data.real >= 0) * (temp_data.real < 0.5)] = -1
-                local_data[(temp_data.real < 0) * (temp_data.imag >= 0)] = 1j
-                local_data[(temp_data.real < 0) * (temp_data.imag < 0)] = -1j
+                temp_data = sample.copy()
+                sample[temp_data.real >= 0.5] = 1
+                sample[(temp_data.real >= 0) * (temp_data.real < 0.5)] = -1
+                sample[(temp_data.real < 0) * (temp_data.imag >= 0)] = 1j
+                sample[(temp_data.real < 0) * (temp_data.imag < 0)] = -1j
             else:
-                local_data[local_data >= 0] = 1
-                local_data[local_data < 0] = -1
-            sample.set_local_data(local_data)
+                sample[sample >= 0] = 1
+                sample[sample < 0] = -1
 
         # Case 2: normal distribution with zero-mean and a given standard
         #         deviation or variance
-        elif arg[0] == 'gau':
-            var = arg[3]
-            if np.isscalar(var) == True or var is None:
-                processed_var = var
-            else:
-                try:
-                    processed_var = sample.distributor.extract_local_data(var)
-                except(AttributeError):
-                    processed_var = var
-
-            sample.apply_generator(lambda s: random.gau(dtype=self.dtype,
-                                                        shape=s,
-                                                        mean=arg[1],
-                                                        dev=arg[2],
-                                                        var=processed_var))
+        elif arg['random'] == 'gau':
+            sample = super(rg_space, self).get_random_values(**arg)
 
             if hermitianizeQ:
                 sample = utilities.hermitianize(sample)
 
         # Case 3: uniform distribution
-        elif arg[0] == "uni" and not hermitianizeQ:
-            sample.apply_generator(lambda s: random.uni(dtype=self.dtype,
-                                                        shape=s,
-                                                        vmin=arg[1],
-                                                        vmax=arg[2]))
+        elif arg['random'] == "uni" and not hermitianizeQ:
+            sample = super(rg_space, self).get_random_values(**arg)
 
-        elif arg[0] == "uni" and hermitianizeQ:
+        elif arg['random'] == "uni" and hermitianizeQ:
             # For a hermitian uniform sample, generate a gaussian one
             # and then convert it to a uniform one
             sample = self.get_random_values(random='gau')
             # Use the cummulative of the gaussian, the error function in order
             # to transform it to a uniform distribution.
             if issubclass(sample.dtype.type, np.complexfloating):
-                def temp_func(x):
+                def temp_erf(x):
                     return erf(x.real) + 1j * erf(x.imag)
             else:
-                def temp_func(x):
+                def temp_erf(x):
                     return erf(x / np.sqrt(2))
-            sample.apply_scalar_function(function=temp_func,
-                                         inplace=True)
+
+            if self.datamodel == 'np':
+                sample = temp_erf(sample)
+            elif self.datamodel in RG_DISTRIBUTION_STRATEGIES:
+                sample.apply_scalar_function(function=temp_erf, inplace=True)
+            else:
+                raise NotImplementedError(about._errors.cstring(
+                    "ERROR: function is not implemented for given datamodel."))
 
             # Shift and stretch the uniform distribution into the given limits
             # sample = (sample + 1)/2 * (vmax-vmin) + vmin
-            vmin = arg[1]
-            vmax = arg[2]
+            vmin = arg['vmin']
+            vmax = arg['vmax']
             sample *= (vmax - vmin) / 2.
             sample += 1 / 2. * (vmax + vmin)
 
-        elif(arg[0] == "syn"):
-            spec = arg[1]
-            kpack = arg[2]
-            harmonic_domain = arg[3]
-            log = arg[4]
-            nbin = arg[5]
-            binbounds = arg[6]
+        elif(arg['random'] == "syn"):
+            spec = arg['spec']
+            kpack = arg['kpack']
+            harmonic_domain = arg['harmonic_domain']
+            log = arg['log']
+            nbin = arg['nbin']
+            binbounds = arg['binbounds']
             # Check whether there is a kpack available or not.
             # kpack is only used for computing kdict and extracting kindex
             # If not, take kdict and kindex from the fourier_domain
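
Note: in the hermitian 'uni' branch above, the uniform sample is produced indirectly: draw a Gaussian field (whose hermitian symmetry is easy to enforce), push it through its own CDF via the error function so the values become uniform on (-1, 1), then shift and stretch into [vmin, vmax]. A standalone sketch of that probability-integral transform, assuming numpy and scipy are available:

```python
import numpy as np
from scipy.special import erf

# Standard-normal draws (hermitianization omitted in this sketch) ...
x = np.random.normal(loc=0, scale=1, size=100000)

# erf(x / sqrt(2)) is the N(0, 1) CDF rescaled to (-1, 1), so the
# transformed values are uniformly distributed on (-1, 1).
u = erf(x / np.sqrt(2))

# Shift and stretch into [vmin, vmax], as in the branch above:
# sample = (sample + 1)/2 * (vmax - vmin) + vmin
vmin, vmax = -4.0, 6.0
u = u * (vmax - vmin) / 2. + (vmax + vmin) / 2.

print(round(float(u.min()), 1))    # about -4.0
print(round(float(u.max()), 1))    # about 6.0
print(round(float(u.mean()), 1))   # about 1.0
```
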
@@ -656,14 +634,9 @@ class rg_space(point_space):
                 # -> simply generate a random field in fourier space and
                 # weight the entries accordingly to the powerspectrum
                 if self.paradict['complexity'] == 0:
-                    # set up the sample object. Overwrite the default from
-                    # above to be sure, that the distribution strategy matches
-                    # with the one from kdict
-                    sample = kdict.copy_empty(dtype=self.dtype)
-                    # set up and apply the random number generator
-                    sample.apply_generator(lambda s: np.random.normal(
-                                                       loc=0, scale=1, size=s))
-
+                    sample = self.get_random_values(random='gau',
+                                                    mean=0,
+                                                    std=1)
                 # subcase 2: self is hermitian but probably complex
                 # -> generate a real field (in position space) and transform
                 # it to harmonic space -> field in harmonic space is
@@ -671,14 +644,9 @@ class rg_space(point_space):
                 # powerspectrum.
                 elif self.paradict['complexity'] == 1:
                     temp_codomain = self.get_codomain()
-                    # set up the sample object. Overwrite the default from
-                    # above to be sure, that the distribution strategy matches
-                    # with the one from kdict
-                    sample = kdict.copy_empty(
-                        dtype=temp_codomain.dtype)
-                    # set up and apply the random number generator
-                    sample.apply_generator(lambda s: np.random.normal(
-                                                       loc=0, scale=1, size=s))
+                    sample = temp_codomain.get_random_values(random='gau',
+                                                             mean=0,
+                                                             std=1)
 
                     # In order to get the normalisation right, the sqrt
                     # of self.dim must be divided out.
@@ -695,37 +663,34 @@ class rg_space(point_space):
 
                     # ensure that the kdict and the harmonic_sample have the
                     # same distribution strategy
-                    assert(kdict.distribution_strategy ==
-                           sample.distribution_strategy)
+                    try:
+                        assert(kdict.distribution_strategy ==
+                               sample.distribution_strategy)
+                    except AttributeError:
+                        pass
 
                 # subcase 3: self is fully complex
                 # -> generate a complex random field in harmonic space and
                 # weight the modes accordingly to the powerspectrum
                 elif self.paradict['complexity'] == 2:
-                    # set up the sample object. Overwrite the default from
-                    # above to be sure, that the distribution strategy matches
-                    # with the one from kdict
-                    sample = kdict.copy_empty(dtype=self.dtype)
-
-                    # set up the random number generator
-                    def temp_gen(s):
-                        result = (np.random.normal(loc=0,
-                                                   scale=1 / np.sqrt(2),
-                                                   size=s) +
-                                  np.random.normal(loc=0,
-                                                   scale=1 / np.sqrt(2),
-                                                   size=s) * 1.j)
-                        return result
-                    # apply the random number generator
-                    sample.apply_generator(temp_gen)
-
+                    sample = self.get_random_values(random='gau',
+                                                    mean=0,
+                                                    std=1)
                 # apply the powerspectrum renormalization
-                # therefore extract the local data from kdict
-                local_kdict = kdict.get_local_data()
-                rescaler = np.sqrt(
-                    spec[np.searchsorted(kindex, local_kdict)])
-                sample.apply_scalar_function(lambda x: x * rescaler,
-                                             inplace=True)
+                if self.datamodel == 'np':
+                    rescaler = np.sqrt(spec[np.searchsorted(kindex, kdict)])
+                    sample *= rescaler
+                elif self.datamodel in RG_DISTRIBUTION_STRATEGIES:
+                    # extract the local data from kdict
+                    local_kdict = kdict.get_local_data()
+                    rescaler = np.sqrt(
+                        spec[np.searchsorted(kindex, local_kdict)])
+                    sample.apply_scalar_function(lambda x: x * rescaler,
+                                                 inplace=True)
+                else:
+                    raise NotImplementedError(about._errors.cstring(
+                        "ERROR: function is not implemented for given " +
+                        "datamodel."))
             # Case 2: self is a position space
             else:
                 # get a suitable codomain
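
Note: the power-spectrum renormalisation a few lines above rescales white Gaussian modes by sqrt(P(k)), where the spectrum value for each mode is found with np.searchsorted against the binned kindex; the 'np' path works on the full array, the d2o path on the local data. A small sketch of the lookup-and-rescale step with made-up spectrum and k values:

```python
import numpy as np

# Binned k values and the corresponding power spectrum (made up).
kindex = np.array([0.0, 1.0, 2.0, 3.0])
spec = np.array([1.0, 4.0, 9.0, 16.0])

# k value of every mode of a (here 2x2) harmonic grid.
kdict = np.array([[0.0, 1.0],
                  [1.0, 2.0]])

# White Gaussian noise in harmonic space ...
sample = np.random.normal(size=kdict.shape)

# ... rescaled so that each mode gets standard deviation sqrt(P(k)).
rescaler = np.sqrt(spec[np.searchsorted(kindex, kdict)])
sample *= rescaler

print(rescaler)   # [[1. 2.]
                  #  [2. 3.]]
```
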
@@ -754,27 +719,27 @@ class rg_space(point_space):
 
                 # Get a hermitian/real/complex sample in harmonic space from
                 # the codomain
-                sample = temp_codomain.get_random_values(
-                    random='syn',
-                    pindex=kpack[0],
-                    kindex=kpack[1],
-                    spec=spec,
-                    codomain=self,
-                    log=log,
-                    nbin=nbin,
-                    binbounds=binbounds
-                )
+                sample = temp_codomain.get_random_values(random='syn',
+                                                         pindex=kpack[0],
+                                                         kindex=kpack[1],
+                                                         spec=spec,
+                                                         codomain=self,
+                                                         log=log,
+                                                         nbin=nbin,
+                                                         binbounds=binbounds)
 
                 # Perform a fourier transform
-                sample = temp_codomain.calc_transform(sample,
-                                                      codomain=self)
+                sample = temp_codomain.calc_transform(sample, codomain=self)
 
             if self.paradict['complexity'] == 1:
-                sample.hermitian = True
+                try:
+                    sample.hermitian = True
+                except AttributeError:
+                    pass
 
         else:
             raise KeyError(about._errors.cstring(
-                "ERROR: unsupported random key '" + str(arg[0]) + "'."))
+                "ERROR: unsupported random key '" + str(arg['random']) + "'."))
 
         return sample
 
diff --git a/test/test_nifty_mpi_data.py b/test/test_nifty_mpi_data.py
index 10e15e08e5c73785b0ed91c7a74aad8ab6340194..243e9cd8bda09da7128b2f441e4fddd4f929dfca 100644
--- a/test/test_nifty_mpi_data.py
+++ b/test/test_nifty_mpi_data.py
@@ -1455,10 +1455,10 @@ class Test_contractions(unittest.TestCase):
     @parameterized.expand(
         itertools.product([np.dtype('int'), np.dtype('float'),
                            np.dtype('complex')],
+                          [(0,), (4, 4)],
                           all_distribution_strategies),
         testcase_func_name=custom_name_func)
-    def test_vdot(self, dtype, distribution_strategy):
-        global_shape = (4, 4)
+    def test_vdot(self, dtype, global_shape, distribution_strategy):
         (a, obj) = generate_data(global_shape, dtype,
                                  distribution_strategy)
         assert_equal(obj.vdot(2 * obj), np.vdot(a, 2 * a))
@@ -1467,20 +1467,34 @@ class Test_contractions(unittest.TestCase):
 ###############################################################################
 
     @parameterized.expand(
-        itertools.product(['min', 'amin', 'nanmin', 'max', 'amax', 'nanmax',
-                           'sum', 'prod', 'mean', 'var', 'std', 'median'],
+        itertools.product(['sum', 'prod', 'mean', 'var', 'std', 'median'],
                           all_datatypes,
+                          [(0,), (6, 6)],
                           all_distribution_strategies),
         testcase_func_name=custom_name_func)
-    def test_compatible_contractions(self, function, dtype,
-                                     distribution_strategy):
-        global_shape = (6, 6)
+    def test_compatible_contractions_with_zeros(self, function, dtype,
+                                                global_shape,
+                                                distribution_strategy):
+        (a, obj) = generate_data(global_shape, dtype,
+                                 distribution_strategy,
+                                 strictly_positive=True)
+        assert_almost_equal(getattr(obj, function)(), getattr(np, function)(a),
+                            decimal=4)
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(['min', 'amin', 'nanmin', 'max', 'amax', 'nanmax'],
+                          all_datatypes,
+                          [(6, 6)],
+                          all_distribution_strategies),
+        testcase_func_name=custom_name_func)
+    def test_compatible_contractions_without_zeros(self, function, dtype,
+                                                   global_shape,
+                                                   distribution_strategy):
         (a, obj) = generate_data(global_shape, dtype,
                                  distribution_strategy,
                                  strictly_positive=True)
-        # assert_almost_equal(
-        #    (getattr(obj, function)()-getattr(np, function)(a))/\
-        #    (1+getattr(np, function)(a)), 0, decimal = 4)
         assert_almost_equal(getattr(obj, function)(), getattr(np, function)(a),
                             decimal=4)
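
Note: the contraction tests are presumably split because sum, prod, mean, var, std and median still return a value (possibly nan) on a zero-size array, while the min/max family raises, so only the former group is exercised with the empty (0,) shape. A quick illustration of the numpy behaviour behind the split:

```python
import numpy as np

empty = np.array([])
print(np.sum(empty))    # 0.0  (identity element of addition)
print(np.prod(empty))   # 1.0  (identity element of multiplication)

try:
    np.amin(empty)
except ValueError as e:
    print('amin on a zero-size array raises ValueError: %s' % e)
```
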
 
diff --git a/test/test_nifty_spaces.py b/test/test_nifty_spaces.py
index e6dd22330b146c3ef31011de5979468186006cd0..c22524e838ec843dc7ceed13fe5b7d6142b5650f 100644
--- a/test/test_nifty_spaces.py
+++ b/test/test_nifty_spaces.py
@@ -14,11 +14,19 @@ from nifty import space,\
                   rg_space,\
                   lm_space,\
                   hp_space,\
-                  gl_space
+                  gl_space,\
+                  field,\
+                  distributed_data_object
 
 from nifty.nifty_paradict import space_paradict
 from nifty.nifty_core import POINT_DISTRIBUTION_STRATEGIES
 
+from nifty.rg.nifty_rg import RG_DISTRIBUTION_STRATEGIES
+from nifty.lm.nifty_lm import LM_DISTRIBUTION_STRATEGIES,\
+                              GL_DISTRIBUTION_STRATEGIES,\
+                              HP_DISTRIBUTION_STRATEGIES
+from nifty.nifty_power_indices import power_indices
+
 
 ###############################################################################
 
@@ -31,20 +39,31 @@ def custom_name_func(testcase_func, param_num, param):
 ###############################################################################
 ###############################################################################
 
-all_datatypes = [np.dtype('bool'),
-                 np.dtype('int8'),
-                 np.dtype('int16'),
-                 np.dtype('int32'),
-                 np.dtype('int64'),
-                 np.dtype('float16'),
-                 np.dtype('float32'),
-                 np.dtype('float64'),
-                 np.dtype('complex64'),
-                 np.dtype('complex128')]
+all_point_datatypes = [np.dtype('bool'),
+                       np.dtype('int16'),
+                       np.dtype('int32'),
+                       np.dtype('int64'),
+                       np.dtype('float32'),
+                       np.dtype('float64'),
+                       np.dtype('complex64'),
+                       np.dtype('complex128')]
+
+all_lm_datatypes = [np.dtype('complex64'),
+                    np.dtype('complex128')]
+
+all_gl_datatypes = [np.dtype('float64'),
+                    np.dtype('float128')]
+
+all_hp_datatypes = [np.dtype('float64')]
 
 ###############################################################################
 
-point_datamodels = ['np'] + POINT_DISTRIBUTION_STRATEGIES
+DATAMODELS = {}
+DATAMODELS['point_space'] = ['np'] + POINT_DISTRIBUTION_STRATEGIES
+DATAMODELS['rg_space'] = ['np'] + RG_DISTRIBUTION_STRATEGIES
+DATAMODELS['lm_space'] = ['np'] + LM_DISTRIBUTION_STRATEGIES
+DATAMODELS['gl_space'] = ['np'] + GL_DISTRIBUTION_STRATEGIES
+DATAMODELS['hp_space'] = ['np'] + HP_DISTRIBUTION_STRATEGIES
 
 ###############################################################################
 
@@ -54,6 +73,31 @@ all_spaces = ['space', 'point_space', 'rg_space', 'lm_space', 'hp_space',
 point_like_spaces = ['point_space', 'rg_space', 'lm_space', 'hp_space',
                      'gl_space']
 
+###############################################################################
+
+np_spaces = point_like_spaces
+d2o_spaces = []
+if POINT_DISTRIBUTION_STRATEGIES != []:
+    d2o_spaces += ['point_space']
+if RG_DISTRIBUTION_STRATEGIES != []:
+    d2o_spaces += ['rg_space']
+if LM_DISTRIBUTION_STRATEGIES != []:
+    d2o_spaces += ['lm_space']
+if GL_DISTRIBUTION_STRATEGIES != []:
+    d2o_spaces += ['gl_space']
+if HP_DISTRIBUTION_STRATEGIES != []:
+    d2o_spaces += ['hp_space']
+
+
+unary_operations = ['pos', 'neg', 'abs', 'real', 'imag', 'nanmin', 'amin',
+                    'nanmax', 'amax', 'median', 'mean', 'std', 'var', 'argmin',
+                    'argmin_flat', 'argmax', 'argmax_flat', 'conjugate', 'sum',
+                    'prod', 'unique', 'copy', 'copy_empty', 'isnan', 'isinf',
+                    'isfinite', 'nan_to_num', 'all', 'any', 'None']
+
+binary_operations = ['add', 'radd', 'iadd', 'sub', 'rsub', 'isub', 'mul',
+                     'rmul', 'imul', 'div', 'rdiv', 'idiv', 'pow', 'rpow',
+                     'ipow', 'ne', 'lt', 'le', 'eq', 'ge', 'gt', 'None']
 
 ###############################################################################
 
@@ -68,6 +112,12 @@ def generate_space(name):
     return space_dict[name]
 
 
+def generate_data(space):
+    a = np.arange(space.get_dim()).reshape(space.get_shape())
+    data = space.cast(a)
+    return data
+
+
 ###############################################################################
 ###############################################################################
 
@@ -118,7 +168,7 @@ class Test_Common_Space_Features(unittest.TestCase):
 ###############################################################################
 ###############################################################################
 
-class Test_Common_Point_Like_Space_Features(unittest.TestCase):
+class Test_Common_Point_Like_Space_Interface(unittest.TestCase):
 
     @parameterized.expand(point_like_spaces,
                           testcase_func_name=custom_name_func)
@@ -132,39 +182,64 @@ class Test_Common_Point_Like_Space_Features(unittest.TestCase):
         assert(isinstance(s.discrete, bool))
         assert(isinstance(s.harmonic, bool))
         assert(isinstance(s.distances, tuple))
-        # TODO: Make power_indices class for lm_space
-        # if s.harmonic:
-        #     assert(isinstance(s.power_indices, power_indices))
+        if s.harmonic:
+            assert(isinstance(s.power_indices, power_indices))
 
     @parameterized.expand(point_like_spaces,
                           testcase_func_name=custom_name_func)
-    def test_global_parameters(self, name):
+    def test_getters(self, name):
         s = generate_space(name)
         assert(isinstance(s.get_shape(), tuple))
-
         assert(isinstance(s.get_dim(), np.int))
-        assert(isinstance(s.get_dim(split=True), tuple))
-        assert_equal(s.get_dim(), np.prod(s.get_dim(split=True)))
 
         assert(isinstance(s.get_dof(), np.int))
         assert(isinstance(s.get_dof(split=True), tuple))
         assert_equal(s.get_dof(), np.prod(s.get_dof(split=True)))
 
+        assert(isinstance(s.get_vol(), np.float))
+        assert(isinstance(s.get_dof(split=True), tuple))
+
         assert(isinstance(s.get_meta_volume(), np.float))
         assert(isinstance(s.get_meta_volume(split=True), type(s.cast(1))))
         assert_almost_equal(
             s.get_meta_volume(), s.get_meta_volume(split=True).sum(), 2)
 
+#
+#class Test_Common_Point_Like_Space_Functions(unittest.TestCase):
+#
+#    @parameterized.expand(point_like_spaces,
+#                          testcase_func_name=custom_name_func)
+#    def test_copy(self, name):
+#        s = generate_space(name)
+#        t = s.copy()
+#        assert(s == t)
+#        assert(id(s) != id(t))
+#
+#    @parameterized.expand(point_like_spaces,
+#                          testcase_func_name=custom_name_func)
+#    def test_unary_operations(self, name):
+#        s = generate_space(name)
+#        t = s.copy()
+#
+#
+#    @parameterized.expand(point_like_spaces,
+#                          testcase_func_name=custom_name_func)
+#    def test_apply_scalar_function(self, name):
+#        s = generate_space(name)
+#        d = generate_data(s)
+#        d2 = s.apply_scalar_function(d, lambda x: x**2)
+#        assert(isinstance(d2, type(d)))
+
 
 ###############################################################################
 ###############################################################################
 
-class Test_Point_Initialization(unittest.TestCase):
+class Test_Point_Space(unittest.TestCase):
 
     @parameterized.expand(
         itertools.product([0, 1, 10],
-                          all_datatypes,
-                          point_datamodels),
+                          all_point_datatypes,
+                          DATAMODELS['point_space']),
         testcase_func_name=custom_name_func)
     def test_successfull_init(self, num, dtype, datamodel):
         p = point_space(num, dtype, datamodel)
@@ -176,6 +251,8 @@ class Test_Point_Initialization(unittest.TestCase):
         assert_equal(p.harmonic, False)
         assert_equal(p.distances, (np.float(1.),))
 
+###############################################################################
+
     def test_para(self):
         num = 10
         p = point_space(num)
@@ -185,11 +262,275 @@ class Test_Point_Initialization(unittest.TestCase):
         p.para = np.array([new_num])
         assert_equal(p.para[0], new_num)
 
+###############################################################################
+
     def test_init_fail(self):
         assert_raises(ValueError, lambda: point_space(-5))
         assert_raises(ValueError, lambda: point_space((10, 10)))
         assert_raises(ValueError, lambda: point_space(10, np.uint))
 
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product([0, 1, 10],
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_apply_scalar_function(self, num, datamodel):
+        s = point_space(num, datamodel=datamodel)
+        d = generate_data(s)
+        t = s.apply_scalar_function(d, lambda x: x**2)
+        assert(s.unary_operation(s.binary_operation(d**2, t, 'eq'), 'all'))
+        assert(id(d) != id(t))
+
+        t = s.apply_scalar_function(d, lambda x: x**2, inplace=True)
+        assert(s.unary_operation(s.binary_operation(d, t, 'eq'), 'all'))
+        assert(id(d) == id(t))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product([1, 10],
+                          unary_operations,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_unary_operations(self, num, op, datamodel):
+        s = point_space(num, datamodel=datamodel)
+        d = s.cast(np.arange(num))
+        s.unary_operation(d, op)
+        # TODO: Implement value verification
+
+    @parameterized.expand(
+        itertools.product([1, 10],
+                          binary_operations,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_binary_operations(self, num, op, datamodel):
+        s = point_space(num, datamodel=datamodel)
+        d = s.cast(np.arange(num))
+        d2 = d[::-1]
+        s.binary_operation(d, d2, op)
+        # TODO: Implement value verification
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_get_norm(self, datamodel):
+        num = 10
+        s = point_space(num, datamodel=datamodel)
+        d = s.cast(np.arange(num))
+        assert_almost_equal(s.get_norm(d), 16.881943016134134)
+        assert_almost_equal(s.get_norm(d, q=3), 12.651489979526238)
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_get_shape_dim(self, dtype, datamodel):
+        num = 10
+        s = point_space(num, dtype, datamodel=datamodel)
+
+        assert_equal(s.get_shape(), (num,))
+        assert_equal(s.get_dim(), num)
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_get_shape_dof(self, dtype, datamodel):
+        num = 10
+        s = point_space(num, dtype, datamodel=datamodel)
+
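+        # A complex value carries two real degrees of freedom (real and
+        # imaginary part), hence the factor of two for complex dtypes.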
+        if issubclass(dtype.type, np.complexfloating):
+            assert_equal(s.get_dof(), 2*num)
+            assert_equal(s.get_dof(split=True), (2*num,))
+        else:
+            assert_equal(s.get_dof(), num)
+            assert_equal(s.get_dof(split=True), (num,))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_get_shape_vol(self, dtype, datamodel):
+        num = 10
+        s = point_space(num, dtype, datamodel=datamodel)
+
+        assert_equal(s.get_vol(), 1.)
+        assert_equal(s.get_vol(split=True), (1.,))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_get_shape_metavolume(self, dtype, datamodel):
+        num = 10
+        s = point_space(num, dtype, datamodel=datamodel)
+
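+        # The total meta volume equals the number of points; the split
+        # version returns a unit weight per point.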
+        assert_equal(s.get_meta_volume(), 10.)
+        assert(s.unary_operation(s.binary_operation(
+                                                s.get_meta_volume(split=True),
+                                                s.cast(1), 'eq'),
+                                 'all'))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_cast_from_scalar(self, dtype, datamodel):
+        num = 10
+        scalar = 4
+        s = point_space(num, dtype, datamodel=datamodel)
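+        # Reference data: an array of length num filled with the scalar,
+        # built in the space's own data representation.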
+        if datamodel == 'np':
+            d = (np.ones((num,)) * scalar).astype(dtype=dtype)
+        else:
+            d = distributed_data_object(scalar,
+                                        global_shape=(num,),
+                                        dtype=dtype,
+                                        distribution_strategy=datamodel)
+
+        casted_scalar = s.cast(scalar)
+        assert(s.unary_operation(s.binary_operation(casted_scalar, d, 'eq'),
+                                 'all'))
+        if datamodel != 'np':
+            assert(d.equal(casted_scalar))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_cast_from_field(self, dtype, datamodel):
+        num = 10
+        a = np.arange(num).astype(dtype)
+        s = point_space(num, dtype, datamodel=datamodel)
+        f = field(s, val=a)
+
+        if datamodel == 'np':
+            d = a
+        else:
+            d = distributed_data_object(a, dtype=dtype,
+                                        distribution_strategy=datamodel)
+
+        casted_f = s.cast(f)
+        assert(s.unary_operation(s.binary_operation(casted_f, d, 'eq'),
+                                 'all'))
+        if datamodel != 'np':
+            assert(d.equal(casted_f))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_cast_from_ndarray(self, dtype, datamodel):
+        num = 10
+        a = np.arange(num)
+        s = point_space(num, dtype, datamodel=datamodel)
+
+        if datamodel == 'np':
+            d = a.astype(dtype)
+        else:
+            d = distributed_data_object(a, dtype=dtype,
+                                        distribution_strategy=datamodel)
+
+        casted_a = s.cast(a)
+        assert(s.unary_operation(s.binary_operation(casted_a, d, 'eq'),
+                                 'all'))
+        if datamodel != 'np':
+            assert(d.equal(casted_a))
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_cast_from_d2o(self, dtype, datamodel):
+        num = 10
+        pre_a = np.arange(num)
+        a = distributed_data_object(pre_a)
+        s = point_space(num, dtype, datamodel=datamodel)
+
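+        # Casting an existing d2o should convert it to the space's dtype
+        # and, for distributed datamodels, its distribution strategy.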
+        if datamodel == 'np':
+            d = pre_a.astype(dtype)
+        else:
+            d = distributed_data_object(a, dtype=dtype,
+                                        distribution_strategy=datamodel)
+
+        casted_a = s.cast(a)
+        assert(s.unary_operation(s.binary_operation(casted_a, d, 'eq'),
+                                 'all'))
+        if datamodel != 'np':
+            assert(d.equal(casted_a))
+
+###############################################################################
+
+    def test_raise_on_not_implementable_methods(self):
+        s = point_space(10)
+        # NOTE: the concrete exception type is implementation dependent, so
+        # the generic Exception class is asserted; the methods have to be
+        # called for assert_raises to catch anything.
+        assert_raises(Exception, lambda: s.enforce_power(1))
+        assert_raises(Exception, lambda: s.calc_smooth(s.cast(1)))
+        assert_raises(Exception, lambda: s.calc_power(s.cast(1)))
+
+###############################################################################
+
+    @parameterized.expand(
+        [[10, np.dtype('float64'), 'equal'],
+         [10, np.dtype('float32'), 'np'],
+         [12, np.dtype('float64'), 'np']],
+        testcase_func_name=custom_name_func)
+    def test_get_check_codomain(self, num, dtype, datamodel):
+        s = point_space(10, dtype=np.dtype('float64'), datamodel='np')
+
+        t = s.get_codomain()
+        assert(s.check_codomain(t))
+
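+        # A codomain differing in dimension, dtype or datamodel must be
+        # rejected.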
+        t_bad = point_space(num, dtype=dtype, datamodel=datamodel)
+        assert(s.check_codomain(t_bad) == False)
+
+        assert(s.check_codomain(None) == False)
+
+###############################################################################
+
+    @parameterized.expand(
+        itertools.product(all_point_datatypes,
+                          DATAMODELS['point_space']),
+        testcase_func_name=custom_name_func)
+    def test_get_random_values(self, dtype, datamodel):
+        num = 100000
+        s = point_space(num, dtype, datamodel=datamodel)
+
+        pm = s.get_random_values(random='pm1')
+        assert_almost_equal(0, s.unary_operation(pm, op='mean'), 2)
+
+        # NOTE: the dev value here is an illustrative choice; the check
+        # only verifies that the sample mean lands near the requested mean.
+        gau = s.get_random_values(random='gau',
+                                  mean=10,
+                                  dev=2)
+        assert_almost_equal(10, s.unary_operation(gau, op='mean'), 1)