diff --git a/demos/critical_filtering.py b/demos/critical_filtering.py
index 0011e23f00a3757192ce78f1c86ff66f15bfbba9..d18f94af764fc23b7560bf89ab813fa3f4edf1ff 100644
--- a/demos/critical_filtering.py
+++ b/demos/critical_filtering.py
@@ -69,8 +69,8 @@ if __name__ == "__main__":
     # Creating the mock data
     d = noiseless_data + n
 
-    m0 = ift.Field.full(h_space, 1e-7)
-    t0 = ift.Field.full(p_space, -4.)
+    m0 = ift.full(h_space, 1e-7)
+    t0 = ift.full(p_space, -4.)
     power0 = Distributor.times(ift.exp(0.5 * t0))
 
     plotdict = {"colormap": "Planck-like"}
diff --git a/demos/krylov_sampling.py b/demos/krylov_sampling.py
index 7b316e4db609926a4364f5b5b50eddb214e3deee..4987437ae681cb8f35773b04326788f3a2e91574 100644
--- a/demos/krylov_sampling.py
+++ b/demos/krylov_sampling.py
@@ -31,7 +31,7 @@ d = R(s_x) + n
 
 R_p = R * FFT * A
 j = R_p.adjoint(N.inverse(d))
-D_inv = ift.SandwichOperator(R_p, N.inverse) + S.inverse
+D_inv = ift.SandwichOperator.make(R_p, N.inverse) + S.inverse
 
 
 N_samps = 200
@@ -67,8 +67,8 @@ plt.legend()
 plt.savefig('Krylov_samples_residuals.png')
 plt.close()
 
-D_hat_old = ift.Field.zeros(x_space).to_global_data()
-D_hat_new = ift.Field.zeros(x_space).to_global_data()
+D_hat_old = ift.full(x_space, 0.).to_global_data()
+D_hat_new = ift.full(x_space, 0.).to_global_data()
 for i in range(N_samps):
     D_hat_old += sky(samps_old[i]).to_global_data()**2
     D_hat_new += sky(samps[i]).to_global_data()**2
diff --git a/demos/nonlinear_critical_filter.py b/demos/nonlinear_critical_filter.py
index f1b84ab5cca49e2ef03baaf66accf1475c0070e8..8477acd9ee254f84fbaafbe1bc6efde7bf712a9e 100644
--- a/demos/nonlinear_critical_filter.py
+++ b/demos/nonlinear_critical_filter.py
@@ -69,8 +69,8 @@ if __name__ == "__main__":
     # Creating the mock data
     d = noiseless_data + n
 
-    m0 = ift.Field.full(h_space, 1e-7)
-    t0 = ift.Field.full(p_space, -4.)
+    m0 = ift.full(h_space, 1e-7)
+    t0 = ift.full(p_space, -4.)
     power0 = Distributor.times(ift.exp(0.5 * t0))
 
     IC1 = ift.GradientNormController(name="IC1", iteration_limit=100,
diff --git a/demos/nonlinear_wiener_filter.py b/demos/nonlinear_wiener_filter.py
index be58771570245ea958d632ad1c9ff03e6a97e507..d19e739ac3d9aa48e377c557c4f71c5aaa26ee4a 100644
--- a/demos/nonlinear_wiener_filter.py
+++ b/demos/nonlinear_wiener_filter.py
@@ -36,7 +36,7 @@ if __name__ == "__main__":
     d_space = R.target
 
     p_op = ift.create_power_operator(h_space, p_spec)
-    power = ift.sqrt(p_op(ift.Field.full(h_space, 1.)))
+    power = ift.sqrt(p_op(ift.full(h_space, 1.)))
 
     # Creating the mock data
     true_sky = nonlinearity(HT(power*sh))
@@ -57,7 +57,7 @@ if __name__ == "__main__":
     inverter = ift.ConjugateGradient(controller=ICI)
 
     # initial guess
-    m = ift.Field.full(h_space, 1e-7)
+    m = ift.full(h_space, 1e-7)
     map_energy = ift.library.NonlinearWienerFilterEnergy(
         m, d, R, nonlinearity, HT, power, N, S, inverter=inverter)
 
diff --git a/demos/poisson_demo.py b/demos/poisson_demo.py
index 6c62fa645fb40affbaf94becdf9df007ce548cba..18a269d56305f1bf92c0881a78a2f7201e5804ec 100644
--- a/demos/poisson_demo.py
+++ b/demos/poisson_demo.py
@@ -80,12 +80,12 @@ if __name__ == "__main__":
     IC = ift.GradientNormController(name="inverter", iteration_limit=500,
                                     tol_abs_gradnorm=1e-3)
     inverter = ift.ConjugateGradient(controller=IC)
-    D = (ift.SandwichOperator(R, N.inverse) + Phi_h.inverse).inverse
+    D = (ift.SandwichOperator.make(R, N.inverse) + Phi_h.inverse).inverse
     D = ift.InversionEnabler(D, inverter, approximation=Phi_h)
     m = HT(D(j))
 
     # Uncertainty
-    D = ift.SandwichOperator(aHT, D)  # real space propagator
+    D = ift.SandwichOperator.make(aHT, D)  # real space propagator
     Dhat = ift.probe_with_posterior_samples(D.inverse, None,
                                             nprobes=nprobes)[1]
     sig = ift.sqrt(Dhat)
@@ -113,7 +113,7 @@ if __name__ == "__main__":
         d_domain, np.random.poisson(lam.local_data).astype(np.float64))
 
     # initial guess
-    psi0 = ift.Field.full(h_domain, 1e-7)
+    psi0 = ift.full(h_domain, 1e-7)
     energy = ift.library.PoissonEnergy(psi0, data, R0, nonlin, HT, Phi_h,
                                        inverter)
     IC1 = ift.GradientNormController(name="IC1", iteration_limit=200,
diff --git a/demos/wiener_filter_data_space_noiseless.py b/demos/wiener_filter_data_space_noiseless.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdedcb59e7da1b98ed11563912b1fa2a82d46209
--- /dev/null
+++ b/demos/wiener_filter_data_space_noiseless.py
@@ -0,0 +1,115 @@
+import numpy as np
+import nifty4 as ift
+
+
+# TODO: MAKE RESPONSE MPI COMPATIBLE OR USE LOS RESPONSE INSTEAD
+
+class CustomResponse(ift.LinearOperator):
+    """
+    A custom operator that measures the field at specific points.
+
+    The operator performs a delta measurement at the given index positions.
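+
+    For a two-dimensional domain, ``data_points`` can be a pair of
+    broadcastable index arrays (as in the usage further below), e.g.
+    ``[np.arange(0, N, 5)[:, np.newaxis], np.arange(0, N, 2)[np.newaxis, :]]``,
+    which samples the field on a regular sub-grid.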
+    """
+    def __init__(self, domain, data_points):
+        self._domain = ift.DomainTuple.make(domain)
+        self._points = data_points
+        data_shape = ift.Field.full(domain, 0.).to_global_data()[data_points]\
+            .shape
+        self._target = ift.DomainTuple.make(ift.UnstructuredDomain(data_shape))
+
+    def _times(self, x):
+        d = np.zeros(self._target.shape, dtype=np.float64)
+        d += x.to_global_data()[self._points]
+        return ift.from_global_data(self._target, d)
+
+    def _adjoint_times(self, d):
+        x = np.zeros(self._domain.shape, dtype=np.float64)
+        x[self._points] += d.to_global_data()
+        return ift.from_global_data(self._domain, x)
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def target(self):
+        return self._target
+
+    def apply(self, x, mode):
+        self._check_input(x, mode)
+        return self._times(x) if mode == self.TIMES else self._adjoint_times(x)
+
+    @property
+    def capability(self):
+        return self.TIMES | self.ADJOINT_TIMES
+
+if __name__ == "__main__":
+    np.random.seed(43)
+    # Set up physical constants
+    # Total length of interval or volume the field lives on, e.g. in meters
+    L = 2.
+    # Typical distance over which the field is correlated (in same unit as L)
+    correlation_length = 0.3
+    # Variance of field in position space sqrt(<|s_x|^2>) (in same unit as s)
+    field_variance = 2.
+    # Smoothing length of response (in same unit as L)
+    response_sigma = 0.01
+    # typical noise amplitude of the measurement
+    noise_level = 0.
+
+    # Define resolution (pixels per dimension)
+    N_pixels = 256
+
+    # Set up derived constants
+    k_0 = 1./correlation_length
+    # defining a power spectrum with the right correlation length
+    # we later set the field variance to the desired value
+    unscaled_pow_spec = (lambda k: 1. / (1 + k/k_0) ** 4)
+    pixel_width = L/N_pixels
+
+    # Set up the geometry
+    s_space = ift.RGSpace([N_pixels, N_pixels], distances=pixel_width)
+    h_space = s_space.get_default_codomain()
+    s_var = ift.get_signal_variance(unscaled_pow_spec, h_space)
+    pow_spec = (lambda k: unscaled_pow_spec(k)/s_var*field_variance**2)
+
+    HT = ift.HarmonicTransformOperator(h_space, s_space)
+
+    # Create mock data
+
+    Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
+    sh = Sh.draw_sample()
+
+    Rx = CustomResponse(s_space, [np.arange(0, N_pixels, 5)[:, np.newaxis],
+                                  np.arange(0, N_pixels, 2)[np.newaxis, :]])
+    ift.extra.consistency_check(Rx)
+    a = ift.Field.from_random('normal', s_space)
+    b = ift.Field.from_random('normal', Rx.target)
+    R = Rx * HT
+
+    noiseless_data = R(sh)
+    N = ift.ScalingOperator(noise_level**2, R.target)
+    n = N.draw_sample()
+
+    d = noiseless_data + n
+
+    # Wiener filter
+
+    IC = ift.GradientNormController(name="inverter", iteration_limit=1000,
+                                    tol_abs_gradnorm=0.0001)
+    inverter = ift.ConjugateGradient(controller=IC)
+    # setting up the data-space covariance M = R Sh R^dagger + N
+    # (its inverse is the measurement precision applied below)
+    M = (ift.SandwichOperator.make(R.adjoint, Sh) + N)
+    M = ift.InversionEnabler(M, inverter)
+    m = Sh(R.adjoint(M.inverse_times(d)))
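+    # i.e. m = Sh R^dagger (R Sh R^dagger + N)^{-1} d, the Wiener filter mean
+    # evaluated via the data-space formulation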
+
+    # Plotting
+    backprojection = Rx.adjoint(d)
+    reweighted_backprojection = (backprojection / backprojection.max() *
+                                 HT(sh).max())
+    zmax = max(HT(sh).max(), reweighted_backprojection.max(), HT(m).max())
+    zmin = min(HT(sh).min(), reweighted_backprojection.min(), HT(m).min())
+    plotdict = {"colormap": "Planck-like", "zmax": zmax, "zmin": zmin}
+    ift.plot(HT(sh), name="mock_signal.png", **plotdict)
+    ift.plot(backprojection, name="backprojected_data.png", **plotdict)
+    ift.plot(HT(m), name="reconstruction.png", **plotdict)
diff --git a/demos/wiener_filter_easy.py b/demos/wiener_filter_easy.py
index f1b2322a54835f195bf5a60ea9424d26716c6fbc..788bbf6b88862edacb09a2de861d03351aba43ac 100644
--- a/demos/wiener_filter_easy.py
+++ b/demos/wiener_filter_easy.py
@@ -1,6 +1,7 @@
 import numpy as np
 import nifty4 as ift
 
+
 if __name__ == "__main__":
     np.random.seed(43)
     # Set up physical constants
@@ -12,21 +13,25 @@ if __name__ == "__main__":
     field_variance = 2.
     # Smoothing length of response (in same unit as L)
     response_sigma = 0.01
+    # typical noise amplitude of the measurement
+    noise_level = 1.
 
     # Define resolution (pixels per dimension)
     N_pixels = 256
 
     # Set up derived constants
     k_0 = 1./correlation_length
-    # Note that field_variance**2 = a*k_0/4. for this analytic form of power
-    # spectrum
-    a = field_variance**2/k_0*4.
-    pow_spec = (lambda k: a / (1 + k/k_0) ** 4)
+    # defining a power spectrum with the right correlation length
+    # we later set the field variance to the desired value
+    unscaled_pow_spec = (lambda k: 1. / (1 + k/k_0) ** 4)
     pixel_width = L/N_pixels
 
     # Set up the geometry
     s_space = ift.RGSpace([N_pixels, N_pixels], distances=pixel_width)
     h_space = s_space.get_default_codomain()
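+    # rescale the spectrum so that the expected position-space variance
+    # of a signal sample equals field_variance**2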
+    s_var = ift.get_signal_variance(unscaled_pow_spec, h_space)
+    pow_spec = (lambda k: unscaled_pow_spec(k)/s_var*field_variance**2)
+
     HT = ift.HarmonicTransformOperator(h_space, s_space)
 
     # Create mock data
@@ -36,11 +41,8 @@ if __name__ == "__main__":
 
     R = HT*ift.create_harmonic_smoothing_operator((h_space,), 0,
                                                   response_sigma)
-
     noiseless_data = R(sh)
-    signal_to_noise = 1.
-    noise_amplitude = noiseless_data.val.std()/signal_to_noise
-    N = ift.ScalingOperator(noise_amplitude**2, s_space)
+    N = ift.ScalingOperator(noise_level**2, s_space)
     n = N.draw_sample()
 
     d = noiseless_data + n
@@ -51,7 +53,7 @@ if __name__ == "__main__":
     IC = ift.GradientNormController(name="inverter", iteration_limit=500,
                                     tol_abs_gradnorm=0.1)
     inverter = ift.ConjugateGradient(controller=IC)
-    D = (ift.SandwichOperator(R, N.inverse) + Sh.inverse).inverse
+    D = (ift.SandwichOperator.make(R, N.inverse) + Sh.inverse).inverse
     D = ift.InversionEnabler(D, inverter, approximation=Sh)
     m = D(j)
 
diff --git a/demos/wiener_filter_via_hamiltonian.py b/demos/wiener_filter_via_hamiltonian.py
index 124a625a2504eee5b9770b17a28ce1926d451081..e83de5c7438deb97e0a8157e97eb5e4b7e7dfc53 100644
--- a/demos/wiener_filter_via_hamiltonian.py
+++ b/demos/wiener_filter_via_hamiltonian.py
@@ -50,7 +50,7 @@ if __name__ == "__main__":
     inverter = ift.ConjugateGradient(controller=ctrl)
     controller = ift.GradientNormController(name="min", tol_abs_gradnorm=0.1)
     minimizer = ift.RelaxedNewton(controller=controller)
-    m0 = ift.Field.zeros(h_space)
+    m0 = ift.full(h_space, 0.)
 
     # Initialize Wiener filter energy
     energy = ift.library.WienerFilterEnergy(position=m0, d=d, R=R, N=N, S=S,
diff --git a/nifty4/__init__.py b/nifty4/__init__.py
index d74654389083ea70f1f1407a3faa07184bf1b13c..2f1de367bdd911dec158ce18d0998931a6667c7d 100644
--- a/nifty4/__init__.py
+++ b/nifty4/__init__.py
@@ -8,7 +8,7 @@ from .domain_tuple import DomainTuple
 
 from .operators import *
 
-from .field import Field, sqrt, exp, log
+from .field import Field
 
 from .probing.utils import probe_with_posterior_samples, probe_diagonal, \
     StatCalculator
diff --git a/nifty4/data_objects/distributed_do.py b/nifty4/data_objects/distributed_do.py
index fb238e3a5f85b1f0493802a2a0c74fa2bdd74504..14d132d36ef0df546dfb09a2f9d8fbef28377ddf 100644
--- a/nifty4/data_objects/distributed_do.py
+++ b/nifty4/data_objects/distributed_do.py
@@ -20,6 +20,7 @@ import numpy as np
 from .random import Random
 from mpi4py import MPI
 import sys
+from functools import reduce
 
 _comm = MPI.COMM_WORLD
 ntask = _comm.Get_size()
@@ -145,20 +146,29 @@ class data_object(object):
     def sum(self, axis=None):
         return self._contraction_helper("sum", MPI.SUM, axis)
 
+    def prod(self, axis=None):
+        return self._contraction_helper("prod", MPI.PROD, axis)
+
     def min(self, axis=None):
         return self._contraction_helper("min", MPI.MIN, axis)
 
     def max(self, axis=None):
         return self._contraction_helper("max", MPI.MAX, axis)
 
-    def mean(self):
-        return self.sum()/self.size
+    def mean(self, axis=None):
+        if axis is None:
+            sz = self.size
+        else:
+            sz = reduce(lambda x, y: x*y, [self.shape[i] for i in axis])
+        return self.sum(axis)/sz
 
-    def std(self):
-        return np.sqrt(self.var())
+    def std(self, axis=None):
+        return np.sqrt(self.var(axis))
 
     # FIXME: to be improved!
-    def var(self):
+    def var(self, axis=None):
+        if axis is not None and len(axis) != len(self.shape):
+            raise ValueError("functionality not yet supported")
         return (abs(self-self.mean())**2).mean()
 
     def _binary_helper(self, other, op):
diff --git a/nifty4/domain_tuple.py b/nifty4/domain_tuple.py
index 21779f5474870894bfa05e5cffd38ce07c72117f..39eafa8852c846ab213ed5c5efdbb6598e73ab80 100644
--- a/nifty4/domain_tuple.py
+++ b/nifty4/domain_tuple.py
@@ -34,7 +34,9 @@ class DomainTuple(object):
     """
     _tupleCache = {}
 
-    def __init__(self, domain):
+    def __init__(self, domain, _callingfrommake=False):
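+        # DomainTuple objects should be created via the static make() method
+        # (which passes _callingfrommake); direct construction is disabled.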
+        if not _callingfrommake:
+            raise NotImplementedError
         self._dom = self._parse_domain(domain)
         self._axtuple = self._get_axes_tuple()
         shape_tuple = tuple(sp.shape for sp in self._dom)
@@ -72,7 +74,7 @@ class DomainTuple(object):
         obj = DomainTuple._tupleCache.get(domain)
         if obj is not None:
             return obj
-        obj = DomainTuple(domain)
+        obj = DomainTuple(domain, _callingfrommake=True)
         DomainTuple._tupleCache[domain] = obj
         return obj
 
diff --git a/nifty4/domains/domain.py b/nifty4/domains/domain.py
index 9f22a5baa1e55b938d4e5ccaf934bd3ca581e904..38db023bd752b25501e8b2029bfdc85e77e971c3 100644
--- a/nifty4/domains/domain.py
+++ b/nifty4/domains/domain.py
@@ -23,6 +23,8 @@ from ..utilities import NiftyMetaBase
 class Domain(NiftyMetaBase()):
     """The abstract class repesenting a (structured or unstructured) domain.
     """
+    def __init__(self):
+        self._hash = None
 
     @abc.abstractmethod
     def __repr__(self):
@@ -36,10 +38,12 @@ class Domain(NiftyMetaBase()):
         Only members that are explicitly added to
         :attr:`._needed_for_hash` will be used for hashing.
         """
-        result_hash = 0
-        for key in self._needed_for_hash:
-            result_hash ^= hash(vars(self)[key])
-        return result_hash
+        if self._hash is None:
+            h = 0
+            for key in self._needed_for_hash:
+                h ^= hash(vars(self)[key])
+            self._hash = h
+        return self._hash
 
     def __eq__(self, x):
         """Checks whether two domains are equal.
diff --git a/nifty4/domains/lm_space.py b/nifty4/domains/lm_space.py
index 721171a110a989b9b7f593055c3768617374ce8f..91968a9df0f9b95a9861a6dffc6ca01b033baf53 100644
--- a/nifty4/domains/lm_space.py
+++ b/nifty4/domains/lm_space.py
@@ -19,7 +19,7 @@
 from __future__ import division
 import numpy as np
 from .structured_domain import StructuredDomain
-from ..field import Field, exp
+from ..field import Field
 
 
 class LMSpace(StructuredDomain):
@@ -100,6 +100,8 @@ class LMSpace(StructuredDomain):
         # cf. "All-sky convolution for polarimetry experiments"
         # by Challinor et al.
         # http://arxiv.org/abs/astro-ph/0008228
+        from ..sugar import exp
+
         res = x+1.
         res *= x
         res *= -0.5*sigma*sigma
diff --git a/nifty4/domains/rg_space.py b/nifty4/domains/rg_space.py
index 5f8e0a1fdfd046a23bf7426bbb748ca295ba1176..118ee47dbeb95ffd2aefa5811ac3e8ea34220496 100644
--- a/nifty4/domains/rg_space.py
+++ b/nifty4/domains/rg_space.py
@@ -21,7 +21,7 @@ from builtins import range
 from functools import reduce
 import numpy as np
 from .structured_domain import StructuredDomain
-from ..field import Field, exp
+from ..field import Field
 from .. import dobj
 
 
@@ -144,6 +144,7 @@ class RGSpace(StructuredDomain):
 
     @staticmethod
     def _kernel(x, sigma):
+        from ..sugar import exp
         tmp = x*x
         tmp *= -2.*np.pi*np.pi*sigma*sigma
         exp(tmp, out=tmp)
diff --git a/nifty4/extra/energy_tests.py b/nifty4/extra/energy_tests.py
index 64cebb5be997a0b4f0bc0d67219a90784b93c927..ce166c9616615374c30f0e7a02c7d8a27fa8e556 100644
--- a/nifty4/extra/energy_tests.py
+++ b/nifty4/extra/energy_tests.py
@@ -18,15 +18,19 @@
 
 import numpy as np
 from ..field import Field
+from ..sugar import from_random
 
 __all__ = ["check_value_gradient_consistency",
            "check_value_gradient_curvature_consistency"]
 
 
 def _get_acceptable_energy(E):
-    if not np.isfinite(E.value):
+    val = E.value
+    if not np.isfinite(val):
         raise ValueError
-    dir = Field.from_random("normal", E.position.domain)
+    dir = from_random("normal", E.position.domain)
+    dirder = E.gradient.vdot(dir)
+    dir *= np.abs(val)/np.abs(dirder)*1e-5
     # find a step length that leads to a "reasonable" energy
     for i in range(50):
         try:
@@ -44,12 +48,13 @@ def _get_acceptable_energy(E):
 def check_value_gradient_consistency(E, tol=1e-6, ntries=100):
     for _ in range(ntries):
         E2 = _get_acceptable_energy(E)
+        val = E.value
         dir = E2.position - E.position
         Enext = E2
         dirnorm = dir.norm()
         dirder = E.gradient.vdot(dir)/dirnorm
         for i in range(50):
-            if abs((E2.value-E.value)/dirnorm-dirder) < tol:
+            if abs((E2.value-val)/dirnorm-dirder) < tol:
                 break
             dir *= 0.5
             dirnorm *= 0.5
diff --git a/nifty4/extra/operator_tests.py b/nifty4/extra/operator_tests.py
index 258c2a877e4487c4222ff262e24cae1407d5d75b..45b8437e5d81c69f527c4eefdbbde9c46ab2611c 100644
--- a/nifty4/extra/operator_tests.py
+++ b/nifty4/extra/operator_tests.py
@@ -17,17 +17,26 @@
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
 import numpy as np
+from ..sugar import from_random
 from ..field import Field
 
 __all__ = ["consistency_check"]
 
 
+def _assert_allclose(f1, f2, atol, rtol):
+    if isinstance(f1, Field):
+        return np.testing.assert_allclose(f1.local_data, f2.local_data,
+                                          atol=atol, rtol=rtol)
+    for key, val in f1.items():
+        _assert_allclose(val, f2[key], atol=atol, rtol=rtol)
+
+
 def adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol):
     needed_cap = op.TIMES | op.ADJOINT_TIMES
     if (op.capability & needed_cap) != needed_cap:
         return
-    f1 = Field.from_random("normal", op.domain, dtype=domain_dtype).lock()
-    f2 = Field.from_random("normal", op.target, dtype=target_dtype).lock()
+    f1 = from_random("normal", op.domain, dtype=domain_dtype).lock()
+    f2 = from_random("normal", op.target, dtype=target_dtype).lock()
     res1 = f1.vdot(op.adjoint_times(f2).lock())
     res2 = op.times(f1).vdot(f2)
     np.testing.assert_allclose(res1, res2, atol=atol, rtol=rtol)
@@ -37,15 +46,13 @@ def inverse_implementation(op, domain_dtype, target_dtype, atol, rtol):
     needed_cap = op.TIMES | op.INVERSE_TIMES
     if (op.capability & needed_cap) != needed_cap:
         return
-    foo = Field.from_random("normal", op.target, dtype=target_dtype).lock()
+    foo = from_random("normal", op.target, dtype=target_dtype).lock()
     res = op(op.inverse_times(foo).lock())
-    np.testing.assert_allclose(res.to_global_data(), res.to_global_data(),
-                               atol=atol, rtol=rtol)
+    _assert_allclose(res, foo, atol=atol, rtol=rtol)
 
-    foo = Field.from_random("normal", op.domain, dtype=domain_dtype).lock()
+    foo = from_random("normal", op.domain, dtype=domain_dtype).lock()
     res = op.inverse_times(op(foo).lock())
-    np.testing.assert_allclose(res.to_global_data(), foo.to_global_data(),
-                               atol=atol, rtol=rtol)
+    _assert_allclose(res, foo, atol=atol, rtol=rtol)
 
 
 def full_implementation(op, domain_dtype, target_dtype, atol, rtol):
diff --git a/nifty4/field.py b/nifty4/field.py
index 289faf2375dca610c8aac5e0b5e4954235e07cd6..7c5e88c6d0b4dd9a4c7555cf2ddc9e062e9b620a 100644
--- a/nifty4/field.py
+++ b/nifty4/field.py
@@ -106,62 +106,10 @@ class Field(object):
             raise TypeError("val must be a scalar")
         return Field(DomainTuple.make(domain), val, dtype)
 
-    @staticmethod
-    def ones(domain, dtype=None):
-        return Field(DomainTuple.make(domain), 1., dtype)
-
-    @staticmethod
-    def zeros(domain, dtype=None):
-        return Field(DomainTuple.make(domain), 0., dtype)
-
     @staticmethod
     def empty(domain, dtype=None):
         return Field(DomainTuple.make(domain), None, dtype)
 
-    @staticmethod
-    def full_like(field, val, dtype=None):
-        """Creates a Field from a template, filled with a constant value.
-
-        Parameters
-        ----------
-        field : Field
-            the template field, from which the domain is inferred
-        val : float/complex/int scalar
-            fill value. Data type of the field is inferred from val.
-
-        Returns
-        -------
-        Field
-            the newly created field
-        """
-        if not isinstance(field, Field):
-            raise TypeError("field must be of Field type")
-        return Field.full(field._domain, val, dtype)
-
-    @staticmethod
-    def zeros_like(field, dtype=None):
-        if not isinstance(field, Field):
-            raise TypeError("field must be of Field type")
-        if dtype is None:
-            dtype = field.dtype
-        return Field.zeros(field._domain, dtype)
-
-    @staticmethod
-    def ones_like(field, dtype=None):
-        if not isinstance(field, Field):
-            raise TypeError("field must be of Field type")
-        if dtype is None:
-            dtype = field.dtype
-        return Field.ones(field._domain, dtype)
-
-    @staticmethod
-    def empty_like(field, dtype=None):
-        if not isinstance(field, Field):
-            raise TypeError("field must be of Field type")
-        if dtype is None:
-            dtype = field.dtype
-        return Field.empty(field._domain, dtype)
-
     @staticmethod
     def from_global_data(domain, arr, sum_up=False):
         """Returns a Field constructed from `domain` and `arr`.
@@ -287,6 +235,7 @@ class Field(object):
             The value to fill the field with.
         """
         self._val.fill(fill_value)
+        return self
 
     def lock(self):
         """Write-protect the data content of `self`.
@@ -370,6 +319,17 @@ class Field(object):
         """
         return Field(val=self, copy=True)
 
+    def empty_copy(self):
+        """ Returns a Field with identical domain and data type, but
+        uninitialized data.
+
+        Returns
+        -------
+        Field
+            A copy of 'self', with uninitialized data.
+        """
+        return Field(self._domain, dtype=self.dtype)
+
     def locked_copy(self):
         """ Returns a read-only version of the Field.
 
@@ -503,8 +463,8 @@ class Field(object):
                               or Field (for partial dot products)
         """
         if not isinstance(x, Field):
-            raise ValueError("The dot-partner must be an instance of " +
-                             "the NIFTy field class")
+            raise TypeError("The dot-partner must be an instance of " +
+                            "the NIFTy field class")
 
         if x._domain != self._domain:
             raise ValueError("Domain mismatch")
@@ -694,7 +654,8 @@ class Field(object):
         if self.scalar_weight(spaces) is not None:
             return self._contraction_helper('mean', spaces)
         # MR FIXME: not very efficient
-        tmp = self.weight(1)
+        # MR FIXME: do we need "spaces" here?
+        tmp = self.weight(1, spaces)
         return tmp.sum(spaces)*(1./tmp.total_volume(spaces))
 
     def var(self, spaces=None):
@@ -717,12 +678,10 @@ class Field(object):
         # MR FIXME: not very efficient or accurate
         m1 = self.mean(spaces)
         if np.issubdtype(self.dtype, np.complexfloating):
-            sq = abs(self)**2
-            m1 = abs(m1)**2
+            sq = abs(self-m1)**2
         else:
-            sq = self**2
-            m1 **= 2
-        return sq.mean(spaces) - m1
+            sq = (self-m1)**2
+        return sq.mean(spaces)
 
     def std(self, spaces=None):
         """Determines the standard deviation over the sub-domains given by
@@ -742,6 +701,7 @@ class Field(object):
             The result of the operation. If it is carried out over the entire
             domain, this is a scalar, otherwise a Field.
         """
+        from .sugar import sqrt
         if self.scalar_weight(spaces) is not None:
             return self._contraction_helper('std', spaces)
         return sqrt(self.var(spaces))
@@ -785,24 +745,3 @@ for op in ["__add__", "__radd__", "__iadd__",
             return NotImplemented
         return func2
     setattr(Field, op, func(op))
-
-
-# Arithmetic functions working on Fields
-
-_current_module = sys.modules[__name__]
-
-for f in ["sqrt", "exp", "log", "tanh", "conjugate"]:
-    def func(f):
-        def func2(x, out=None):
-            fu = getattr(dobj, f)
-            if not isinstance(x, Field):
-                raise TypeError("This function only accepts Field objects.")
-            if out is not None:
-                if not isinstance(out, Field) or x._domain != out._domain:
-                    raise ValueError("Bad 'out' argument")
-                fu(x.val, out=out.val)
-                return out
-            else:
-                return Field(domain=x._domain, val=fu(x.val))
-        return func2
-    setattr(_current_module, f, func(f))
diff --git a/nifty4/library/krylov_sampling.py b/nifty4/library/krylov_sampling.py
index bed247e065648fba5aa481bada1d5015f7efafb1..d83a8a8f213fb4aeefd4c4a8713e0cc218db85a7 100644
--- a/nifty4/library/krylov_sampling.py
+++ b/nifty4/library/krylov_sampling.py
@@ -54,7 +54,7 @@ def generate_krylov_samples(D_inv, S, j, N_samps, controller):
     """
     # RL FIXME: make consistent with complex numbers
     j = S.draw_sample(from_inverse=True) if j is None else j
-    energy = QuadraticEnergy(j*0., D_inv, j)
+    energy = QuadraticEnergy(j.empty_copy().fill(0.), D_inv, j)
     y = [S.draw_sample() for _ in range(N_samps)]
 
     status = controller.start(energy)
diff --git a/nifty4/library/noise_energy.py b/nifty4/library/noise_energy.py
index cfad61cfcd7a72e2a3330363c82b7c5073d6c9e4..dc15146c08b7226c05a8370f22770709189afa6d 100644
--- a/nifty4/library/noise_energy.py
+++ b/nifty4/library/noise_energy.py
@@ -16,7 +16,8 @@
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
-from ..field import Field, exp
+from ..field import Field
+from ..sugar import exp
 from ..minimization.energy import Energy
 from ..operators.diagonal_operator import DiagonalOperator
 import numpy as np
diff --git a/nifty4/library/nonlinear_power_energy.py b/nifty4/library/nonlinear_power_energy.py
index e76defdd6121e8f6f2d28efc63ff91155bbf2936..dff636a7934983cb3c494b38bcec5e5351223770 100644
--- a/nifty4/library/nonlinear_power_energy.py
+++ b/nifty4/library/nonlinear_power_energy.py
@@ -16,7 +16,7 @@
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
-from .. import exp
+from ..sugar import exp
 from ..minimization.energy import Energy
 from ..operators.smoothness_operator import SmoothnessOperator
 from ..operators.inversion_enabler import InversionEnabler
diff --git a/nifty4/library/nonlinearities.py b/nifty4/library/nonlinearities.py
index 810054d0edeee7aebc1f7c280ec277755922b1e0..648290c8b2e949928565ae618a77a78c74ea11e2 100644
--- a/nifty4/library/nonlinearities.py
+++ b/nifty4/library/nonlinearities.py
@@ -16,7 +16,7 @@
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
-from ..field import Field, exp, tanh
+from ..sugar import full, exp, tanh
 
 
 class Linear(object):
@@ -24,10 +24,10 @@ class Linear(object):
         return x
 
     def derivative(self, x):
-        return Field.ones_like(x)
+        return full(x.domain, 1.)
 
     def hessian(self, x):
-        return Field.zeros_like(x)
+        return full(x.domain, 0.)
 
 
 class Exponential(object):
diff --git a/nifty4/library/poisson_energy.py b/nifty4/library/poisson_energy.py
index 637cd28f81348240e68ea6c6cd32a3338ed5b48d..f3ea413161ade66cd77ba28c2ad887c7d23f13ed 100644
--- a/nifty4/library/poisson_energy.py
+++ b/nifty4/library/poisson_energy.py
@@ -20,7 +20,7 @@ from ..minimization.energy import Energy
 from ..operators.diagonal_operator import DiagonalOperator
 from ..operators.sandwich_operator import SandwichOperator
 from ..operators.inversion_enabler import InversionEnabler
-from ..field import log
+from ..sugar import log
 
 
 class PoissonEnergy(Energy):
@@ -46,7 +46,7 @@ class PoissonEnergy(Energy):
 
         R1 = Instrument*Rho*ht
         self._grad = (phipos + R1.adjoint_times((lam-d)/(lam+eps))).lock()
-        self._curv = Phi_h.inverse + SandwichOperator(R1, W)
+        self._curv = Phi_h.inverse + SandwichOperator.make(R1, W)
 
     def at(self, position):
         return self.__class__(position, self._d, self._Instrument,
diff --git a/nifty4/library/wiener_filter_curvature.py b/nifty4/library/wiener_filter_curvature.py
index 2fbc1bc7ecf81dcc720aac3b303304b12be665e9..94fa93e49a282a44967a0eab51ec21a04cf361dc 100644
--- a/nifty4/library/wiener_filter_curvature.py
+++ b/nifty4/library/wiener_filter_curvature.py
@@ -39,5 +39,5 @@ def WienerFilterCurvature(R, N, S, inverter):
     inverter : Minimizer
         The minimizer to use during numerical inversion
     """
-    op = SandwichOperator(R, N.inverse) + S.inverse
+    op = SandwichOperator.make(R, N.inverse) + S.inverse
     return InversionEnabler(op, inverter, S.inverse)
diff --git a/nifty4/logger.py b/nifty4/logger.py
index df37ae11436b264924423ba263703d0d1c53d9ea..d49dd42ee6a510622691a5db582b56d7189c3d01 100644
--- a/nifty4/logger.py
+++ b/nifty4/logger.py
@@ -16,6 +16,7 @@
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
+
 def _logger_init():
     import logging
     from . import dobj
diff --git a/nifty4/multi/__init__.py b/nifty4/multi/__init__.py
index adcaed6a48d94da6aeb52968a14bed98c4431d43..f627ea25774fcf14d2d8e12bcbddbaa4fe21ac0e 100644
--- a/nifty4/multi/__init__.py
+++ b/nifty4/multi/__init__.py
@@ -1,4 +1,5 @@
 from .multi_domain import MultiDomain
 from .multi_field import MultiField
+from .block_diagonal_operator import BlockDiagonalOperator
 
-__all__ = ["MultiDomain", "MultiField"]
+__all__ = ["MultiDomain", "MultiField", "BlockDiagonalOperator"]
diff --git a/nifty4/multi/block_diagonal_operator.py b/nifty4/multi/block_diagonal_operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..35a1dcca09ea107f699ed85cfa9b8ea763719865
--- /dev/null
+++ b/nifty4/multi/block_diagonal_operator.py
@@ -0,0 +1,55 @@
+import numpy as np
+from ..operators.endomorphic_operator import EndomorphicOperator
+from .multi_domain import MultiDomain
+from .multi_field import MultiField
+
+
+class BlockDiagonalOperator(EndomorphicOperator):
+    def __init__(self, operators):
+        """
+        Parameters
+        ----------
+        operators : dict
+            dictionary with the names of the sub-domains as keys and the
+            corresponding LinearOperators as values
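+
+        A minimal usage sketch (domains and factors are illustrative,
+        assuming ``import nifty4 as ift``)::
+
+            dom = {"a": ift.RGSpace(10), "b": ift.RGSpace(5)}
+            ops = {key: ift.ScalingOperator(2., d) for key, d in dom.items()}
+            B = BlockDiagonalOperator(ops)
+            y = B(ift.full(dom, 1.))  # scales every component field by 2.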
+        """
+        super(BlockDiagonalOperator, self).__init__()
+        self._operators = operators
+        self._domain = MultiDomain.make(
+            {key: op.domain for key, op in self._operators.items()})
+        self._cap = self._all_ops
+        for op in self._operators.values():
+            self._cap &= op.capability
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def capability(self):
+        return self._cap
+
+    def apply(self, x, mode):
+        self._check_input(x, mode)
+        return MultiField({key: op.apply(x[key], mode=mode)
+                           for key, op in self._operators.items()})
+
+    def draw_sample(self, from_inverse=False, dtype=np.float64):
+        dtype = MultiField.build_dtype(dtype, self._domain)
+        return MultiField({key: op.draw_sample(from_inverse, dtype[key])
+                           for key, op in self._operators.items()})
+
+    def _combine_chain(self, op):
+        res = {}
+        for key in self._operators.keys():
+            res[key] = self._operators[key]*op._operators[key]
+        return BlockDiagonalOperator(res)
+
+    def _combine_sum(self, op, selfneg, opneg):
+        from ..operators.sum_operator import SumOperator
+        res = {}
+        for key in self._operators.keys():
+            res[key] = SumOperator.make([self._operators[key],
+                                         op._operators[key]],
+                                        [selfneg, opneg])
+        return BlockDiagonalOperator(res)
diff --git a/nifty4/multi/multi_domain.py b/nifty4/multi/multi_domain.py
index 27c5f841debfa91a2f525f0b6bc0889756da80d7..90894bedc4f7bab1a430b20cacd9c240bc0fae28 100644
--- a/nifty4/multi/multi_domain.py
+++ b/nifty4/multi/multi_domain.py
@@ -1,2 +1,72 @@
-class MultiDomain(dict):
-    pass
+import collections
+from ..domain_tuple import DomainTuple
+
+__all__ = ["MultiDomain"]
+
+
+class frozendict(collections.Mapping):
+    """
+    An immutable wrapper around dictionaries that implements the complete
+    :py:class:`collections.Mapping` interface. It can be used as a drop-in
+    replacement for dictionaries where immutability is desired.
+    """
+
+    dict_cls = dict
+
+    def __init__(self, *args, **kwargs):
+        self._dict = self.dict_cls(*args, **kwargs)
+        self._hash = None
+
+    def __getitem__(self, key):
+        return self._dict[key]
+
+    def __contains__(self, key):
+        return key in self._dict
+
+    def copy(self, **add_or_replace):
+        return self.__class__(self, **add_or_replace)
+
+    def __iter__(self):
+        return iter(self._dict)
+
+    def __len__(self):
+        return len(self._dict)
+
+    def __repr__(self):
+        return '<%s %r>' % (self.__class__.__name__, self._dict)
+
+    def __hash__(self):
+        if self._hash is None:
+            h = 0
+            for key, value in self._dict.items():
+                h ^= hash((key, value))
+            self._hash = h
+        return self._hash
+
+
+class MultiDomain(frozendict):
+    _domainCache = {}
+
+    def __init__(self, domain, _callingfrommake=False):
+        if not _callingfrommake:
+            raise NotImplementedError
+        super(MultiDomain, self).__init__(domain)
+
+    @staticmethod
+    def make(domain):
+        if isinstance(domain, MultiDomain):
+            return domain
+        if not isinstance(domain, dict):
+            raise TypeError("dict expected")
+        tmp = {}
+        for key, value in domain.items():
+            if not isinstance(key, str):
+                raise TypeError("keys must be strings")
+            tmp[key] = DomainTuple.make(value)
+        domain = frozendict(tmp)
+        obj = MultiDomain._domainCache.get(domain)
+        if obj is not None:
+            return obj
+        obj = MultiDomain(domain, _callingfrommake=True)
+        MultiDomain._domainCache[domain] = obj
+        return obj
diff --git a/nifty4/multi/multi_field.py b/nifty4/multi/multi_field.py
index 7bcf821bfaf133df886c0476c26a963da38b034f..7bd3dc569c7f0dcb57a714ce98cab8f1f2d8827c 100644
--- a/nifty4/multi/multi_field.py
+++ b/nifty4/multi/multi_field.py
@@ -44,7 +44,8 @@ class MultiField(object):
 
     @property
     def domain(self):
-        return MultiDomain({key: val.domain for key, val in self._val.items()})
+        return MultiDomain.make(
+            {key: val.domain for key, val in self._val.items()})
 
     @property
     def dtype(self):
@@ -57,6 +58,18 @@ class MultiField(object):
                                                   dtype[key], **kwargs)
                            for key in domain.keys()})
 
+    def fill(self, fill_value):
+        """Fill `self` uniformly with `fill_value`
+
+        Parameters
+        ----------
+        fill_value: float or complex or int
+            The value to fill the field with.
+        """
+        for val in self._val.values():
+            val.fill(fill_value)
+        return self
+
     def _check_domain(self, other):
         if other.domain != self.domain:
             raise ValueError("domains are incompatible.")
@@ -73,9 +86,22 @@ class MultiField(object):
             v.lock()
         return self
 
+    @property
+    def locked(self):
+        return all(v.locked for v in self.values())
+
     def copy(self):
         return MultiField({key: val.copy() for key, val in self.items()})
 
+    def locked_copy(self):
+        if self.locked:
+            return self
+        return MultiField({key: val.locked_copy()
+                          for key, val in self.items()})
+
+    def empty_copy(self):
+        return MultiField({key: val.empty_copy() for key, val in self.items()})
+
     @staticmethod
     def build_dtype(dtype, domain):
         if isinstance(dtype, dict):
@@ -85,22 +111,24 @@ class MultiField(object):
         return {key: dtype for key in domain.keys()}
 
     @staticmethod
-    def zeros(domain, dtype=None):
+    def empty(domain, dtype=None):
         dtype = MultiField.build_dtype(dtype, domain)
-        return MultiField({key: Field.zeros(dom, dtype=dtype[key])
+        return MultiField({key: Field.empty(dom, dtype=dtype[key])
                            for key, dom in domain.items()})
 
     @staticmethod
-    def ones(domain, dtype=None):
-        dtype = MultiField.build_dtype(dtype, domain)
-        return MultiField({key: Field.ones(dom, dtype=dtype[key])
+    def full(domain, val):
+        return MultiField({key: Field.full(dom, val)
                            for key, dom in domain.items()})
 
+    def to_global_data(self):
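+        """Returns a dict mapping field names to global data arrays."""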
+        return {key: val.to_global_data() for key, val in self._val.items()}
+
     @staticmethod
-    def empty(domain, dtype=None):
-        dtype = MultiField.build_dtype(dtype, domain)
-        return MultiField({key: Field.empty(dom, dtype=dtype[key])
-                           for key, dom in domain.items()})
+    def from_global_data(domain, arr, sum_up=False):
+        return MultiField({key: Field.from_global_data(domain[key],
+                                                       val, sum_up)
+                           for key, val in arr.items()})
 
     def norm(self):
         """ Computes the L2-norm of the field values.
diff --git a/nifty4/operators/chain_operator.py b/nifty4/operators/chain_operator.py
index 965dbb156fff49aababa205d2dd286645cf014a2..c0c0b8341df3719751494f4dc598717bfa537d07 100644
--- a/nifty4/operators/chain_operator.py
+++ b/nifty4/operators/chain_operator.py
@@ -64,7 +64,7 @@ class ChainOperator(LinearOperator):
                     opsnew[i] = opsnew[i]._scale(fct)
                     fct = 1.
                     break
-        if fct != 1:
+        if fct != 1 or len(opsnew) == 0:
             # have to add the scaling operator at the end
             opsnew.append(ScalingOperator(fct, lastdom))
         ops = opsnew
@@ -78,11 +78,24 @@ class ChainOperator(LinearOperator):
             else:
                 opsnew.append(op)
         ops = opsnew
+        # Step 5: combine BlockDiagonalOperators where possible
+        from ..multi.block_diagonal_operator import BlockDiagonalOperator
+        opsnew = []
+        for op in ops:
+            if (len(opsnew) > 0 and
+                    isinstance(opsnew[-1], BlockDiagonalOperator) and
+                    isinstance(op, BlockDiagonalOperator)):
+                opsnew[-1] = opsnew[-1]._combine_chain(op)
+            else:
+                opsnew.append(op)
+        ops = opsnew
         return ops
 
     @staticmethod
     def make(ops):
         ops = tuple(ops)
+        if len(ops) == 0:
+            raise ValueError("ops is empty")
         ops = ChainOperator.simplify(ops)
         if len(ops) == 1:
             return ops[0]
diff --git a/nifty4/operators/fft_operator.py b/nifty4/operators/fft_operator.py
index 5f4526bd9adedd1f884045ccfebe5bd4f939cd11..781b072f5bdf652fbc410ad7746b8af326ea64a7 100644
--- a/nifty4/operators/fft_operator.py
+++ b/nifty4/operators/fft_operator.py
@@ -61,9 +61,7 @@ class FFTOperator(LinearOperator):
         adom.check_codomain(target)
         target.check_codomain(adom)
 
-        import pyfftw
-        pyfftw.interfaces.cache.enable()
-        pyfftw.interfaces.cache.set_keepalive_time(1000.)
+        utilities.fft_prep()
 
     def apply(self, x, mode):
         self._check_input(x, mode)
@@ -74,7 +72,6 @@ class FFTOperator(LinearOperator):
             return self._apply_cartesian(x, mode)
 
     def _apply_cartesian(self, x, mode):
-        from pyfftw.interfaces.numpy_fft import fftn
         axes = x.domain.axes[self._space]
         tdom = self._target if x.domain == self._domain else self._domain
         oldax = dobj.distaxis(x.val)
@@ -110,7 +107,7 @@ class FFTOperator(LinearOperator):
             tmp = dobj.from_local_data(shp2d, ldat2, distaxis=0)
             tmp = dobj.transpose(tmp)
             ldat2 = dobj.local_data(tmp)
-            ldat2 = fftn(ldat2, axes=(1,))
+            ldat2 = utilities.my_fftn(ldat2, axes=(1,))
             ldat2 = ldat2.real+ldat2.imag
             tmp = dobj.from_local_data(tmp.shape, ldat2, distaxis=0)
             tmp = dobj.transpose(tmp)
diff --git a/nifty4/operators/inversion_enabler.py b/nifty4/operators/inversion_enabler.py
index 450ba5d117209cba8704f2a135e109ac4702fa60..f70fcd7ac6ee4378144b65cc7518b23a5b4d790b 100644
--- a/nifty4/operators/inversion_enabler.py
+++ b/nifty4/operators/inversion_enabler.py
@@ -67,7 +67,7 @@ class InversionEnabler(EndomorphicOperator):
         if self._op.capability & mode:
             return self._op.apply(x, mode)
 
-        x0 = x*0.
+        x0 = x.empty_copy().fill(0.)
         invmode = self._modeTable[self.INVERSE_BIT][self._ilog[mode]]
         invop = self._op._flip_modes(self._ilog[invmode])
         prec = self._approximation
diff --git a/nifty4/operators/linear_operator.py b/nifty4/operators/linear_operator.py
index c7d5f2296c6a45c0e508a3770d7015353573539a..185c5049bb59da93aa70d651e49f883b10c1ef0d 100644
--- a/nifty4/operators/linear_operator.py
+++ b/nifty4/operators/linear_operator.py
@@ -271,10 +271,6 @@ class LinearOperator(NiftyMetaBase()):
             raise ValueError("requested operator mode is not supported")
 
     def _check_input(self, x, mode):
-        # MR FIXME: temporary fix for working with MultiFields
-        #if not isinstance(x, Field):
-        #    raise ValueError("supplied object is not a `Field`.")
-
         self._check_mode(mode)
         if x.domain != self._dom(mode):
             raise ValueError("The operator's and field's domains don't match.")
diff --git a/nifty4/operators/sandwich_operator.py b/nifty4/operators/sandwich_operator.py
index 71e3c863cc2f5fc4f00a4263efab5f47ecbe49e3..6dd77647d96edaeb05c42a833303047daa80939f 100644
--- a/nifty4/operators/sandwich_operator.py
+++ b/nifty4/operators/sandwich_operator.py
@@ -16,31 +16,51 @@
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
+import numpy as np
+
+from .linear_operator import LinearOperator
+from .diagonal_operator import DiagonalOperator
 from .endomorphic_operator import EndomorphicOperator
 from .scaling_operator import ScalingOperator
-import numpy as np
 
 
 class SandwichOperator(EndomorphicOperator):
     """Operator which is equivalent to the expression `bun.adjoint*cheese*bun`.
-
-    Parameters
-    ----------
-    bun: LinearOperator
-        the bun part
-    cheese: EndomorphicOperator
-        the cheese part
     """
 
-    def __init__(self, bun, cheese=None):
+    def __init__(self, bun, cheese, op, _callingfrommake=False):
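+        # SandwichOperators should be built via the static make() method;
+        # direct construction is therefore disabled.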
+        if not _callingfrommake:
+            raise NotImplementedError
         super(SandwichOperator, self).__init__()
         self._bun = bun
+        self._cheese = cheese
+        self._op = op
+
+    @staticmethod
+    def make(bun, cheese=None):
+        """Build a SandwichOperator (or something simpler if possible)
+
+        Parameters
+        ----------
+        bun: LinearOperator
+            the bun part
+        cheese: EndomorphicOperator
+            the cheese part
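+
+        A minimal sketch (spaces and operators are illustrative, assuming
+        ``import nifty4 as ift``)::
+
+            s_space = ift.RGSpace(64)
+            h_space = s_space.get_default_codomain()
+            HT = ift.HarmonicTransformOperator(h_space, s_space)
+            Ninv = ift.ScalingOperator(4., s_space)
+            D = SandwichOperator.make(HT, Ninv)  # acts as HT.adjoint*Ninv*HT
+
+        Note that make() may return a simpler operator (e.g. a
+        ScalingOperator or DiagonalOperator) whenever the sandwiched product
+        turns out to be diagonal.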
+        """
+        if not isinstance(bun, LinearOperator):
+            raise TypeError("bun must be a linear operator")
+        if cheese is not None and not isinstance(cheese, LinearOperator):
+            raise TypeError("cheese must be a linear operator")
         if cheese is None:
-            self._cheese = ScalingOperator(1., bun.target)
-            self._op = bun.adjoint*bun
+            cheese = ScalingOperator(1., bun.target)
+            op = bun.adjoint*bun
         else:
-            self._cheese = cheese
-            self._op = bun.adjoint*cheese*bun
+            op = bun.adjoint*cheese*bun
+
+        # if our sandwich is diagonal, we can return immediately
+        if isinstance(op, (ScalingOperator, DiagonalOperator)):
+            return op
+        return SandwichOperator(bun, cheese, op, _callingfrommake=True)
 
     @property
     def domain(self):
@@ -54,8 +74,11 @@ class SandwichOperator(EndomorphicOperator):
         return self._op.apply(x, mode)
 
     def draw_sample(self, from_inverse=False, dtype=np.float64):
+        # Drawing inverse samples from general sandwiches is not possible
         if from_inverse:
             raise NotImplementedError(
                 "cannot draw from inverse of this operator")
+
+        # Samples from general sandwiches
         return self._bun.adjoint_times(
             self._cheese.draw_sample(from_inverse, dtype))
diff --git a/nifty4/operators/scaling_operator.py b/nifty4/operators/scaling_operator.py
index b9a26bfaf32c569e25f88a0e6fb1e41d8590808d..b0aacb7d75c5c872cd3c40ec1ed692479ea47fbf 100644
--- a/nifty4/operators/scaling_operator.py
+++ b/nifty4/operators/scaling_operator.py
@@ -20,8 +20,8 @@ from __future__ import division
 import numpy as np
 from ..field import Field
 from ..multi.multi_field import MultiField
-from ..domain_tuple import DomainTuple
 from .endomorphic_operator import EndomorphicOperator
+from ..domain_tuple import DomainTuple
 
 
 class ScalingOperator(EndomorphicOperator):
@@ -49,12 +49,13 @@ class ScalingOperator(EndomorphicOperator):
     """
 
     def __init__(self, factor, domain):
+        from ..sugar import makeDomain
         super(ScalingOperator, self).__init__()
 
         if not np.isscalar(factor):
             raise TypeError("Scalar required")
         self._factor = factor
-        self._domain = DomainTuple.make(domain)
+        self._domain = makeDomain(domain)
 
     def apply(self, x, mode):
         self._check_input(x, mode)
@@ -62,7 +63,7 @@ class ScalingOperator(EndomorphicOperator):
         if self._factor == 1.:
             return x.copy()
         if self._factor == 0.:
-            return x.zeros_like(x)
+            return x.empty_copy().fill(0.)
 
         if mode == self.TIMES:
             return x*self._factor
diff --git a/nifty4/operators/sum_operator.py b/nifty4/operators/sum_operator.py
index 2680bc4384a55310055d8ce37b9fe72e824b533d..d32d7bfd73cac73d516551b3e171769dea1ef297 100644
--- a/nifty4/operators/sum_operator.py
+++ b/nifty4/operators/sum_operator.py
@@ -102,12 +102,36 @@ class SumOperator(LinearOperator):
                     negnew.append(neg[i])
         ops = opsnew
         neg = negnew
+        # Step 5: combine BlockDiagonalOperators where possible
+        from ..multi.block_diagonal_operator import BlockDiagonalOperator
+        processed = [False] * len(ops)
+        opsnew = []
+        negnew = []
+        for i in range(len(ops)):
+            if not processed[i]:
+                if isinstance(ops[i], BlockDiagonalOperator):
+                    op = ops[i]
+                    opneg = neg[i]
+                    for j in range(i+1, len(ops)):
+                        if isinstance(ops[j], BlockDiagonalOperator):
+                            op = op._combine_sum(ops[j], opneg, neg[j])
+                            opneg = False
+                            processed[j] = True
+                    opsnew.append(op)
+                    negnew.append(opneg)
+                else:
+                    opsnew.append(ops[i])
+                    negnew.append(neg[i])
+        ops = opsnew
+        neg = negnew
         return ops, neg
 
     @staticmethod
     def make(ops, neg):
         ops = tuple(ops)
         neg = tuple(neg)
+        if len(ops) == 0:
+            raise ValueError("ops is empty")
         if len(ops) != len(neg):
             raise ValueError("length mismatch between ops and neg")
         ops, neg = SumOperator.simplify(ops, neg)
diff --git a/nifty4/sugar.py b/nifty4/sugar.py
index 3e829e3edc32208e9945f4a500a8a51d2d308917..1360901e59cca16de513e6730fe7d6415cae1ac1 100644
--- a/nifty4/sugar.py
+++ b/nifty4/sugar.py
@@ -16,19 +16,23 @@
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
 # and financially supported by the Studienstiftung des deutschen Volkes.
 
+import sys
 import numpy as np
 from .domains.power_space import PowerSpace
 from .field import Field
+from .multi.multi_field import MultiField
+from .multi.multi_domain import MultiDomain
 from .operators.diagonal_operator import DiagonalOperator
 from .operators.power_distributor import PowerDistributor
 from .domain_tuple import DomainTuple
 from . import dobj, utilities
 from .logger import logger
 
-__all__ = ['PS_field',
-           'power_analyze',
-           'create_power_operator',
-           'create_harmonic_smoothing_operator']
+__all__ = ['PS_field', 'power_analyze', 'create_power_operator',
+           'create_harmonic_smoothing_operator', 'from_random',
+           'full', 'empty', 'from_global_data', 'from_local_data',
+           'makeDomain', 'sqrt', 'exp', 'log', 'tanh', 'conjugate',
+           'get_signal_variance']
 
 
 def PS_field(pspace, func):
@@ -38,6 +42,34 @@ def PS_field(pspace, func):
     return Field(pspace, val=data)
 
 
+def get_signal_variance(spec, space):
+    """
+    Computes how much a field with a given power spectrum will vary in
+    position space.
+
+    This small helper function computes the expected variance of a
+    harmonically transformed sample drawn from this power spectrum.
+
+    Parameters
+    ----------
+    spec: method
+        a method that takes one k-value and returns the power spectrum at
+        that location
+    space: PowerSpace or any harmonic Domain
+        If a harmonic domain is given, the naturally binned PowerSpace for
+        that domain is created and used.
+        The field for which the signal variance is computed is assumed to
+        have this PowerSpace as its naturally binned PowerSpace.
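+
+    Examples
+    --------
+    A minimal sketch (the space and spectrum are illustrative only)::
+
+        import nifty4 as ift
+        s_space = ift.RGSpace(512, distances=0.1)
+        h_space = s_space.get_default_codomain()
+        spec = lambda k: 1./(1. + k**2)
+        var = ift.get_signal_variance(spec, h_space)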
+    """
+    if space.harmonic:
+        space = PowerSpace(space)
+    if not isinstance(space, PowerSpace):
+        raise ValueError(
+            "space must be either a harmonic space or Power space.")
+    field = PS_field(space, spec)
+    dist = PowerDistributor(space.harmonic_partner, space)
+    k_field = dist(field)
+    return k_field.weight(2).sum()
+
 def _single_power_analyze(field, idx, binbounds):
     power_domain = PowerSpace(field.domain[idx], binbounds)
     pd = PowerDistributor(field.domain, power_domain, idx)
@@ -161,3 +193,70 @@ def create_harmonic_smoothing_operator(domain, space, sigma):
     kfunc = domain[space].get_fft_smoothing_kernel_function(sigma)
     return DiagonalOperator(kfunc(domain[space].get_k_length_array()), domain,
                             space)
+
+
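+# Convenience constructors that dispatch to MultiField/MultiDomain when
+# "domain" is a dict or MultiDomain, and to Field/DomainTuple otherwise.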
+def full(domain, val):
+    if isinstance(domain, (dict, MultiDomain)):
+        return MultiField.full(domain, val)
+    return Field.full(domain, val)
+
+
+def empty(domain, dtype):
+    if isinstance(domain, (dict, MultiDomain)):
+        return MultiField.empty(domain, dtype)
+    return Field.empty(domain, dtype)
+
+
+def from_random(random_type, domain, dtype=np.float64, **kwargs):
+    if isinstance(domain, (dict, MultiDomain)):
+        return MultiField.from_random(random_type, domain, dtype, **kwargs)
+    return Field.from_random(random_type, domain, dtype, **kwargs)
+
+
+def from_global_data(domain, arr, sum_up=False):
+    if isinstance(domain, (dict, MultiDomain)):
+        return MultiField.from_global_data(domain, arr, sum_up)
+    return Field.from_global_data(domain, arr, sum_up)
+
+
+def from_local_data(domain, arr):
+    if isinstance(domain, (dict, MultiDomain)):
+        return MultiField.from_local_data(domain, arr)
+    return Field.from_local_data(domain, arr)
+
+
+def makeDomain(domain):
+    if isinstance(domain, (MultiDomain, dict)):
+        return MultiDomain.make(domain)
+    return DomainTuple.make(domain)
+
+
+# Arithmetic functions working on Fields and MultiFields
+
+_current_module = sys.modules[__name__]
+
+for f in ["sqrt", "exp", "log", "tanh", "conjugate"]:
+    def func(f):
+        def func2(x, out=None):
+            if isinstance(x, MultiField):
+                if out is not None:
+                    if (not isinstance(out, MultiField) or
+                            x._domain != out._domain):
+                        raise ValueError("Bad 'out' argument")
+                    for key, value in x.items():
+                        func2(value, out=out[key])
+                    return out
+                return MultiField({key: func2(val) for key, val in x.items()})
+            elif isinstance(x, Field):
+                fu = getattr(dobj, f)
+                if out is not None:
+                    if not isinstance(out, Field) or x._domain != out._domain:
+                        raise ValueError("Bad 'out' argument")
+                    fu(x.val, out=out.val)
+                    return out
+                else:
+                    return Field(domain=x._domain, val=fu(x.val))
+            else:
+                return getattr(np, f)(x, out)
+        return func2
+    setattr(_current_module, f, func(f))
diff --git a/nifty4/utilities.py b/nifty4/utilities.py
index 3bbbd69bca386fd546b9da61bdc3288bf6352d70..b52975bed5d56fadf724d058087c4524fbcf0a36 100644
--- a/nifty4/utilities.py
+++ b/nifty4/utilities.py
@@ -23,7 +23,8 @@ import abc
 from future.utils import with_metaclass
 
 __all__ = ["get_slice_list", "safe_cast", "parse_spaces", "infer_space",
-           "memo", "NiftyMetaBase", "hartley", "my_fftn_r2c"]
+           "memo", "NiftyMetaBase", "fft_prep", "hartley", "my_fftn_r2c",
+           "my_fftn"]
 
 
 def get_slice_list(shape, axes):
@@ -167,6 +168,17 @@ def nthreads():
     return nthreads._val
 nthreads._val = None
 
+# Optional extra arguments for the pyfftw calls; by default none are needed:
+# _fft_extra_args = {}
+# If exact reproducibility is needed, use FFTW_ESTIMATE instead:
+_fft_extra_args = dict(planner_effort='FFTW_ESTIMATE')
+
+
+def fft_prep():
+    import pyfftw
+    pyfftw.interfaces.cache.enable()
+    pyfftw.interfaces.cache.set_keepalive_time(1000.)
+
 
 def hartley(a, axes=None):
     # Check if the axes provided are valid given the shape
@@ -177,7 +189,7 @@ def hartley(a, axes=None):
         raise TypeError("Hartley transform requires real-valued arrays.")
 
     from pyfftw.interfaces.numpy_fft import rfftn
-    tmp = rfftn(a, axes=axes, threads=nthreads())
+    tmp = rfftn(a, axes=axes, threads=nthreads(), **_fft_extra_args)
 
     def _fill_array(tmp, res, axes):
         if axes is None:
@@ -219,7 +231,7 @@ def my_fftn_r2c(a, axes=None):
         raise TypeError("Transform requires real-valued input arrays.")
 
     from pyfftw.interfaces.numpy_fft import rfftn
-    tmp = rfftn(a, axes=axes, threads=nthreads())
+    tmp = rfftn(a, axes=axes, threads=nthreads(), **_fft_extra_args)
 
     def _fill_complex_array(tmp, res, axes):
         if axes is None:
@@ -250,3 +262,8 @@ def my_fftn_r2c(a, axes=None):
         return res
 
     return _fill_complex_array(tmp, np.empty_like(a, dtype=tmp.dtype), axes)
+
+
+def my_fftn(a, axes=None):
+    from pyfftw.interfaces.numpy_fft import fftn
+    return fftn(a, axes=axes, **_fft_extra_args)
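
A sketch of how the new FFT hooks might be wired up at application start; it assumes pyfftw is installed and uses illustrative array sizes.

# Illustrative sketch only: enable pyfftw's plan cache once, then the
# transforms above reuse cached plans built with the planner effort chosen
# via _fft_extra_args (FFTW_ESTIMATE here, for reproducible results).
import numpy as np
from nifty4 import utilities

utilities.fft_prep()                  # switch on pyfftw's interface cache

a = np.random.normal(size=(256, 256))
b = utilities.hartley(a)              # real-to-real Hartley transform
c = utilities.my_fftn(a.astype(np.complex128))  # full complex FFT
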
diff --git a/test/test_energies/test_map.py b/test/test_energies/test_map.py
index 311feb1884b5dc2e0e0937bd0c170de27d7f837b..fb298f908539c8e08ac8183f8f3e856026821236 100644
--- a/test/test_energies/test_map.py
+++ b/test/test_energies/test_map.py
@@ -29,7 +29,8 @@ def _flat_PS(k):
 
 
 class Energy_Tests(unittest.TestCase):
-    @expand(product([ift.RGSpace(64, distances=.789),
+    @expand(product([ift.GLSpace(15),
+                     ift.RGSpace(64, distances=.789),
                      ift.RGSpace([32, 32], distances=.789)],
                     [4, 78, 23]))
     def testLinearMap(self, space, seed):
@@ -63,7 +64,8 @@ class Energy_Tests(unittest.TestCase):
         ift.extra.check_value_gradient_curvature_consistency(
             energy, tol=1e-4, ntries=10)
 
-    @expand(product([ift.RGSpace(64, distances=.789),
+    @expand(product([ift.GLSpace(15),
+                     ift.RGSpace(64, distances=.789),
                      ift.RGSpace([32, 32], distances=.789)],
                     [ift.library.Tanh, ift.library.Exponential,
                      ift.library.Linear],
diff --git a/test/test_energies/test_power.py b/test/test_energies/test_power.py
index 1062f65fc851bb70cdbc830e453089474538513e..a8c01f04bd2c88e4a30d836cb0d83d222d7c6284 100644
--- a/test/test_energies/test_power.py
+++ b/test/test_energies/test_power.py
@@ -50,7 +50,7 @@ class Energy_Tests(unittest.TestCase):
         n = ift.Field.from_random(domain=space, random_type='normal')
         s = ht(xi * A)
         R = ift.ScalingOperator(10., space)
-        diag = ift.Field.ones(space)
+        diag = ift.full(space, 1.)
         N = ift.DiagonalOperator(diag)
         d = R(f(s)) + n
 
diff --git a/test/test_field.py b/test/test_field.py
index 5515a1d41ec5f67ebce8f3d558767c31b22b27f5..cab8f64c51529d6a682f749632571e6293b4b443 100644
--- a/test/test_field.py
+++ b/test/test_field.py
@@ -18,7 +18,7 @@
 
 import unittest
 import numpy as np
-from numpy.testing import assert_equal, assert_allclose
+from numpy.testing import assert_equal, assert_allclose, assert_raises
 from itertools import product
 import nifty4 as ift
 from test.common import expand
@@ -124,24 +124,140 @@ class Test_Functionality(unittest.TestCase):
         res = m.vdot(m, spaces=1)
         assert_allclose(res.local_data, 37.5)
 
+    def test_lock(self):
+        s1 = ift.RGSpace((10,))
+        f1 = ift.Field(s1, 27)
+        assert_equal(f1.locked, False)
+        f1.lock()
+        assert_equal(f1.locked, True)
+        with assert_raises(ValueError):
+            f1 += f1
+        assert_equal(f1.locked_copy() is f1, True)
+
+    def test_fill(self):
+        s1 = ift.RGSpace((10,))
+        f1 = ift.Field(s1, 27)
+        assert_equal(f1.fill(10).local_data, 10)
+
+    def test_dataconv(self):
+        s1 = ift.RGSpace((10,))
+        ld = np.arange(ift.dobj.local_shape(s1.shape)[0])
+        gd = np.arange(s1.shape[0])
+        assert_equal(ld, ift.from_local_data(s1, ld).local_data)
+        assert_equal(gd, ift.from_global_data(s1, gd).to_global_data())
+
+    def test_cast_domain(self):
+        s1 = ift.RGSpace((10,))
+        s2 = ift.RGSpace((10,), distances=20.)
+        d = np.arange(s1.shape[0])
+        d2 = ift.from_global_data(s1, d).cast_domain(s2).to_global_data()
+        assert_equal(d, d2)
+
+    def test_empty_domain(self):
+        f = ift.Field((), 5)
+        assert_equal(f.to_global_data(), 5)
+        f = ift.Field(None, 5)
+        assert_equal(f.to_global_data(), 5)
+        assert_equal(f.empty_copy().domain, f.domain)
+        assert_equal(f.empty_copy().dtype, f.dtype)
+        assert_equal(f.copy().domain, f.domain)
+        assert_equal(f.copy().dtype, f.dtype)
+        assert_equal(f.copy().local_data, f.local_data)
+        assert_equal(f.copy() is f, False)
+
+    def test_trivialities(self):
+        s1 = ift.RGSpace((10,))
+        f1 = ift.Field(s1, 27)
+        assert_equal(f1.local_data, f1.real.local_data)
+        f1 = ift.Field(s1, 27.+3j)
+        assert_equal(f1.real.local_data, 27.)
+        assert_equal(f1.imag.local_data, 3.)
+        assert_equal(f1.local_data, +f1.local_data)
+        assert_equal(f1.sum(), f1.sum(0))
+        f1 = ift.from_global_data(s1, np.arange(10))
+        assert_equal(f1.min(), 0)
+        assert_equal(f1.max(), 9)
+        assert_equal(f1.prod(), 0)
+
+    def test_weight(self):
+        s1 = ift.RGSpace((10,))
+        f = ift.Field(s1, 10.)
+        f2 = f.copy()
+        f.weight(1, out=f2)
+        assert_equal(f.weight(1).local_data, f2.local_data)
+        assert_equal(f.total_volume(), 1)
+        assert_equal(f.total_volume(0), 1)
+        assert_equal(f.total_volume((0,)), 1)
+        assert_equal(f.scalar_weight(), 0.1)
+        assert_equal(f.scalar_weight(0), 0.1)
+        assert_equal(f.scalar_weight((0,)), 0.1)
+        s1 = ift.GLSpace(10)
+        f = ift.Field(s1, 10.)
+        assert_equal(f.scalar_weight(), None)
+        assert_equal(f.scalar_weight(0), None)
+        assert_equal(f.scalar_weight((0,)), None)
+
+    @expand(product([ift.RGSpace(10), ift.GLSpace(10)],
+                    [np.float64, np.complex128]))
+    def test_reduction(self, dom, dt):
+        s1 = ift.Field(dom, 1., dtype=dt)
+        assert_allclose(s1.mean(), 1.)
+        assert_allclose(s1.mean(0), 1.)
+        assert_allclose(s1.var(), 0., atol=1e-14)
+        assert_allclose(s1.var(0), 0., atol=1e-14)
+        assert_allclose(s1.std(), 0., atol=1e-14)
+        assert_allclose(s1.std(0), 0., atol=1e-14)
+
+    def test_err(self):
+        s1 = ift.RGSpace((10,))
+        s2 = ift.RGSpace((11,))
+        f1 = ift.Field(s1, 27)
+        with assert_raises(ValueError):
+            f2 = ift.Field(s2, f1)
+        with assert_raises(ValueError):
+            f2 = ift.Field(s2, f1.val)
+        with assert_raises(TypeError):
+            f2 = ift.Field(s2, "xyz")
+        with assert_raises(TypeError):
+            if f1:
+                pass
+        with assert_raises(TypeError):
+            f1.full((2, 4, 6))
+        with assert_raises(TypeError):
+            f2 = ift.Field(None, None)
+        with assert_raises(ValueError):
+            f2 = ift.Field(s1, None)
+        with assert_raises(ValueError):
+            f1.imag
+        with assert_raises(TypeError):
+            f1.vdot(42)
+        with assert_raises(ValueError):
+            f1.vdot(ift.Field(s2, 1.))
+        with assert_raises(TypeError):
+            f1.copy_content_from(1)
+        with assert_raises(ValueError):
+            f1.copy_content_from(ift.Field(s2, 1.))
+        with assert_raises(TypeError):
+            ift.full(s1, [2, 3])
+
     def test_stdfunc(self):
         s = ift.RGSpace((200,))
         f = ift.Field(s, 27)
         assert_equal(f.local_data, 27)
         assert_equal(f.shape, (200,))
         assert_equal(f.dtype, np.int)
-        fx = ift.Field.empty_like(f)
+        fx = ift.empty(f.domain, f.dtype)
         assert_equal(f.dtype, fx.dtype)
         assert_equal(f.shape, fx.shape)
-        fx = ift.Field.zeros_like(f)
+        fx = ift.full(f.domain, 0)
         assert_equal(f.dtype, fx.dtype)
         assert_equal(f.shape, fx.shape)
         assert_equal(fx.local_data, 0)
-        fx = ift.Field.ones_like(f)
+        fx = ift.full(f.domain, 1)
         assert_equal(f.dtype, fx.dtype)
         assert_equal(f.shape, fx.shape)
         assert_equal(fx.local_data, 1)
-        fx = ift.Field.full_like(f, 67.)
+        fx = ift.full(f.domain, 67.)
         assert_equal(f.shape, fx.shape)
         assert_equal(fx.local_data, 67.)
         f = ift.Field.from_random("normal", s)
diff --git a/test/test_minimization/test_minimizers.py b/test/test_minimization/test_minimizers.py
index 267898ea3a5d412080e1821b04ea1401f08e20c8..9e8b1045134496b54f95a32cc8dbe263be02848e 100644
--- a/test/test_minimization/test_minimizers.py
+++ b/test/test_minimization/test_minimizers.py
@@ -30,7 +30,7 @@ spaces = [ift.RGSpace([1024], distances=0.123), ift.HPSpace(32)]
 
 minimizers = ['ift.VL_BFGS(IC)',
               'ift.NonlinearCG(IC, "Polak-Ribiere")',
-              #'ift.NonlinearCG(IC, "Hestenes-Stiefel"),
+              # 'ift.NonlinearCG(IC, "Hestenes-Stiefel")',
               'ift.NonlinearCG(IC, "Fletcher-Reeves")',
               'ift.NonlinearCG(IC, "5.49")',
               'ift.NewtonCG(xtol=1e-5, maxiter=1000)',
@@ -53,7 +53,7 @@ class Test_Minimizers(unittest.TestCase):
         covariance_diagonal = ift.Field.from_random(
                                   'uniform', domain=space) + 0.5
         covariance = ift.DiagonalOperator(covariance_diagonal)
-        required_result = ift.Field.ones(space, dtype=np.float64)
+        required_result = ift.full(space, 1.)
 
         try:
             minimizer = eval(minimizer)
diff --git a/test/test_multi_field.py b/test/test_multi_field.py
new file mode 100644
index 0000000000000000000000000000000000000000..42b2ac3f4f9eedb133fe252bcdabe95626196be6
--- /dev/null
+++ b/test/test_multi_field.py
@@ -0,0 +1,67 @@
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Copyright(C) 2013-2018 Max-Planck-Society
+#
+# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
+# and financially supported by the Studienstiftung des deutschen Volkes.
+
+import unittest
+import numpy as np
+from numpy.testing import assert_equal, assert_allclose, assert_raises
+from itertools import product
+import nifty4 as ift
+from test.common import expand
+
+dom = ift.makeDomain({"d1": ift.RGSpace(10)})
+
+class Test_Functionality(unittest.TestCase):
+    def test_vdot(self):
+        f1 = ift.from_random("normal", domain=dom, dtype=np.complex128)
+        f2 = ift.from_random("normal", domain=dom, dtype=np.complex128)
+        assert_allclose(f1.vdot(f2), np.conj(f2.vdot(f1)))
+
+    def test_lock(self):
+        f1 = ift.full(dom, 27)
+        assert_equal(f1.locked, False)
+        f1.lock()
+        assert_equal(f1.locked, True)
+        with assert_raises(ValueError):
+            f1 += f1
+        assert_equal(f1.locked_copy() is f1, True)
+
+    def test_fill(self):
+        f1 = ift.full(dom, 27)
+        f1.fill(10)
+        for val in f1.values():
+            assert_equal((val == 10).all(), True)
+
+    def test_dataconv(self):
+        f1 = ift.full(dom, 27)
+        f2 = ift.from_global_data(dom, f1.to_global_data())
+        for key, val in f1.items():
+            assert_equal(val.local_data, f2[key].local_data)
+
+    def test_blockdiagonal(self):
+        op = ift.BlockDiagonalOperator({"d1": ift.ScalingOperator(20., dom["d1"])})
+        op2 = op*op
+        ift.extra.consistency_check(op2)
+        assert_equal(type(op2), ift.BlockDiagonalOperator)
+        f1 = op2(ift.full(dom, 1))
+        for val in f1.values():
+            assert_equal((val == 400).all(), True)
+        op2 = op+op
+        assert_equal(type(op2), ift.BlockDiagonalOperator)
+        f1 = op2(ift.full(dom, 1))
+        for val in f1.values():
+            assert_equal((val == 40).all(), True)
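
The block-diagonal pattern exercised by test_blockdiagonal, written out as a small standalone sketch; the domains and scaling factors are illustrative, not taken from the patch.

# Illustrative sketch only: per-component operators on a MultiDomain combined
# into one block-diagonal operator acting on MultiFields.
import nifty4 as ift

mdom = ift.makeDomain({"d1": ift.RGSpace(10), "d2": ift.HPSpace(4)})
ops = {"d1": ift.ScalingOperator(2., mdom["d1"]),
       "d2": ift.ScalingOperator(3., mdom["d2"])}
B = ift.BlockDiagonalOperator(ops)

x = ift.full(mdom, 1.)
y = B(x)          # "d1" component scaled by 2, "d2" component by 3
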