diff --git a/nifty/__init__.py b/nifty/__init__.py
index 4e214e0a23da85e2b768174a6925434a291e7d82..b17e137bf0f533c2e3d64018c3e6618676e33749 100644
--- a/nifty/__init__.py
+++ b/nifty/__init__.py
@@ -48,8 +48,9 @@ from random import Random
 from nifty_simple_math import *
 from nifty_utilities import *
 
-from field_types import FieldType,\
-                        FieldArray
+from field_types import *
+
+from minimization import *
 
 from spaces import *
 
diff --git a/nifty/minimization/__init__.py b/nifty/minimization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1f47c8a7538d9207e562e8c5990698f74433e1d
--- /dev/null
+++ b/nifty/minimization/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from conjugate_gradient import ConjugateGradient
diff --git a/nifty/minimization/conjugate_gradient.py b/nifty/minimization/conjugate_gradient.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee1f2f58cd62ec9c302712c5885e9d51b7c086b7
--- /dev/null
+++ b/nifty/minimization/conjugate_gradient.py
@@ -0,0 +1,319 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+from nifty.config import notification, about
+
+class ConjugateGradient(object):
+    """
+        ..      _______       ____ __
+        ..    /  _____/     /   _   /
+        ..   /  /____  __  /  /_/  / __
+        ..   \______//__/  \____  //__/  class
+        ..                /______/
+
+        NIFTY tool class for conjugate gradient
+
+        This tool solves the linear system :math:`A x = b` for `x`, given `A`
+        and `b`, using the conjugate gradient method; i.e., a step-by-step
+        minimization along conjugated gradient directions. Further, `A` is
+        assumed to be a positive definite and self-adjoint operator. The use
+        of a preconditioner `W` that is roughly the inverse of `A` is
+        optional. For details on the methodology refer to [#]_; for details
+        on usage and output, see the notes below.
+
+        Parameters
+        ----------
+        A : {operator, function}
+            Operator `A` applicable to a field.
+        b : field
+            Resulting field of the operation `A(x)`.
+        W : {operator, function}, *optional*
+            Operator `W` that is a preconditioner on `A` and is applicable to a
+            field (default: None).
+        spam : function, *optional*
+            Callback function which is given the current `x` and iteration
+            counter each iteration (default: None).
+        reset : integer, *optional*
+            Number of iterations after which to restart; i.e., forget previous
+            conjugated directions (default: sqrt(b.domain.dim)).
+        note : bool, *optional*
+            Indicates whether notes are printed or not (default: False).
+
+        See Also
+        --------
+        scipy.sparse.linalg.cg
+
+        Notes
+        -----
+        After initialization by `__init__`, the minimizer is started by calling
+        it using `__call__`, which takes additional parameters. Notifications,
+        if enabled, will state the iteration number, current step widths
+        `alpha` and `beta`, the current relative residual `delta` that is
+        compared to the tolerance, and the convergence level if changed.
+        The minimizer will exit in three states: DEAD if `alpha` becomes
+        non-finite, QUIT if the maximum number of iterations is reached, or DONE
+        if convergence is achieved. Returned will be the latest `x` and the
+        latest convergence level, which can evaluate ``True`` for the exit
+        states QUIT and DONE.
+
+        References
+        ----------
+        .. [#] J. R. Shewchuk, 1994, `"An Introduction to the Conjugate
+            Gradient Method Without the Agonizing Pain"
+            <http://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf>`_
+
+        Examples
+        --------
+        >>> b = field(point_space(2), val=[1, 9])
+        >>> A = diagonal_operator(b.domain, diag=[4, 3])
+        >>> x, convergence = ConjugateGradient()(A, b, note=True, tol=1E-4, clevel=3)
+        iteration : 00000001   alpha = 3.3E-01   beta = 1.3E-03   delta = 3.6E-02
+        iteration : 00000002   alpha = 2.5E-01   beta = 7.6E-04   delta = 1.0E-03
+        iteration : 00000003   alpha = 3.3E-01   beta = 2.5E-04   delta = 1.6E-05   convergence level : 1
+        iteration : 00000004   alpha = 2.5E-01   beta = 1.8E-06   delta = 2.1E-08   convergence level : 2
+        iteration : 00000005   alpha = 2.5E-01   beta = 2.2E-03   delta = 1.0E-09   convergence level : 3
+        ... done.
+        >>> bool(convergence)
+        True
+        >>> x.val # yields 1/4 and 9/3
+        array([ 0.25,  3.  ])
+
+        Attributes
+        ----------
+        A : {operator, function}
+            Operator `A` applicable to a field.
+        x : field
+            Current field.
+        b : field
+            Resulting field of the operation `A(x)`.
+        W : {operator, function}
+            Operator `W` that is a preconditioner on `A` and is applicable to a
+            field; can be ``None``.
+        spam : function
+            Callback function which is given the current `x` and iteration
+            counter each iteration; can be ``None``.
+        reset : integer
+            Number of iterations after which to restart; i.e., forget previous
+            conjugated directions (default: sqrt(b.domain.dim)).
+        note : notification
+            Notification instance.
+
+    """
+    def __init__(self):
+        pass
+
+    def __call__(self, A, b, x0=None, W=None, spam=None, reset=None,
+                 note=False, **kwargs):
+        """
+            Runs the conjugate gradient minimization.
+
+            Parameters
+            ----------
+            A : {operator, function}
+                Operator `A` applicable to a field.
+            b : field
+                Right-hand side of the equation `A(x) = b`.
+            x0 : field, *optional*
+                Starting guess for the minimization (default: zero field).
+            tol : scalar, *optional*
+                Tolerance specifying convergence; measured by the current
+                relative residual (default: 1E-4).
+            clevel : integer, *optional*
+                Number of times the tolerance should be undershot before
+                exiting (default: 1).
+            limii : integer, *optional*
+                Maximum number of iterations performed
+                (default: 10 * b.domain.dim).
+
+            The keyword arguments `W`, `spam`, `reset`, and `note` are
+            described in the class docstring.
+
+            Returns
+            -------
+            x : field
+                Latest `x` of the minimization.
+            convergence : integer
+                Latest convergence level indicating whether the minimization
+                has converged or not.
+
+        """
+        if hasattr(A, "__call__"):
+            self.A = A
+        else:
+            raise AttributeError(about._errors.cstring(
+                "ERROR: A must be callable!"))
+
+        self.b = b
+
+        if W is None or hasattr(W, "__call__"):
+            self.W = W
+        else:
+            raise AttributeError(about._errors.cstring(
+                "ERROR: W must be None or callable!"))
+
+        self.spam = spam
+
+        if reset is None:
+            self.reset = max(2, int(np.sqrt(b.domain.dim)))
+        else:
+            self.reset = max(2, int(reset))
+
+        self.note = notification(default=bool(note))
+
+        self.x = self.b.copy_empty()
+        self.x.set_val(new_val=0)
+        # overwrite the zero starting point if an initial guess was given
+        self.x.set_val(new_val=x0)
+
+        if self.W is None:
+            return self._calc_without(**kwargs)
+        else:
+            return self._calc_with(**kwargs)
+
+    def _calc_without(self, tol=1E-4, clevel=1, limii=None):
+        clevel = int(clevel)
+        if limii is None:
+            limii = 10*self.b.domain.dim
+        else:
+            limii = int(limii)
+
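+        # Standard conjugate gradient recurrences (cf. Shewchuk 1994, as
+        # referenced in the class docstring):
+        #     r_0 = b - A(x_0),        d_0 = r_0
+        #     alpha_k = (r_k.r_k) / (d_k.A(d_k))
+        #     x_{k+1} = x_k + alpha_k d_k
+        #     r_{k+1} = r_k - alpha_k A(d_k)
+        #     beta_k = (r_{k+1}.r_{k+1}) / (r_k.r_k)
+        #     d_{k+1} = r_{k+1} + beta_k d_k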
+        r = self.b - self.A(self.x)
+        d = self.b.copy_empty()
+        d.set_val(new_val=r.get_val())
+        gamma = r.dot(d)
+        if gamma == 0:
+            # the starting guess already solves A(x) = b exactly
+            return self.x, clevel+1
+        delta_ = np.absolute(gamma)**(-0.5)
+
+        convergence = 0
+        ii = 1
+        while True:
+            q = self.A(d)
+            # step width along the current direction (exact line minimum
+            # for a positive definite A)
+            alpha = gamma/d.dot(q)
+            if not np.isfinite(alpha):
+                self.note.cprint(
+                    "\niteration : %08u   alpha = NAN\n... dead." % ii)
+                return self.x, 0
+            self.x += d * alpha
+            if np.signbit(np.real(alpha)):
+                about.warnings.cprint(
+                    "WARNING: positive definiteness of A violated.")
+                r = self.b - self.A(self.x)
+            elif (ii % self.reset) == 0:
+                # restart: recompute the residual explicitly to remove
+                # accumulated round-off error
+                r = self.b - self.A(self.x)
+            else:
+                r -= q * alpha
+            gamma_ = gamma
+            gamma = r.dot(r)
+            beta = max(0, gamma/gamma_)  # enforced non-negative for stability
+            d = r + d*beta  # next conjugated direction
+            # relative residual measure compared against the tolerance
+            delta = delta_*np.absolute(gamma)**0.5
+            self.note.cflush(
+                "\niteration : %08u   alpha = %3.1E   beta = %3.1E   "
+                "delta = %3.1E" % (ii, np.real(alpha), np.real(beta),
+                                   np.real(delta)))
+            if gamma == 0:
+                convergence = clevel+1
+                self.note.cprint("   convergence level : INF\n... done.")
+                break
+            elif np.absolute(delta) < tol:
+                convergence += 1
+                self.note.cflush("   convergence level : %u" % convergence)
+                if convergence == clevel:
+                    self.note.cprint("\n... done.")
+                    break
+            else:
+                convergence = max(0, convergence-1)
+            if ii == limii:
+                self.note.cprint("\n... quit.")
+                break
+
+            if self.spam is not None:
+                self.spam(self.x, ii)
+
+            ii += 1
+
+        if self.spam is not None:
+            self.spam(self.x, ii)
+
+        return self.x, convergence
+
+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+    def _calc_with(self, tol=1E-4, clevel=1, limii=None):
+        # runs the conjugate gradient with the preconditioner W
+        clevel = int(clevel)
+        if limii is None:
+            limii = 10*self.b.domain.dim
+        else:
+            limii = int(limii)
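+
+        # Preconditioned conjugate gradient: with s_k = W(r_k) the standard
+        # recurrences become
+        #     gamma_k = r_k.s_k,    alpha_k = gamma_k / (d_k.A(d_k))
+        #     beta_k = gamma_{k+1} / gamma_k,    d_{k+1} = s_{k+1} + beta_k d_k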
+        r = self.b - self.A(self.x)
+        d = self.W(r)
+        gamma = r.dot(d)
+        if gamma == 0:
+            # the starting guess already solves A(x) = b exactly
+            return self.x, clevel+1
+        delta_ = np.absolute(gamma)**(-0.5)
+
+        convergence = 0
+        ii = 1
+        while True:
+            q = self.A(d)
+            alpha = gamma/d.dot(q)  # positive definite
+            if not np.isfinite(alpha):
+                self.note.cprint(
+                    "\niteration : %08u   alpha = NAN\n... dead." % ii)
+                return self.x, 0
+            self.x += d * alpha  # update the current estimate
+            if np.signbit(np.real(alpha)):
+                about.warnings.cprint(
+                    "WARNING: positive definiteness of A violated.")
+                r = self.b - self.A(self.x)
+            elif (ii % self.reset) == 0:
+                # restart: recompute the residual explicitly to remove
+                # accumulated round-off error
+                r = self.b - self.A(self.x)
+            else:
+                r -= q * alpha
+            s = self.W(r)
+            gamma_ = gamma
+            gamma = r.dot(s)
+            if np.signbit(np.real(gamma)):
+                about.warnings.cprint(
+                    "WARNING: positive definiteness of W violated.")
+            beta = max(0, gamma/gamma_)  # enforced non-negative for stability
+            d = s + d*beta  # next conjugated direction
+
+            # relative residual measure compared against the tolerance
+            delta = delta_*np.absolute(gamma)**0.5
+            self.note.cflush(
+                "\niteration : %08u   alpha = %3.1E   beta = %3.1E   "
+                "delta = %3.1E" % (ii, np.real(alpha), np.real(beta),
+                                   np.real(delta)))
+            if gamma == 0:
+                convergence = clevel+1
+                self.note.cprint("   convergence level : INF\n... done.")
+                break
+            elif np.absolute(delta) < tol:
+                convergence += 1
+                self.note.cflush("   convergence level : %u" % convergence)
+                if convergence == clevel:
+                    self.note.cprint("\n... done.")
+                    break
+            else:
+                convergence = max(0, convergence-1)
+            if ii == limii:
+                self.note.cprint("\n... quit.")
+                break
+
+            if self.spam is not None:
+                self.spam(self.x, ii)
+
+            ii += 1
+
+        if self.spam is not None:
+            self.spam(self.x, ii)
+        return self.x, convergence
+
+    ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+    def __repr__(self):
+        return "<nifty_tools.conjugate_gradient>"
+
+##=============================================================================
+
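+# A minimal usage sketch (`A` and `b` are placeholders for a callable,
+# positive definite operator and a NIFTY field set up elsewhere):
+#
+#     cg = ConjugateGradient()
+#     x, convergence = cg(A, b, note=True, tol=1E-4, clevel=3)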
diff --git a/nifty/operators/__init__.py b/nifty/operators/__init__.py
index 72effb71f5f5def1edc9b0ddd42cd39b0584b95b..577aeca4e047d74435360728bba1690cb3dd5fd9 100644
--- a/nifty/operators/__init__.py
+++ b/nifty/operators/__init__.py
@@ -29,6 +29,8 @@ from endomorphic_operator import EndomorphicOperator
 
 from fft_operator import *
 
+from propagator_operator import PropagatorOperator
+
 from nifty_operators import operator,\
                             diagonal_operator,\
                             power_operator,\
diff --git a/nifty/operators/diagonal_operator/diagonal_operator.py b/nifty/operators/diagonal_operator/diagonal_operator.py
index 34c219a12feccfba438d0b4e59799e7e636862f2..0b980fb57307ca94ef84da79fc86ca2254ac34b3 100644
--- a/nifty/operators/diagonal_operator/diagonal_operator.py
+++ b/nifty/operators/diagonal_operator/diagonal_operator.py
@@ -163,15 +163,17 @@ class DiagonalOperator(EndomorphicOperator):
         # the one of x, reshape the local data of self and apply it directly
         active_axes = []
         if spaces is None:
-            for axes in x.domain_axes:
-                active_axes += axes
+            if self.domain != ():
+                for axes in x.domain_axes:
+                    active_axes += axes
         else:
             for space_index in spaces:
                 active_axes += x.domain_axes[space_index]
 
         if types is None:
-            for axes in x.field_type_axes:
-                active_axes += axes
+            if self.field_type != ():
+                for axes in x.field_type_axes:
+                    active_axes += axes
         else:
             for type_index in types:
                 active_axes += x.field_type_axes[type_index]
diff --git a/nifty/operators/linear_operator/linear_operator.py b/nifty/operators/linear_operator/linear_operator.py
index 53496fdda094688376b9a7ae6f5e50e0430ef102..25a128498ac4e3ee0b4474e628ac47f438692935 100644
--- a/nifty/operators/linear_operator/linear_operator.py
+++ b/nifty/operators/linear_operator/linear_operator.py
@@ -186,7 +186,7 @@ class LinearOperator(object):
                     "match."))
         else:
             for i, field_type_index in enumerate(types):
-                if x.field_types[field_type_index] != self_field_type[i]:
+                if x.field_type[field_type_index] != self_field_type[i]:
                     raise ValueError(about._errors.cstring(
                         "ERROR: The operator's and and field's field_type "
                         "don't match."))
diff --git a/nifty/operators/propagator_operator/__init__.py b/nifty/operators/propagator_operator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a1cc3fa2ae00bfce335ea431576bc15ab85e49c
--- /dev/null
+++ b/nifty/operators/propagator_operator/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from propagator_operator import PropagatorOperator
diff --git a/nifty/operators/propagator_operator/propagator_operator.py b/nifty/operators/propagator_operator/propagator_operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..039d5e80461ffc36be050a2475e33dbf03baf10a
--- /dev/null
+++ b/nifty/operators/propagator_operator/propagator_operator.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+from nifty.config import about
+from nifty.minimization import ConjugateGradient
+
+from nifty.operators.linear_operator import LinearOperator
+
+
+class PropagatorOperator(LinearOperator):
+    """
+        ..                                                                            __
+        ..                                                                          /  /_
+        ..      _______   _____   ______    ______    ____ __   ____ __   ____ __  /   _/  ______    _____
+        ..    /   _   / /   __/ /   _   | /   _   | /   _   / /   _   / /   _   / /  /   /   _   | /   __/
+        ..   /  /_/  / /  /    /  /_/  / /  /_/  / /  /_/  / /  /_/  / /  /_/  / /  /_  /  /_/  / /  /
+        ..  /   ____/ /__/     \______/ /   ____/  \______|  \___   /  \______|  \___/  \______/ /__/     operator class
+        .. /__/                        /__/                 /______/
+
+        NIFTY subclass for propagator operators (of a certain family)
+
+        The propagator operators :math:`D` implemented here have an inverse
+        formulation like :math:`(S^{-1} + M)`, :math:`(S^{-1} + N^{-1})`, or
+        :math:`(S^{-1} + R^\dagger N^{-1} R)` as appearing in Wiener filter
+        theory.
+
+        Parameters
+        ----------
+        S : operator
+            Covariance of the signal prior.
+        M : operator
+            Likelihood contribution.
+        R : operator
+            Response operator translating signal to (noiseless) data.
+        N : operator
+            Covariance of the noise prior or the likelihood, respectively.
+
+        See Also
+        --------
+        ConjugateGradient
+
+        Notes
+        -----
+        The propagator assembles the operators `S` and `M`, or `R` and `N`,
+        or only `N` into the predefined inverse form given above; the domain
+        is set automatically. The application of the inverse is done by
+        invoking a conjugate gradient.
+        Note that changes to `S`, `M`, `R` or `N` auto-update the propagator.
+
+        Examples
+        --------
+        >>> f = field(rg_space(4), val=[2, 4, 6, 8])
+        >>> S = power_operator(f.target, spec=1)
+        >>> N = diagonal_operator(f.domain, diag=1)
+        >>> D = PropagatorOperator(S=S, N=N)  # D^{-1} = S^{-1} + N^{-1}
+        >>> D(f).val
+        array([ 1.,  2.,  3.,  4.])
+
+        Attributes
+        ----------
+        domain : space
+            A space wherein valid arguments live; inherited from `S`.
+        field_type : field type
+            The field type of valid arguments; inherited from `S`.
+        target : space
+            The space wherein the operator output lives; inherited from `S`.
+        field_type_target : field type
+            The field type of the operator output; inherited from `S`.
+        symmetric : bool
+            Indicates that the operator is self-adjoint (``True``).
+        unitary : bool
+            Indicates that the operator is not unitary (``False``).
+        implemented : bool
+            Indicates that volume weights are implemented in the multiply
+            methods (``True``).
+        S : operator
+            Covariance of the signal prior.
+        M_times : function
+            Application of the likelihood contribution to a field.
+        inverter : ConjugateGradient
+            Minimizer used to apply the inverse of the propagator.
+    """
+
+    # ---Overwritten properties and methods---
+
+    def __init__(self, S=None, M=None, R=None, N=None, inverter=None):
+        """
+            Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
+            and `RN` if required.
+
+            Parameters
+            ----------
+            S : operator
+                Covariance of the signal prior.
+            M : operator
+                Likelihood contribution.
+            R : operator
+                Response operator translating signal to (noiseless) data.
+            N : operator
+                Covariance of the noise prior or the likelihood, respectively.
+
+        """
+
+        if S is None:
+            raise ValueError(about._errors.cstring(
+                "ERROR: The signal prior covariance S must be given."))
+
+        self.S = S
+        self.S_inverse_times = self.S.inverse_times
+
+        # build up the likelihood contribution
+        (self.M_times,
+         M_domain,
+         M_field_type,
+         M_target,
+         M_field_type_target) = self._build_likelihood_contribution(M, R, N)
+
+        # assert that S and the likelihood contribution have matching
+        # domains and targets
+        if not (self.domain == M_domain and
+                self.field_type == M_field_type and
+                self.target == M_target and
+                self.field_type_target == M_field_type_target):
+            raise ValueError(about._errors.cstring(
+                "ERROR: The domains and targets of the prior signal " +
+                "covariance and the likelihood contribution must be " +
+                "the same in the sense of '=='."))
+
+        if inverter is not None:
+            self.inverter = inverter
+        else:
+            self.inverter = ConjugateGradient()
+
+    # ---Mandatory properties and methods---
+
+    @property
+    def domain(self):
+        return self.S.domain
+
+    @property
+    def field_type(self):
+        return self.S.field_type
+
+    @property
+    def target(self):
+        return self.S.target
+
+    @property
+    def field_type_target(self):
+        return self.S.field_type_target
+
+    @property
+    def implemented(self):
+        return True
+
+    @property
+    def symmetric(self):
+        return True
+
+    @property
+    def unitary(self):
+        return False
+
+    # ---Added properties and methods---
+
+    def _build_likelihood_contribution(self, M, R, N):
+        # if M is given, use its times method and domain/target information
+        # directly and discard R and N
+        if M is not None:
+            return (M.times, M.domain, M.field_type,
+                    M.target, M.field_type_target)
+
+        if N is not None:
+            if R is not None:
+                # R^dagger N^{-1} R maps the signal domain onto itself
+                return (lambda z: R.adjoint_times(N.inverse_times(R.times(z))),
+                        R.domain, R.field_type, R.domain, R.field_type)
+            else:
+                # without a response operator the contribution reduces to
+                # N^{-1}
+                return (N.inverse_times,
+                        N.domain, N.field_type, N.target, N.field_type_target)
+        else:
+            raise ValueError(about._errors.cstring(
+                "ERROR: At least M or N must be given."))
+
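+    # Applying the propagator D to a field x means solving D^{-1} y = x,
+    # i.e. (S^{-1} + M) y = x, for y. This is done below by a conjugate
+    # gradient, with the signal covariance S as the default preconditioner.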
+    def _multiply(self, x, W=None, spam=None, reset=None, note=False,
+                  x0=None, tol=1E-4, clevel=1, limii=None, **kwargs):
+
+        if W is None:
+            W = self.S
+        (result, convergence) = self.inverter(A=self._inverse_multiply,
+                                              b=x,
+                                              W=W,
+                                              spam=spam,
+                                              reset=reset,
+                                              note=note,
+                                              x0=x0,
+                                              tol=tol,
+                                              clevel=clevel,
+                                              limii=limii)
+        # warn if the conjugate gradient did not converge
+        if not convergence:
+            about.warnings.cprint("WARNING: conjugate gradient failed.")
+
+        return result
+
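+    # the inverse propagator D^{-1} applied to x: the prior term S^{-1}(x)
+    # plus the likelihood contribution M(x)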
+    def _inverse_multiply(self, x, **kwargs):
+        result = self.S_inverse_times(x)
+        result += self.M_times(x)
+        return result
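+
+# A minimal Wiener filter sketch (assuming operators `S`, `R`, `N` and a data
+# field `d` set up elsewhere; the names here are illustrative only):
+#
+#     D = PropagatorOperator(S=S, R=R, N=N)    # D^{-1} = S^{-1} + R^dagger N^{-1} R
+#     j = R.adjoint_times(N.inverse_times(d))  # information source
+#     m = D(j)                                 # Wiener filter mean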