diff --git a/.gitignore b/.gitignore
index f2515d0a75ca91da6b4b468c3a212204a8e28590..8748468c1a32d4ab6b5be1ca43f38e63362cd8dc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,11 @@
 setup.cfg
 .idea
 .DS_Store
+*.pyc
+*.html
+.document
+.svn/
+*.csv
 
 # from https://github.com/github/gitignore/blob/master/Python.gitignore
 
@@ -95,4 +100,4 @@ ENV/
 .spyderproject
 
 # Rope project settings
-.ropeproject
\ No newline at end of file
+.ropeproject
diff --git a/demos/__init__.py b/demos/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/demos/critical_filtering.py b/demos/critical_filtering.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a9e2103fc63e75c3c449d943852cb446fc0d94a
--- /dev/null
+++ b/demos/critical_filtering.py
@@ -0,0 +1,157 @@
+
+from nifty import *
+from nifty.library.wiener_filter import WienerFilterEnergy
+from nifty.library.critical_filter import CriticalPowerEnergy
+import plotly.offline as pl
+import plotly.graph_objs as go
+
+from mpi4py import MPI
+comm = MPI.COMM_WORLD
+rank = comm.rank
+
+np.random.seed(42)
+
+
+def plot_parameters(m,t,p, p_d):
+
+    x = log(t.domain[0].kindex)
+    m = fft.adjoint_times(m)
+    m = m.val.get_full_data().real
+    t = t.val.get_full_data().real
+    p = p.val.get_full_data().real
+    p_d = p_d.val.get_full_data().real
+    pl.plot([go.Heatmap(z=m)], filename='map.html')
+    pl.plot([go.Scatter(x=x, y=t), go.Scatter(x=x, y=p), go.Scatter(x=x, y=p_d)], filename="t.html")
+
+
+class AdjointFFTResponse(LinearOperator):
+    def __init__(self, FFT, R, default_spaces=None):
+        super(AdjointFFTResponse, self).__init__(default_spaces)
+        self._domain = FFT.target
+        self._target = R.target
+        self.R = R
+        self.FFT = FFT
+
+    def _times(self, x, spaces=None):
+        return self.R(self.FFT.adjoint_times(x))
+
+    def _adjoint_times(self, x, spaces=None):
+        return self.FFT(self.R.adjoint_times(x))
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def target(self):
+        return self._target
+
+    @property
+    def unitary(self):
+        return False
+
+if __name__ == "__main__":
+
+    distribution_strategy = 'not'
+
+    # Set up position space
+    s_space = RGSpace([128,128])
+    # s_space = HPSpace(32)
+
+    # Define harmonic transformation and associated harmonic space
+    fft = FFTOperator(s_space)
+    h_space = fft.target[0]
+
+    # Setting up power space
+    p_space = PowerSpace(h_space, logarithmic=True,
+                         distribution_strategy=distribution_strategy)
+
+    # Choosing the prior correlation structure and defining correlation operator
+    p_spec = (lambda k: (.5 / (k + 1) ** 3))
+    S = create_power_operator(h_space, power_spectrum=p_spec,
+                              distribution_strategy=distribution_strategy)
+
+    # Drawing a sample sh from the prior distribution in harmonic space
+    sp = Field(p_space,  val=p_spec,
+               distribution_strategy=distribution_strategy)
+    sh = sp.power_synthesize(real_signal=True)
+
+
+    # Choosing the measurement instrument
+    # Instrument = SmoothingOperator(s_space, sigma=0.01)
+    Instrument = DiagonalOperator(s_space, diagonal=1.)
+    # Instrument._diagonal.val[200:400, 200:400] = 0
+    #Instrument._diagonal.val[64:512-64, 64:512-64] = 0
+
+
+    #Adding a harmonic transformation to the instrument
+    R = AdjointFFTResponse(fft, Instrument)
+
+    noise = 1.
+    N = DiagonalOperator(s_space, diagonal=noise, bare=True)
+    n = Field.from_random(domain=s_space,
+                          random_type='normal',
+                          std=sqrt(noise),
+                          mean=0)
+
+    # Creating the mock data
+    d = R(sh) + n
+
+    # The information source
+    j = R.adjoint_times(N.inverse_times(d))
+    realized_power = log(sh.power_analyze(logarithmic=p_space.config["logarithmic"],
+                                          nbin=p_space.config["nbin"]))
+    data_power = log(fft(d).power_analyze(logarithmic=p_space.config["logarithmic"],
+                                          nbin=p_space.config["nbin"]))
+    d_data = d.val.get_full_data().real
+    if rank == 0:
+        pl.plot([go.Heatmap(z=d_data)], filename='data.html')
+
+    #  minimization strategy
+
+    def convergence_measure(a_energy, iteration):  # prints current energy and iteration
+        x = a_energy.value
+        print (x, iteration)
+
+
+    minimizer1 = RelaxedNewton(convergence_tolerance=10e-2,
+                              convergence_level=2,
+                              iteration_limit=3,
+                              callback=convergence_measure)
+
+    minimizer2 = VL_BFGS(convergence_tolerance=0,
+                       iteration_limit=7,
+                       callback=convergence_measure,
+                       max_history_length=3)
+
+    # Setting starting position
+    flat_power = Field(p_space,val=10e-8)
+    m0 = flat_power.power_synthesize(real_signal=True)
+
+    t0 = Field(p_space, val=log(1./(1+p_space.kindex)**2))
+
+
+
+    for i in range(500):
+        S0 = create_power_operator(h_space, power_spectrum=exp(t0),
+                              distribution_strategy=distribution_strategy)
+
+        # Initializing the  nonlinear Wiener Filter energy
+        map_energy = WienerFilterEnergy(position=m0, d=d, R=R, N=N, S=S0)
+        # Solving the Wiener Filter analytically
+        D0 = map_energy.curvature
+        m0 = D0.inverse_times(j)
+        # Initializing the power energy with updated parameters
+        power_energy = CriticalPowerEnergy(position=t0, m=m0, D=D0, smoothness_prior=10., samples=3)
+
+        (power_energy, convergence) = minimizer1(power_energy)
+
+
+        # Setting new power spectrum
+        t0.val  = power_energy.position.val.real
+
+        # Plotting current estimate
+        print(i)
+        if i%50 == 0:
+            plot_parameters(m0,t0,log(sp), data_power)
+
+
diff --git a/demos/wiener_filter_advanced.py b/demos/wiener_filter_advanced.py
new file mode 100644
index 0000000000000000000000000000000000000000..95c282ec6aa5f17271e91241dec494ef3bbb3489
--- /dev/null
+++ b/demos/wiener_filter_advanced.py
@@ -0,0 +1,128 @@
+
+from nifty import *
+
+import plotly.offline as pl
+import plotly.graph_objs as go
+
+from mpi4py import MPI
+comm = MPI.COMM_WORLD
+rank = comm.rank
+
+np.random.seed(42)
+
+class AdjointFFTResponse(LinearOperator):
+    def __init__(self, FFT, R, default_spaces=None):
+        super(AdjointFFTResponse, self).__init__(default_spaces)
+        self._domain = FFT.target
+        self._target = R.target
+        self.R = R
+        self.FFT = FFT
+
+    def _times(self, x, spaces=None):
+        return self.R(self.FFT.adjoint_times(x))
+
+    def _adjoint_times(self, x, spaces=None):
+        return self.FFT(self.R.adjoint_times(x))
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def target(self):
+        return self._target
+
+    @property
+    def unitary(self):
+        return False
+
+
+
+if __name__ == "__main__":
+
+    distribution_strategy = 'not'
+
+    # Set up position space
+    s_space = RGSpace([128,128])
+    # s_space = HPSpace(32)
+
+    # Define harmonic transformation and associated harmonic space
+    fft = FFTOperator(s_space)
+    h_space = fft.target[0]
+
+    # Setting up power space
+    p_space = PowerSpace(h_space, distribution_strategy=distribution_strategy)
+
+    # Choosing the prior correlation structure and defining correlation operator
+    p_spec = (lambda k: (42 / (k + 1) ** 3))
+
+    S = create_power_operator(h_space, power_spectrum=p_spec,
+                              distribution_strategy=distribution_strategy)
+
+    # Drawing a sample sh from the prior distribution in harmonic space
+    sp = Field(p_space, val=p_spec,
+               distribution_strategy=distribution_strategy)
+    sh = sp.power_synthesize(real_signal=True)
+    ss = fft.adjoint_times(sh)
+
+    # Choosing the measurement instrument
+    # Instrument = SmoothingOperator(s_space, sigma=0.05)
+    Instrument = DiagonalOperator(s_space, diagonal=1.)
+#    Instrument._diagonal.val[200:400, 200:400] = 0
+
+    #Adding a harmonic transformation to the instrument
+    R = AdjointFFTResponse(fft, Instrument)
+    signal_to_noise = 1.
+    N = DiagonalOperator(s_space, diagonal=ss.var()/signal_to_noise, bare=True)
+    n = Field.from_random(domain=s_space,
+                          random_type='normal',
+                          std=ss.std()/np.sqrt(signal_to_noise),
+                          mean=0)
+
+    # Creating the mock data
+    d = R(sh) + n
+    j = R.adjoint_times(N.inverse_times(d))
+
+    # Choosing the minimization strategy
+
+    def convergence_measure(energy, iteration):  # prints current energy and iteration
+        x = energy.value
+        print (x, iteration)
+
+#    minimizer = SteepestDescent(convergence_tolerance=0,
+#                                iteration_limit=50,
+#                                callback=convergence_measure)
+
+    minimizer = RelaxedNewton(convergence_tolerance=0,
+                              iteration_limit=1,
+                              callback=convergence_measure)
+    #
+    # minimizer = VL_BFGS(convergence_tolerance=0,
+    #                    iteration_limit=50,
+    #                    callback=convergence_measure,
+    #                    max_history_length=3)
+    #
+
+    inverter = ConjugateGradient(convergence_level=3,
+                                 convergence_tolerance=10e-5,
+                                 preconditioner=None)
+    # Setting starting position
+    m0 = Field(h_space, val=.0)
+
+    # Initializing the Wiener Filter energy
+    energy = WienerFilterEnergy(position=m0, d=d, R=R, N=N, S=S, inverter=inverter)
+    D0 = energy.curvature
+
+    # Solving the problem analytically
+    m0 = D0.inverse_times(j)
+
+    sample_variance = Field(sh.domain,val=0. + 0j)
+    sample_mean = Field(sh.domain,val=0. + 0j)
+
+    # sampling the uncertainty map
+    n_samples = 1
+    for i in range(n_samples):
+        sample = sugar.generate_posterior_sample(m0,D0)
+        sample_variance += sample**2
+        sample_mean += sample
+    variance = sample_variance/n_samples - (sample_mean/n_samples)**2
+
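
As a quick check of the sample-variance estimate at the end of this demo: a standalone numpy illustration (hypothetical example, not part of the patch) of the identity Var[x] = E[x^2] - (E[x])^2 that the loop relies on:

import numpy as np

samples = np.random.normal(size=(100, 8))             # e.g. 100 posterior samples on 8 pixels
mean = samples.mean(axis=0)
variance = (samples ** 2).mean(axis=0) - mean ** 2    # E[x^2] - (E[x])^2
assert np.allclose(variance, samples.var(axis=0))     # agrees with numpy's estimator
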
diff --git a/demos/wiener_filter.py b/demos/wiener_filter_easy.py
similarity index 83%
rename from demos/wiener_filter.py
rename to demos/wiener_filter_easy.py
index 5a3162bac4911baeccf7268a183c1d8818fb3473..966bea58dbe3318d30d3c4b5a15ab3ec1f64f9c8 100644
--- a/demos/wiener_filter.py
+++ b/demos/wiener_filter_easy.py
@@ -20,7 +20,7 @@ if __name__ == "__main__":
     correlation_length = 0.1
     #variance of field in position space sqrt(<|s_x|^2>) (in unit of s)
     field_variance = 2.
-    #smoothing length that response (in same unit as L)
+    #smoothing length of response (in same unit as L)
     response_sigma = 0.1
 
     #defining resolution (pixels per dimension)
@@ -63,15 +63,8 @@ if __name__ == "__main__":
     d = R(ss) + n
 
     # Wiener filter
+
     j = R.adjoint_times(N.inverse_times(d))
     D = PropagatorOperator(S=S, N=N, R=R)
 
     m = D(j)
-
-    d_data = d.val.get_full_data().real
-    m_data = m.val.get_full_data().real
-    ss_data = ss.val.get_full_data().real
-#    if rank == 0:
-#        pl.plot([go.Heatmap(z=d_data)], filename='data.html')
-#        pl.plot([go.Heatmap(z=m_data)], filename='map.html')
-#        pl.plot([go.Heatmap(z=ss_data)], filename='map_orig.html')
diff --git a/demos/wiener_filter_hamiltonian.py b/demos/wiener_filter_hamiltonian.py
deleted file mode 100644
index 85c14d937ef2c3c4d5bea9249d8348e2c04c337b..0000000000000000000000000000000000000000
--- a/demos/wiener_filter_hamiltonian.py
+++ /dev/null
@@ -1,150 +0,0 @@
-from __future__ import division
-from __future__ import print_function
-from builtins import object
-
-from nifty import *
-
-import plotly.offline as pl
-import plotly.graph_objs as go
-
-from mpi4py import MPI
-comm = MPI.COMM_WORLD
-rank = comm.rank
-
-np.random.seed(42)
-
-class WienerFilterEnergy(Energy):
-    def __init__(self, position, D, j):
-        # in principle not necessary, but useful in order to make the signature
-        # explicit
-        super(WienerFilterEnergy, self).__init__(position)
-        self.D = D
-        self.j = j
-
-    def at(self, position):
-        return self.__class__(position, D=self.D, j=self.j)
-
-    @property
-    def value(self):
-        D_inv_x = self.D_inverse_x()
-        H = 0.5 * D_inv_x.vdot(self.position) - self.j.vdot(self.position)
-        return H.real
-
-    @property
-    def gradient(self):
-        D_inv_x = self.D_inverse_x()
-        g = D_inv_x - self.j
-        return_g = g.copy_empty(dtype=np.float)
-        return_g.val = g.val.real
-        return return_g
-
-    @property
-    def curvature(self):
-        class Dummy(object):
-            def __init__(self, x):
-                self.x = x
-            def inverse_times(self, *args, **kwargs):
-                return self.x.times(*args, **kwargs)
-        my_dummy = Dummy(self.D)
-        return my_dummy
-
-
-    @memo
-    def D_inverse_x(self):
-        return D.inverse_times(self.position)
-
-
-if __name__ == "__main__":
-
-    distribution_strategy = 'not'
-
-    # Set up spaces and fft transformation
-    s_space = RGSpace([512, 512])
-    fft = FFTOperator(s_space)
-    h_space = fft.target[0]
-    p_space = PowerSpace(h_space, distribution_strategy=distribution_strategy)
-
-    # create the field instances and power operator
-    pow_spec = (lambda k: (42. / (k + 1) ** 3))
-    S = create_power_operator(h_space, power_spectrum=pow_spec,
-                              distribution_strategy=distribution_strategy)
-
-    sp = Field(p_space, val=lambda z: pow_spec(z)**0.5,
-               distribution_strategy=distribution_strategy)
-    sh = sp.power_synthesize(real_signal=True)
-    ss = fft.inverse_times(sh)
-
-    # model the measurement process
-    R = SmoothingOperator.make(s_space, sigma=0.01)
-#    R = DiagonalOperator(s_space, diagonal=1.)
-#    R._diagonal.val[200:400, 200:400] = 0
-
-    signal_to_noise = 1
-    N = DiagonalOperator(s_space, diagonal=ss.var()/signal_to_noise, bare=True)
-    n = Field.from_random(domain=s_space,
-                          random_type='normal',
-                          std=ss.std()/np.sqrt(signal_to_noise),
-                          mean=0)
-
-    # create mock data
-    d = R(ss) + n
-
-    # set up reconstruction objects
-    j = R.adjoint_times(N.inverse_times(d))
-    D = PropagatorOperator(S=S, N=N, R=R)
-
-    def distance_measure(energy, iteration):
-        x = energy.position
-        print(iteration, (x-ss).norm()/ss.norm().real)
-
-#    minimizer = SteepestDescent(convergence_tolerance=0,
-#                                iteration_limit=50,
-#                                callback=distance_measure)
-
-    minimizer = RelaxedNewton(convergence_tolerance=0,
-                              iteration_limit=2,
-                              callback=distance_measure)
-
-#    minimizer = VL_BFGS(convergence_tolerance=0,
-#                        iteration_limit=50,
-#                        callback=distance_measure,
-#                        max_history_length=3)
-
-    m0 = Field(s_space, val=1.)
-
-    energy = WienerFilterEnergy(position=m0, D=D, j=j)
-
-    (energy, convergence) = minimizer(energy)
-
-    m = energy.position
-
-    d_data = d.val.get_full_data().real
-    if rank == 0:
-        pl.plot([go.Heatmap(z=d_data)], filename='data.html')
-
-
-    ss_data = ss.val.get_full_data().real
-    if rank == 0:
-        pl.plot([go.Heatmap(z=ss_data)], filename='ss.html')
-
-    sh_data = sh.val.get_full_data().real
-    if rank == 0:
-        pl.plot([go.Heatmap(z=sh_data)], filename='sh.html')
-
-    j_data = j.val.get_full_data().real
-    if rank == 0:
-        pl.plot([go.Heatmap(z=j_data)], filename='j.html')
-
-    jabs_data = np.abs(j.val.get_full_data())
-    jphase_data = np.angle(j.val.get_full_data())
-    if rank == 0:
-        pl.plot([go.Heatmap(z=jabs_data)], filename='j_abs.html')
-        pl.plot([go.Heatmap(z=jphase_data)], filename='j_phase.html')
-
-    m_data = m.val.get_full_data().real
-    if rank == 0:
-        pl.plot([go.Heatmap(z=m_data)], filename='map.html')
-
-#    grad_data = grad.val.get_full_data().real
-#    if rank == 0:
-#        pl.plot([go.Heatmap(z=grad_data)], filename='grad.html')
diff --git a/demos/wiener_filter_harmonic.py b/demos/wiener_filter_harmonic.py
deleted file mode 100644
index 667c216b483e3a36db17b625c5c6bc2977d036eb..0000000000000000000000000000000000000000
--- a/demos/wiener_filter_harmonic.py
+++ /dev/null
@@ -1,140 +0,0 @@
-from __future__ import division
-from builtins import range
-from nifty import *
-from mpi4py import MPI
-import plotly.offline as py
-import plotly.graph_objs as go
-
-
-comm = MPI.COMM_WORLD
-rank = comm.rank
-
-
-def plot_maps(x, name):
-
-    trace = [None]*len(x)
-
-    keys = list(x.keys())
-    field = x[keys[0]]
-    domain = field.domain[0]
-    shape = len(domain.shape)
-    max_n = domain.shape[0]*domain.distances[0]
-    step = domain.distances[0]
-    x_axis = np.arange(0, max_n, step)
-
-    if shape == 1:
-        for ii in range(len(x)):
-            trace[ii] = go.Scatter(x= x_axis, y=x[keys[ii]].val.get_full_data(), name=keys[ii])
-        fig = go.Figure(data=trace)
-
-        py.plot(fig, filename=name)
-
-    elif shape == 2:
-        for ii in range(len(x)):
-            py.plot([go.Heatmap(z=x[keys[ii]].val.get_full_data())], filename=keys[ii])
-    else:
-        raise TypeError("Only 1D and 2D field plots are supported")
-
-def plot_power(x, name):
-
-    layout = go.Layout(
-        xaxis=dict(
-            type='log',
-            autorange=True
-        ),
-        yaxis=dict(
-            type='log',
-            autorange=True
-        )
-    )
-
-    trace = [None]*len(x)
-
-    keys = list(x.keys())
-    field = x[keys[0]]
-    domain = field.domain[0]
-    x_axis = domain.kindex
-
-    for ii in range(len(x)):
-        trace[ii] = go.Scatter(x= x_axis, y=x[keys[ii]].val.get_full_data(), name=keys[ii])
-
-    fig = go.Figure(data=trace, layout=layout)
-    py.plot(fig, filename=name)
-
-np.random.seed(42)
-
-
-
-if __name__ == "__main__":
-
-    distribution_strategy = 'not'
-
-    # setting spaces
-    npix = np.array([500])  # number of pixels
-    total_volume = 1.  # total length
-
-    # setting signal parameters
-    lambda_s = .05  # signal correlation length
-    sigma_s = 10.  # signal variance
-
-
-    #setting response operator parameters
-    length_convolution = .025
-    exposure = 1.
-
-    # calculating parameters
-    k_0 = 4. / (2 * np.pi * lambda_s)
-    a_s = sigma_s ** 2. * lambda_s * total_volume
-
-    # creation of spaces
-    # x1 = RGSpace([npix,npix], distances=total_volume / npix,
-    #              zerocenter=False)
-    # k1 = RGRGTransformation.get_codomain(x1)
-
-    x1 = HPSpace(64)
-    k1 = HPLMTransformation.get_codomain(x1)
-    p1 = PowerSpace(harmonic_partner=k1, logarithmic=False)
-
-    # creating Power Operator with given spectrum
-    spec = (lambda k: a_s / (1 + (k / k_0) ** 2) ** 2)
-    p_field = Field(p1, val=spec)
-    S_op = create_power_operator(k1, spec)
-
-    # creating FFT-Operator and Response-Operator with Gaussian convolution
-    # adjust dtype_target probperly
-    Fft_op = FFTOperator(domain=x1, target=k1,
-                        domain_dtype=np.float64,
-                        target_dtype=np.float64)
-    R_op = ResponseOperator(x1, sigma=[length_convolution],
-                            exposure=[exposure])
-
-    # drawing a random field
-    sk = p_field.power_synthesize(real_power=True, mean=0.)
-    s = Fft_op.adjoint_times(sk)
-
-    signal_to_noise = 1
-    N_op = DiagonalOperator(R_op.target, diagonal=s.var()/signal_to_noise, bare=True)
-    n = Field.from_random(domain=R_op.target,
-                          random_type='normal',
-                          std=s.std()/np.sqrt(signal_to_noise),
-                          mean=0)
-
-    d = R_op(s) + n
-
-    # Wiener filter
-    j = Fft_op.times(R_op.adjoint_times(N_op.inverse_times(d)))
-    D = HarmonicPropagatorOperator(S=S_op, N=N_op, R=R_op)
-
-    mk = D(j)
-
-    m = Fft_op.adjoint_times(mk)
-
-    # z={}
-    # z["signal"] = s
-    # z["reconstructed_map"] = m
-    # z["data"] = d
-    # z["lambda"] = R_op(s)
-    #
-    # plot_maps(z, "Wiener_filter.html")
-
-
diff --git a/demos/wiener_filter_unit.py b/demos/wiener_filter_unit.py
deleted file mode 100644
index 1441d264603cf2593687c5ab899cbbd8536c2f64..0000000000000000000000000000000000000000
--- a/demos/wiener_filter_unit.py
+++ /dev/null
@@ -1,132 +0,0 @@
-from __future__ import division
-from builtins import range
-from nifty import *
-from mpi4py import MPI
-import plotly.offline as py
-import plotly.graph_objs as go
-
-
-comm = MPI.COMM_WORLD
-rank = comm.rank
-
-
-def plot_maps(x, name):
-
-    trace = [None]*len(x)
-
-    keys = list(x.keys())
-    field = x[keys[0]]
-    domain = field.domain[0]
-    shape = len(domain.shape)
-    max_n = domain.shape[0]*domain.distances[0]
-    step = domain.distances[0]
-    x_axis = np.arange(0, max_n, step)
-
-    if shape == 1:
-        for ii in range(len(x)):
-            trace[ii] = go.Scatter(x= x_axis, y=x[keys[ii]].val.get_full_data(), name=keys[ii])
-        fig = go.Figure(data=trace)
-
-        py.plot(fig, filename=name)
-
-    elif shape == 2:
-        for ii in range(len(x)):
-            py.plot([go.Heatmap(z=x[keys[ii]].val.get_full_data())], filename=keys[ii])
-    else:
-        raise TypeError("Only 1D and 2D field plots are supported")
-
-def plot_power(x, name):
-
-    layout = go.Layout(
-        xaxis=dict(
-            type='log',
-            autorange=True
-        ),
-        yaxis=dict(
-            type='log',
-            autorange=True
-        )
-    )
-
-    trace = [None]*len(x)
-
-    keys = list(x.keys())
-    field = x[keys[0]]
-    domain = field.domain[0]
-    x_axis = domain.kindex
-
-    for ii in range(len(x)):
-        trace[ii] = go.Scatter(x= x_axis, y=x[keys[ii]].val.get_full_data(), name=keys[ii])
-
-    fig = go.Figure(data=trace, layout=layout)
-    py.plot(fig, filename=name)
-
-np.random.seed(42)
-
-if __name__ == "__main__":
-
-    distribution_strategy = 'not'
-
-    # setting spaces
-    npix = np.array([500])  # number of pixels
-    total_volume = 1.  # total length
-
-    # setting signal parameters
-    lambda_s = .05  # signal correlation length
-    sigma_s = 10.  # signal variance
-
-
-    #setting response operator parameters
-    length_convolution = .025
-    exposure = 1.
-
-    # calculating parameters
-    k_0 = 4. / (2 * np.pi * lambda_s)
-    a_s = sigma_s ** 2. * lambda_s * total_volume
-
-    # creation of spaces
-    x1 = RGSpace(npix, distances=total_volume / npix,
-                 zerocenter=False)
-    k1 = RGRGTransformation.get_codomain(x1)
-    p1 = PowerSpace(harmonic_partner=k1, logarithmic=False)
-
-    # creating Power Operator with given spectrum
-    spec = (lambda k: a_s / (1 + (k / k_0) ** 2) ** 2)
-    p_field = Field(p1, val=spec)
-    S_op = create_power_operator(k1, spec)
-
-    # creating FFT-Operator and Response-Operator with Gaussian convolution
-    Fft_op = FFTOperator(domain=x1, target=k1,
-                        domain_dtype=np.float64,
-                        target_dtype=np.complex128)
-    R_op = ResponseOperator(x1, sigma=[length_convolution],
-                            exposure=[exposure])
-
-    # drawing a random field
-    sk = p_field.power_synthesize(real_signal=True, mean=0.)
-    s = Fft_op.inverse_times(sk)
-
-    signal_to_noise = 1
-    N_op = DiagonalOperator(R_op.target, diagonal=s.var()/signal_to_noise, bare=True)
-    n = Field.from_random(domain=R_op.target,
-                          random_type='normal',
-                          std=s.std()/np.sqrt(signal_to_noise),
-                          mean=0)
-
-    d = R_op(s) + n
-
-    # Wiener filter
-    j = R_op.adjoint_times(N_op.inverse_times(d))
-    D = PropagatorOperator(S=S_op, N=N_op, R=R_op)
-
-    m = D(j)
-
-    z={}
-    z["signal"] = s
-    z["reconstructed_map"] = m
-    z["data"] = d
-    z["lambda"] = R_op(s)
-
-    plot_maps(z, "Wiener_filter.html")
-
-
diff --git a/nifty/__init__.py b/nifty/__init__.py
index 233fbe085cedc03d33dea6e37503f7c5b3b2201b..049ffc1a9af7ac98c9a7c21bba959abf00428281 100644
--- a/nifty/__init__.py
+++ b/nifty/__init__.py
@@ -32,8 +32,6 @@ from .config import dependency_injector,\
 
 from d2o import distributed_data_object, d2o_librarian
 
-from .energies import *
-
 from .field import Field
 
 from .random import Random
@@ -44,6 +42,8 @@ from .nifty_utilities import *
 
 from .field_types import *
 
+from .energies import *
+
 from .minimization import *
 
 from .spaces import *
@@ -55,3 +55,5 @@ from .probing import *
 from .sugar import *
 
 from . import plotting
+
+from . import library
diff --git a/nifty/field.py b/nifty/field.py
index ad9d35a538531b8964403cb2be8e92c99be3041d..e08a70e197fb0cbb22452102a6bcda71fc04964a 100644
--- a/nifty/field.py
+++ b/nifty/field.py
@@ -1077,7 +1077,7 @@ class Field(Loggable, Versionable, object):
         if spaces is None:
             x_val = x.get_val(copy=False)
             y_val = y.get_val(copy=False)
-            result = (x_val.conjugate() * y_val).sum()
+            result = (y_val.conjugate() * x_val).sum()
             return result
         else:
             # create a diagonal operator which is capable of taking care of the
@@ -1091,17 +1091,12 @@ class Field(Loggable, Versionable, object):
             return dotted.sum(spaces=spaces)
 
     def norm(self):
-        """ Computes the Lq-norm of the field values.
-
-        Parameters
-        ----------
-        q : scalar
-            Parameter q of the Lq-norm (default: 2).
+        """ Computes the L2-norm of the field values.
 
         Returns
         -------
         norm : scalar
-            The Lq-norm of the field values.
+            The L2-norm of the field values.
 
         """
         return np.sqrt(np.abs(self.vdot(x=self)))
diff --git a/nifty/library/__init__.py b/nifty/library/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1ba169f989d547b3e40659d0ee371c00643897d
--- /dev/null
+++ b/nifty/library/__init__.py
@@ -0,0 +1,2 @@
+from .critical_filter import *
+from .wiener_filter import *
diff --git a/nifty/library/critical_filter/__init__.py b/nifty/library/critical_filter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..59526df8926d188c5d6cf6ece17550b0143bcd0c
--- /dev/null
+++ b/nifty/library/critical_filter/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+from .critical_power_curvature import CriticalPowerCurvature
+from .critical_power_energy import CriticalPowerEnergy
diff --git a/nifty/library/critical_filter/critical_power_curvature.py b/nifty/library/critical_filter/critical_power_curvature.py
new file mode 100644
index 0000000000000000000000000000000000000000..798e666e34b4f523ecae1eb931fa3580b6f97dda
--- /dev/null
+++ b/nifty/library/critical_filter/critical_power_curvature.py
@@ -0,0 +1,50 @@
+from nifty.operators.endomorphic_operator import EndomorphicOperator
+from nifty.operators.invertible_operator_mixin import InvertibleOperatorMixin
+from nifty.operators.diagonal_operator import DiagonalOperator
+
+
+class CriticalPowerCurvature(InvertibleOperatorMixin, EndomorphicOperator):
+    """The curvature of the CriticalPowerEnergy.
+
+    This operator implements the second derivative of the
+    CriticalPowerEnergy used in some minimization algorithms or
+    for error estimates of the power spectrum.
+
+
+    Parameters
+    ----------
+    theta: Field,
+        The map and inverse gamma prior contribution to the curvature.
+    T : SmoothnessOperator,
+        The smoothness prior contribution to the curvature.
+    """
+
+    # ---Overwritten properties and methods---
+
+    def __init__(self, theta, T, inverter=None, preconditioner=None):
+
+        self.theta = DiagonalOperator(theta.domain, diagonal=theta)
+        self.T = T
+        if preconditioner is None:
+            preconditioner = self.theta.inverse_times
+        self._domain = self.theta.domain
+        super(CriticalPowerCurvature, self).__init__(
+                                                 inverter=inverter,
+                                                 preconditioner=preconditioner)
+
+    def _times(self, x, spaces):
+        return self.T(x) + self.theta(x)
+
+    # ---Mandatory properties and methods---
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def self_adjoint(self):
+        return True
+
+    @property
+    def unitary(self):
+        return False
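
For orientation, a minimal numpy sketch (plain arrays standing in for NIFTY fields and operators; names are illustrative) of the action this curvature implements, T(x) + theta * x, together with the diagonal preconditioner it defaults to:

import numpy as np

def curvature_times(x, theta, T):
    # (T + diag(theta)) x : smoothness term plus map/inverse-gamma term
    return T.dot(x) + theta * x

def default_preconditioner(x, theta):
    # diag(theta)^-1 x, the preconditioner chosen when none is supplied
    return x / theta
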
diff --git a/nifty/library/critical_filter/critical_power_energy.py b/nifty/library/critical_filter/critical_power_energy.py
new file mode 100644
index 0000000000000000000000000000000000000000..7848aee5901b71af033225fb1404a329bfe73411
--- /dev/null
+++ b/nifty/library/critical_filter/critical_power_energy.py
@@ -0,0 +1,134 @@
+import numpy as np
+
+from nifty.energies.energy import Energy
+from nifty.operators.smoothness_operator import SmoothnessOperator
+from nifty.library.critical_filter import CriticalPowerCurvature
+from nifty.energies.memoization import memo
+
+from nifty.sugar import generate_posterior_sample
+from nifty import Field, exp
+
+
+class CriticalPowerEnergy(Energy):
+    """The Energy of the power spectrum according to the critical filter.
+
+    It describes the energy of the logarithmic amplitudes of the power spectrum
+    of a Gaussian random field under reconstruction uncertainty with smoothness
+    and inverse gamma prior. It is used to infer the correlation structure of a
+    correlated signal.
+
+    Parameters
+    ----------
+    position : Field,
+        The current position of this energy.
+    m : Field,
+        The map whose power spectrum is to be inferred.
+    D : EndomorphicOperator,
+        The curvature of the Gaussian encoding the posterior covariance.
+        If not specified, the map is assumed to carry no uncertainty.
+        default : None
+    alpha : float
+        The shape parameter of the inverse-gamma prior. 1.0 corresponds
+        to a non-informative prior.
+        default : 1.0
+    q : float
+        The cutoff parameter of the inverse-gamma prior. 0.0 corresponds
+        to a non-informative prior.
+        default : 0.0
+    smoothness_prior : float
+        Controls the strength of the smoothness prior.
+        default : 0.0
+    logarithmic : boolean
+        Whether the smoothness prior acts on a linear or a logarithmic scale.
+    samples : integer
+        Number of samples used for the estimation of the uncertainty
+        corrections.
+        default : 3
+    w : Field
+        The contribution from the map with or without uncertainty. It is used
+        to pass on the result of the costly sampling during the minimization.
+        default : None
+    inverter : ConjugateGradient
+        The inversion strategy to invert the curvature and to generate samples.
+        default : None
+    """
+
+    def __init__(self, position, m, D=None, alpha=1.0, q=0.,
+                 smoothness_prior=0., logarithmic=True, samples=3, w=None,
+                 inverter=None):
+        super(CriticalPowerEnergy, self).__init__(position=position)
+        self.m = m
+        self.D = D
+        self.samples = samples
+        self.smoothness_prior = np.float(smoothness_prior)
+        self.alpha = Field(self.position.domain, val=alpha)
+        self.q = Field(self.position.domain, val=q)
+        self.T = SmoothnessOperator(domain=self.position.domain[0],
+                                    strength=self.smoothness_prior,
+                                    logarithmic=logarithmic)
+        self.rho = self.position.domain[0].rho
+        self._w = w
+        self._inverter = inverter
+
+    def at(self, position):
+        return self.__class__(position, self.m, D=self.D, alpha=self.alpha,
+                              q=self.q, smoothness_prior=self.smoothness_prior,
+                              w=self.w, samples=self.samples,
+                              inverter=self._inverter)
+
+    @property
+    def value(self):
+        energy = self._theta.sum()
+        energy += self.position.vdot(self._rho_prime, bare=True)
+        energy += 0.5 * self.position.vdot(self._Tt)
+        return energy.real
+
+    @property
+    def gradient(self):
+        gradient = -self._theta.weight(-1)
+        gradient += (self._rho_prime).weight(-1)
+        gradient += self._Tt
+        gradient.val = gradient.val.real
+        return gradient
+
+    @property
+    def curvature(self):
+        curvature = CriticalPowerCurvature(theta=self._theta.weight(-1),
+                                           T=self.T, inverter=self._inverter)
+        return curvature
+
+    @property
+    def w(self):
+        if self._w is None:
+            w = Field(domain=self.position.domain, val=0., dtype=self.m.dtype)
+            if self.D is not None:
+                for i in range(self.samples):
+                    posterior_sample = generate_posterior_sample(
+                                                            self.m, self.D)
+                    projected_sample = posterior_sample.power_analyze(
+                     logarithmic=self.position.domain[0].config["logarithmic"],
+                     nbin=self.position.domain[0].config["nbin"])
+                    w += (projected_sample) * self.rho
+                w /= float(self.samples)
+            else:
+                w = self.m.power_analyze(
+                     logarithmic=self.position.domain[0].config["logarithmic"],
+                     nbin=self.position.domain[0].config["nbin"])
+                w *= self.rho
+            self._w = w
+        return self._w
+
+    @property
+    @memo
+    def _theta(self):
+        return exp(-self.position) * (self.q + self.w / 2.)
+
+    @property
+    @memo
+    def _rho_prime(self):
+        return self.alpha - 1. + self.rho / 2.
+
+    @property
+    @memo
+    def _Tt(self):
+        return self.T(self.position)
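
A minimal numpy sketch of the objective and gradient coded above (volume weights and the d2o distribution are ignored here; this is an illustration under those assumptions, not the NIFTY implementation), with theta = exp(-t) * (q + w/2):

import numpy as np

def critical_power_value_and_gradient(t, w, rho, T, alpha=1.0, q=0.0):
    # t: log power spectrum, w: projected map power, rho: modes per bin,
    # T: symmetric smoothness-prior matrix
    theta = np.exp(-t) * (q + w / 2.0)
    rho_prime = alpha - 1.0 + rho / 2.0
    Tt = T.dot(t)
    value = theta.sum() + t.dot(rho_prime) + 0.5 * t.dot(Tt)
    gradient = -theta + rho_prime + Tt
    return value, gradient
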
diff --git a/nifty/library/wiener_filter/__init__.py b/nifty/library/wiener_filter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..58a4a46f633b957b1cd213071f7776d6a5cc6440
--- /dev/null
+++ b/nifty/library/wiener_filter/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+from .wiener_filter_curvature import WienerFilterCurvature
+from .wiener_filter_energy import WienerFilterEnergy
diff --git a/nifty/library/wiener_filter/wiener_filter_curvature.py b/nifty/library/wiener_filter/wiener_filter_curvature.py
new file mode 100644
index 0000000000000000000000000000000000000000..10ed55b91566aea7140f7c120680f72b0e2707bb
--- /dev/null
+++ b/nifty/library/wiener_filter/wiener_filter_curvature.py
@@ -0,0 +1,53 @@
+from nifty.operators import EndomorphicOperator,\
+                            InvertibleOperatorMixin
+
+
+class WienerFilterCurvature(InvertibleOperatorMixin, EndomorphicOperator):
+    """The curvature of the WienerFilterEnergy.
+
+    This operator implements the second derivative of the
+    WienerFilterEnergy used in some minimization algorithms or
+    for error estimates of the posterior maps. It is the
+    inverse of the propagator operator.
+
+
+    Parameters
+    ----------
+    R: LinearOperator,
+        The response operator of the Wiener filter measurement.
+    N : EndomorphicOperator
+        The noise covariance.
+    S: DiagonalOperator,
+        The prior signal covariance.
+
+    """
+
+    def __init__(self, R, N, S, inverter=None, preconditioner=None):
+
+        self.R = R
+        self.N = N
+        self.S = S
+        if preconditioner is None:
+            preconditioner = self.S.times
+        self._domain = self.S.domain
+        super(WienerFilterCurvature, self).__init__(
+                                                 inverter=inverter,
+                                                 preconditioner=preconditioner)
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def self_adjoint(self):
+        return True
+
+    @property
+    def unitary(self):
+        return False
+
+    # ---Added properties and methods---
+
+    def _times(self, x, spaces):
+        return (self.R.adjoint_times(self.N.inverse_times(self.R(x))) +
+                self.S.inverse_times(x))
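
A minimal dense-matrix sketch (plain numpy in place of the NIFTY operators; illustrative only) of the action implemented in _times above, namely D^-1 x = R^T N^-1 R x + S^-1 x:

import numpy as np

def wiener_curvature_times(x, R, N, S):
    # R, N, S are dense matrices standing in for the operators above
    return R.T.dot(np.linalg.solve(N, R.dot(x))) + np.linalg.solve(S, x)
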
diff --git a/nifty/library/wiener_filter/wiener_filter_energy.py b/nifty/library/wiener_filter/wiener_filter_energy.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eec64c51746c6121008f8890b79fcf1da25654d
--- /dev/null
+++ b/nifty/library/wiener_filter/wiener_filter_energy.py
@@ -0,0 +1,61 @@
+from nifty.energies.energy import Energy
+from nifty.energies.memoization import memo
+from nifty.library.wiener_filter import WienerFilterCurvature
+
+
+class WienerFilterEnergy(Energy):
+    """The Energy for the Wiener filter.
+
+    It covers the case of a linear measurement with
+    Gaussian noise and a Gaussian signal prior with known covariance.
+
+    Parameters
+    ----------
+    position: Field,
+        The current position.
+    d : Field,
+        The data.
+    R : Operator,
+        The response operator, describing the measurement process.
+    N : EndomorphicOperator,
+        The noise covariance in data space.
+    S : EndomorphicOperator,
+        The prior signal covariance in harmonic space.
+    """
+
+    def __init__(self, position, d, R, N, S, inverter=None):
+        super(WienerFilterEnergy, self).__init__(position=position)
+        self.d = d
+        self.R = R
+        self.N = N
+        self.S = S
+        self.inverter = inverter
+
+    def at(self, position):
+        return self.__class__(position=position, d=self.d, R=self.R, N=self.N,
+                              S=self.S, inverter=self.inverter)
+
+    @property
+    @memo
+    def value(self):
+        return 0.5*self.position.vdot(self._Dx) - self._j.vdot(self.position)
+
+    @property
+    @memo
+    def gradient(self):
+        return self._Dx - self._j
+
+    @property
+    @memo
+    def curvature(self):
+        return WienerFilterCurvature(R=self.R, N=self.N, S=self.S,
+                                     inverter=self.inverter)
+
+    @memo
+    def _Dx(self):
+        return self.curvature(self.position)
+
+    @property
+    @memo
+    def _j(self):
+        return self.R.adjoint_times(self.N.inverse_times(self.d))
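
For reference, a dense-matrix sketch (illustrative numpy under the same stand-in assumptions as above, not the NIFTY API) of this quadratic energy and its analytic minimum, the Wiener filter solution m = D j with D = (R^T N^-1 R + S^-1)^-1 and j = R^T N^-1 d:

import numpy as np

def wiener_energy(m, d, R, N, S):
    D_inv = R.T.dot(np.linalg.solve(N, R)) + np.linalg.inv(S)   # curvature
    j = R.T.dot(np.linalg.solve(N, d))                          # information source
    return 0.5 * m.dot(D_inv.dot(m)) - j.dot(m)

def wiener_minimum(d, R, N, S):
    D_inv = R.T.dot(np.linalg.solve(N, R)) + np.linalg.inv(S)
    j = R.T.dot(np.linalg.solve(N, d))
    return np.linalg.solve(D_inv, j)                            # m = D j
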
diff --git a/nifty/minimization/descent_minimizer.py b/nifty/minimization/descent_minimizer.py
index 8d082bc05d51294343fdc2a0857429fbbe09e5d6..4dbf2c652fe6c4563d24517b0cb4eecb37e1ac1a 100644
--- a/nifty/minimization/descent_minimizer.py
+++ b/nifty/minimization/descent_minimizer.py
@@ -146,14 +146,14 @@ class DescentMinimizer(with_metaclass(NiftyMeta, type('NewBase', (Loggable, obje
                 break
 
             # current position is encoded in energy object
-            descend_direction = self.get_descend_direction(energy)
+            descent_direction = self.get_descent_direction(energy)
 
             # compute the step length, which minimizes energy.value along the
             # search direction
             step_length, f_k, new_energy = \
                 self.line_searcher.perform_line_search(
                                                energy=energy,
-                                               pk=descend_direction,
+                                               pk=descent_direction,
                                                f_k_minus_1=f_k_minus_1)
             f_k_minus_1 = energy.value
 
@@ -165,7 +165,7 @@ class DescentMinimizer(with_metaclass(NiftyMeta, type('NewBase', (Loggable, obje
 
             energy = new_energy
             # check convergence
-            delta = abs(gradient).max() * (step_length/gradient_norm)
+            delta = abs(gradient).max() * (step_length/np.sqrt(gradient_norm))
             self.logger.debug("Iteration:%08u step_length=%3.1E "
                               "delta=%3.1E energy=%3.1E" %
                               (iteration_number, step_length, delta,
@@ -195,5 +195,5 @@ class DescentMinimizer(with_metaclass(NiftyMeta, type('NewBase', (Loggable, obje
         return energy, convergence
 
     @abc.abstractmethod
-    def get_descend_direction(self, energy):
+    def get_descent_direction(self, energy):
         raise NotImplementedError
diff --git a/nifty/minimization/line_searching/line_search.py b/nifty/minimization/line_searching/line_search.py
index 27c39af5f897e9b46c2834cb213bfc8d54f9dd72..275607d09583ed3dd4edee02f59997fa6b4ec2d4 100644
--- a/nifty/minimization/line_searching/line_search.py
+++ b/nifty/minimization/line_searching/line_search.py
@@ -26,29 +26,31 @@ from future.utils import with_metaclass
 
 class LineSearch(with_metaclass(abc.ABCMeta, type('NewBase', (Loggable, object), {}))):
     """Class for determining the optimal step size along some descent direction.
-    
+
     Initialize the line search procedure which can be used by a specific line
     search method. Its finds the step size in a specific direction in the
     minimization process.
-    
+
     Attributes
     ----------
     line_energy : LineEnergy Object
         LineEnergy object from which we can extract energy at a specific point.
     f_k_minus_1 : Field
         Value of the field at the k-1 iteration of the line search procedure.
-    prefered_initial_step_size : float
+    preferred_initial_step_size : float
         Initial guess for the step length.
-    
+
     """
 
+    __metaclass__ = abc.ABCMeta
+
     def __init__(self):
 
-        
+
 
         self.line_energy = None
         self.f_k_minus_1 = None
-        self.prefered_initial_step_size = None
+        self.preferred_initial_step_size = None
 
     def _set_line_energy(self, energy, pk, f_k_minus_1=None):
         """Set the coordinates for a new line search.
@@ -57,13 +59,13 @@ class LineSearch(with_metaclass(abc.ABCMeta, type('NewBase', (Loggable, object),
         ----------
         energy : Energy object
             Energy object from which we can calculate the energy, gradient and
-            curvature at a specific point.        
+            curvature at a specific point.
         pk : Field
             Unit vector pointing into the search direction.
         f_k_minus_1 : float
-            Value of the fuction (energy) which will be minimized at the k-1 
+            Value of the fuction (energy) which will be minimized at the k-1
             iteration of the line search procedure. (Default: None)
-            
+
         """
         self.line_energy = LineEnergy(position=0.,
                                       energy=energy,
diff --git a/nifty/minimization/line_searching/line_search_strong_wolfe.py b/nifty/minimization/line_searching/line_search_strong_wolfe.py
index 54f869a890b75a38ab375032a0542d9250f70535..749180bd2d9389b97539b69b7254ae6cb531ac01 100644
--- a/nifty/minimization/line_searching/line_search_strong_wolfe.py
+++ b/nifty/minimization/line_searching/line_search_strong_wolfe.py
@@ -26,9 +26,9 @@ from .line_search import LineSearch
 class LineSearchStrongWolfe(LineSearch):
     """Class for finding a step size that satisfies the strong Wolfe conditions.
 
-    Algorithm contains two stages. It begins whit a trial step length and it
-    keeps increasing the it until it finds an acceptable step length or an
-    interval. If it does not satisfy the Wolfe conditions it performs the Zoom
+    The algorithm contains two stages. It begins with a trial step length and
+    keeps increasing it until it finds an acceptable step length or an
+    interval. If it does not satisfy the Wolfe conditions, it performs the Zoom
     algorithm (second stage). By interpolating it decreases the size of the
     interval until an acceptable step length is found.
 
@@ -122,8 +122,8 @@ class LineSearchStrongWolfe(LineSearch):
 
         # set alphas
         alpha0 = 0.
-        if self.prefered_initial_step_size is not None:
-            alpha1 = self.prefered_initial_step_size
+        if self.preferred_initial_step_size is not None:
+            alpha1 = self.preferred_initial_step_size
         elif old_phi_0 is not None and phiprime_0 != 0:
             alpha1 = min(1.0, 1.01*2*(phi_0 - old_phi_0)/phiprime_0)
             if alpha1 < 0:
diff --git a/nifty/minimization/relaxed_newton.py b/nifty/minimization/relaxed_newton.py
index 5d7767caecd1f8ef374e7f2468b6f93deda55182..508a9eb0978e953ce3df5ba23e977b6b2002477b 100644
--- a/nifty/minimization/relaxed_newton.py
+++ b/nifty/minimization/relaxed_newton.py
@@ -32,9 +32,9 @@ class RelaxedNewton(DescentMinimizer):
                                 convergence_level=convergence_level,
                                 iteration_limit=iteration_limit)
 
-        self.line_searcher.prefered_initial_step_size = 1.
+        self.line_searcher.preferred_initial_step_size = 1.
 
-    def get_descend_direction(self, energy):
+    def get_descent_direction(self, energy):
         """ Calculates the descent direction according to a Newton scheme.
 
         The descent direction is determined by weighting the gradient at the
@@ -50,12 +50,9 @@ class RelaxedNewton(DescentMinimizer):
 
         Returns
         -------
-        descend_direction : Field
+        descent_direction : Field
            Returns the descent direction with proposed step length. In a
            quadratic potential this corresponds to the optimal step.
 
         """
-        gradient = energy.gradient
-        curvature = energy.curvature
-        descend_direction = curvature.inverse_times(gradient)
-        return descend_direction * -1
+        return -energy.curvature.inverse_times(energy.gradient)
diff --git a/nifty/minimization/steepest_descent.py b/nifty/minimization/steepest_descent.py
index 43928b22b1c3822998b9f83d251ac8a6d604d808..388dd95d19f38cf99843846906a1de77901c5462 100644
--- a/nifty/minimization/steepest_descent.py
+++ b/nifty/minimization/steepest_descent.py
@@ -22,7 +22,7 @@ from .descent_minimizer import DescentMinimizer
 
 
 class SteepestDescent(DescentMinimizer):
-    def get_descend_direction(self, energy):
+    def get_descent_direction(self, energy):
         """ Implementation of the steepest descent minimization scheme.
 
         Also known as 'gradient descent'. This algorithm simply follows the
@@ -36,10 +36,9 @@ class SteepestDescent(DescentMinimizer):
 
         Returns
         -------
-        descend_direction : Field
+        descent_direction : Field
             Returns the descent direction.
 
         """
 
-        descend_direction = energy.gradient
-        return descend_direction * -1
+        return -energy.gradient
diff --git a/nifty/minimization/vl_bfgs.py b/nifty/minimization/vl_bfgs.py
index 826335f9cd9ff43e9cb3fb779a3059fd71992c0e..1b427f2c9e47f615853ce6024e712b06f5c63977 100644
--- a/nifty/minimization/vl_bfgs.py
+++ b/nifty/minimization/vl_bfgs.py
@@ -43,7 +43,7 @@ class VL_BFGS(DescentMinimizer):
         self._information_store = None
         return super(VL_BFGS, self).__call__(energy)
 
-    def get_descend_direction(self, energy):
+    def get_descent_direction(self, energy):
         """Implementation of the Vector-free L-BFGS minimization scheme.
 
         Find the descent direction by using the inverse Hessian.
@@ -60,7 +60,7 @@ class VL_BFGS(DescentMinimizer):
 
         Returns
         -------
-        descend_direction : Field
+        descent_direction : Field
             Returns the descent direction.
 
         References
@@ -83,11 +83,11 @@ class VL_BFGS(DescentMinimizer):
         b = self._information_store.b
         delta = self._information_store.delta
 
-        descend_direction = delta[0] * b[0]
+        descent_direction = delta[0] * b[0]
         for i in range(1, len(delta)):
-            descend_direction += delta[i] * b[i]
+            descent_direction += delta[i] * b[i]
 
-        return descend_direction
+        return descent_direction
 
 
 class InformationStore(object):
@@ -107,34 +107,35 @@ class InformationStore(object):
     max_history_length : integer
         Maximum number of stored past updates.
     s : List
-        List of past position differences, which are Fields.
+        Circular buffer of past position differences, which are Fields.
     y : List
-        List of past gradient differences, which are Fields.
+        Circular buffer of past gradient differences, which are Fields.
     last_x : Field
-        Initial position in variable space.
+        Latest position in variable space.
     last_gradient : Field
-        Gradient at initial position.
+        Gradient at latest position.
     k : integer
-        Number of currently stored past updates.
-    _ss_store : dictionary
-        Dictionary of scalar products between different elements of s.
-    _sy_store : dictionary
-        Dictionary of scalar products between elements of s and y.
-    _yy_store : dictionary
-        Dictionary of scalar products between different elements of y.
+        Number of updates that have taken place.
+    ss : numpy.ndarray
+        2D circular buffer of scalar products between different elements of s.
+    sy : numpy.ndarray
+        2D circular buffer of scalar products between elements of s and y.
+    yy : numpy.ndarray
+        2D circular buffer of scalar products between different elements of y.
 
     """
     def __init__(self, max_history_length, x0, gradient):
         self.max_history_length = max_history_length
-        self.s = LimitedList(max_history_length)
-        self.y = LimitedList(max_history_length)
+        self.s = [None]*max_history_length
+        self.y = [None]*max_history_length
         self.last_x = x0.copy()
         self.last_gradient = gradient.copy()
         self.k = 0
 
-        self._ss_store = {}
-        self._sy_store = {}
-        self._yy_store = {}
+        mmax = max_history_length
+        self.ss = np.empty((mmax, mmax), dtype=np.float64)
+        self.sy = np.empty((mmax, mmax), dtype=np.float64)
+        self.yy = np.empty((mmax, mmax), dtype=np.float64)
 
     @property
     def history_length(self):
@@ -155,15 +156,17 @@ class InformationStore(object):
         """
         result = []
         m = self.history_length
-        k = self.k
+        mmax = self.max_history_length
 
+        k = self.k
         s = self.s
+
         for i in range(m):
-            result.append(s[k-m+i])
+            result.append(s[(k-m+i) % mmax])
 
         y = self.y
         for i in range(m):
-            result.append(y[k-m+i])
+            result.append(y[(k-m+i) % mmax])
 
         result.append(self.last_gradient)
 
@@ -183,28 +186,36 @@ class InformationStore(object):
 
         """
         m = self.history_length
+        mmax = self.max_history_length
         k = self.k
         result = np.empty((2*m+1, 2*m+1), dtype=np.float)
 
+        # update the stores
+        k1 = (k-1) % mmax
         for i in range(m):
-            for j in range(m):
-                result[i, j] = self.ss_store(k-m+i, k-m+j)
-
-                sy_ij = self.sy_store(k-m+i, k-m+j)
-                result[i, m+j] = sy_ij
-                result[m+j, i] = sy_ij
+            kmi = (k-m+i) % mmax
+            self.ss[kmi, k1] = self.ss[k1, kmi] = self.s[kmi].vdot(self.s[k1])
+            self.yy[kmi, k1] = self.yy[k1, kmi] = self.y[kmi].vdot(self.y[k1])
+            self.sy[kmi, k1] = self.s[kmi].vdot(self.y[k1])
+        for j in range(m-1):
+            kmj = (k-m+j) % mmax
+            self.sy[k1, kmj] = self.s[k1].vdot(self.y[kmj])
 
-                result[m+i, m+j] = self.yy_store(k-m+i, k-m+j)
+        for i in range(m):
+            kmi = (k-m+i) % mmax
+            for j in range(m):
+                kmj = (k-m+j) % mmax
+                result[i, j] = self.ss[kmi, kmj]
+                result[i, m+j] = result[m+j, i] = self.sy[kmi, kmj]
+                result[m+i, m+j] = self.yy[kmi, kmj]
 
-            sgrad_i = self.sgrad_store(k-m+i)
-            result[2*m, i] = sgrad_i
-            result[i, 2*m] = sgrad_i
+            sgrad_i = self.s[kmi].vdot(self.last_gradient)
+            result[2*m, i] = result[i, 2*m] = sgrad_i
 
-            ygrad_i = self.ygrad_store(k-m+i)
-            result[2*m, m+i] = ygrad_i
-            result[m+i, 2*m] = ygrad_i
+            ygrad_i = self.y[kmi].vdot(self.last_gradient)
+            result[2*m, m+i] = result[m+i, 2*m] = ygrad_i
 
-        result[2*m, 2*m] = self.gradgrad_store()
+        result[2*m, 2*m] = self.last_gradient.norm()**2
 
         return result
 
@@ -234,185 +245,25 @@ class InformationStore(object):
         for i in range(2*m+1):
             delta[i] *= b_dot_b[m-1, 2*m-1]/b_dot_b[2*m-1, 2*m-1]
 
-        for j in range(m-1, -1, -1):
+        for j in range(m):
             delta_b_b = sum([delta[l]*b_dot_b[m+j, l] for l in range(2*m+1)])
             beta = delta_b_b/b_dot_b[j, m+j]
             delta[j] += (alpha[j] - beta)
 
         return delta
 
-    def ss_store(self, i, j):
-        """Updates the dictionary _ss_store with a new scalar product.
-
-        Returns the scalar product of s_i and s_j.
-
-        Parameters
-        ----------
-        i : integer
-            s index.
-        j : integer
-            s index.
-
-        Returns
-        -------
-        _ss_store[key] : float
-            Scalar product of s_i and s_j.
-
-        """
-        key = tuple(sorted((i, j)))
-        if key not in self._ss_store:
-            self._ss_store[key] = self.s[i].vdot(self.s[j])
-        return self._ss_store[key]
-
-    def sy_store(self, i, j):
-        """Updates the dictionary _sy_store with a new scalar product.
-
-        Returns the scalar product of s_i and y_j.
-
-        Parameters
-        ----------
-        i : integer
-            s index.
-        j : integer
-            y index.
-
-        Returns
-        -------
-        _sy_store[key] : float
-            Scalar product of s_i and y_j.
-
-        """
-        key = (i, j)
-        if key not in self._sy_store:
-            self._sy_store[key] = self.s[i].vdot(self.y[j])
-        return self._sy_store[key]
-
-    def yy_store(self, i, j):
-        """Updates the dictionary _yy_store with a new scalar product.
-
-        Returns the scalar product of y_i and y_j.
-
-        Parameters
-        ----------
-        i : integer
-            y index.
-        j : integer
-            y index.
-
-        Returns
-        ------
-        _yy_store[key] : float
-            Scalar product of y_i and y_j.
-
-        """
-        key = tuple(sorted((i, j)))
-        if key not in self._yy_store:
-            self._yy_store[key] = self.y[i].vdot(self.y[j])
-        return self._yy_store[key]
-
-    def sgrad_store(self, i):
-        """Returns scalar product between s_i and gradient on initial position.
-
-        Returns
-        -------
-        scalar product : float
-            Scalar product.
-
-        """
-        return self.s[i].vdot(self.last_gradient)
-
-    def ygrad_store(self, i):
-        """Returns scalar product between y_i and gradient on initial position.
-
-        Returns
-        -------
-        scalar product : float
-            Scalar product.
-
-        """
-        return self.y[i].vdot(self.last_gradient)
-
-    def gradgrad_store(self):
-        """Returns scalar product of gradient on initial position with itself.
-
-        Returns
-        -------
-        scalar product : float
-            Scalar product.
-
-        """
-        return self.last_gradient.vdot(self.last_gradient)
-
     def add_new_point(self, x, gradient):
         """Updates the s list and y list.
 
-        Calculates the new position and gradient differences and adds them to
-        the respective list.
+        Calculates the new position and gradient differences and stores them
+        in the respective circular buffers.
 
         """
-        self.k += 1
-
-        new_s = x - self.last_x
-        self.s.add(new_s)
-
-        new_y = gradient - self.last_gradient
-        self.y.add(new_y)
+        mmax = self.max_history_length
+        self.s[self.k % mmax] = x - self.last_x
+        self.y[self.k % mmax] = gradient - self.last_gradient
 
         self.last_x = x.copy()
         self.last_gradient = gradient.copy()
 
-
-class LimitedList(object):
-    """Class for creating a list of limited length.
-
-    Parameters
-    ----------
-    history_length : integer
-        Maximum number of stored past updates.
-
-    Attributes
-    ----------
-    history_length : integer
-        Maximum number of stored past updates.
-    _offset : integer
-        Offset to correct the indices which are bigger than maximum history.
-        length.
-    _storage : list
-        List where input values are stored.
-
-    """
-    def __init__(self, history_length):
-        self.history_length = int(history_length)
-        self._offset = 0
-        self._storage = []
-
-    def __getitem__(self, index):
-        """Returns the element with index [index-offset].
-
-        Parameters
-        ----------
-        index : integer
-            Index of the selected element.
-
-        Returns
-        -------
-            selected element
-        """
-        return self._storage[index-self._offset]
-
-    def add(self, value):
-        """Adds a new element to the list.
-
-        If the list is of length maximum history then it removes the first
-        element first.
-
-        Parameters
-        ----------
-        value : anything
-            New element in the list.
-
-        """
-        if len(self._storage) == self.history_length:
-            self._storage.pop(0)
-            self._offset += 1
-        self._storage.append(value)
+        self.k += 1
diff --git a/nifty/operators/__init__.py b/nifty/operators/__init__.py
index 560ce3e57fc8b3527deb83c441de40bcc538dbc8..f88014017edbcccd5fe1f94e6449ea0675a2d8a3 100644
--- a/nifty/operators/__init__.py
+++ b/nifty/operators/__init__.py
@@ -32,10 +32,10 @@ from .invertible_operator_mixin import InvertibleOperatorMixin
 
 from .projection_operator import ProjectionOperator
 
-from .propagator_operator import PropagatorOperator
-
-from .propagator_operator import HarmonicPropagatorOperator
-
 from .composed_operator import ComposedOperator
 
 from .response_operator import ResponseOperator
+
+from .laplace_operator import LaplaceOperator
+
+from .smoothness_operator import SmoothnessOperator
diff --git a/nifty/operators/invertible_operator_mixin/invertible_operator_mixin.py b/nifty/operators/invertible_operator_mixin/invertible_operator_mixin.py
index dea80b129ce85af70a78b02537adb95ef850b24e..4517ff24d12d5aa00e21a21fe19d30dcfd680950 100644
--- a/nifty/operators/invertible_operator_mixin/invertible_operator_mixin.py
+++ b/nifty/operators/invertible_operator_mixin/invertible_operator_mixin.py
@@ -39,7 +39,8 @@ class InvertibleOperatorMixin(object):
         (default: ConjugateGradient)
 
     preconditioner : LinearOperator
-        Preconditions the minimizaion problem
+        Preconditioner that is used by ConjugateGradient if no minimizer is
+        given.
 
     Attributes
     ----------
diff --git a/nifty/operators/laplace_operator/__init__.py b/nifty/operators/laplace_operator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..61427ae4fa1061209d0e6ad1a10bae7b931c5f46
--- /dev/null
+++ b/nifty/operators/laplace_operator/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from .laplace_operator import LaplaceOperator
diff --git a/nifty/operators/laplace_operator/laplace_operator.py b/nifty/operators/laplace_operator/laplace_operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..cce914cfac1ed593a37ed6d9ae7abd2ed97e6e9f
--- /dev/null
+++ b/nifty/operators/laplace_operator/laplace_operator.py
@@ -0,0 +1,159 @@
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Copyright(C) 2013-2017 Max-Planck-Society
+#
+# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
+# and financially supported by the Studienstiftung des deutschen Volkes.
+
+import numpy as np
+from nifty.field import Field
+from nifty.spaces.power_space import PowerSpace
+from nifty.operators.endomorphic_operator import EndomorphicOperator
+
+import nifty.nifty_utilities as utilities
+
+
+class LaplaceOperator(EndomorphicOperator):
+    """A irregular LaplaceOperator with free boundary and excluding monopole.
+
+    This LaplaceOperator implements the second derivative of a Field defined
+    over a PowerSpace, on a logarithmic or linear scale, with vanishing
+    curvature at the boundary and starting at the second entry of the Field.
+    The second derivative on the irregular grid is calculated using finite
+    differences.
+
+    Parameters
+    ----------
+    domain : PowerSpace
+        The PowerSpace on which the operator acts.
+    logarithmic : boolean
+        Whether the second derivative is calculated on a logarithmic or a
+        linear scale (default: True).
+    """
+
+    def __init__(self, domain, default_spaces=None, logarithmic=True):
+        super(LaplaceOperator, self).__init__(default_spaces)
+        self._domain = self._parse_domain(domain)
+        if len(self.domain) != 1:
+            raise ValueError("The domain must contain exactly one PowerSpace.")
+
+        if not isinstance(self.domain[0], PowerSpace):
+            raise TypeError("The domain must contain exactly one PowerSpace.")
+
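+        # bin positions: log(kindex) for a logarithmic scale, kindex itself
+        # otherwise; the zero-mode bin gets the dummy position -1, since it
+        # is excluded from the smoothness measure anyway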
+        if logarithmic:
+            self.positions = self.domain[0].kindex.copy()
+            self.positions[1:] = np.log(self.positions[1:])
+            self.positions[0] = -1.
+        else:
+            self.positions = self.domain[0].kindex.copy()
+            self.positions[0] = -1
+
+        self.fwd_dist = self.positions[1:] - self.positions[:-1]
+
+    @property
+    def target(self):
+        return self._domain
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def unitary(self):
+        return False
+
+    @property
+    def symmetric(self):
+        return False
+
+    @property
+    def self_adjoint(self):
+        return False
+
+    def _times(self, x, spaces):
+        spaces = utilities.cast_axis_to_tuple(spaces, len(x.domain))
+        if spaces is None:
+            # this case means that x lives on only one space, which is
+            # identical to the space in the domain of `self`. Otherwise the
+            # input check of LinearOperator would have failed.
+            axes = x.domain_axes[0]
+        else:
+            axes = x.domain_axes[spaces[0]]
+        axis = axes[0]
+        prefix = (slice(None),) * axis
+        fwd_dist = self.fwd_dist.reshape((1,)*axis + self.fwd_dist.shape)
+        positions = self.positions.reshape((1,)*axis + self.positions.shape)
+        ret = x.val.copy_empty()
+        x = x.val
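+        # three-point finite-difference second derivative on the irregular
+        # grid; the first two entries and the last one are set to zero
+        # (free boundaries, no monopole contribution)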
+        ret[prefix + (slice(1, -1),)] = \
+            (-((x[prefix + (slice(1, -1),)] - x[prefix + (slice(0, -2),)]) /
+               fwd_dist[prefix + (slice(0, -1),)]) +
+             ((x[prefix + (slice(2, None),)] - x[prefix + (slice(1, -1),)]) /
+              fwd_dist[prefix + (slice(1, None),)]))
+        ret[prefix + (slice(1, -1),)] /= \
+            (positions[prefix + (slice(2, None),)] -
+             positions[prefix + (slice(None, -2),)])
+        ret *= 2.
+        ret[prefix + (slice(0, 2),)] = 0
+        ret[prefix + (slice(-1, None),)] = 0
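+        # the remaining entries get weighted with the square root of the bin
+        # distances; together with weight(power=-0.5) below this fixes the
+        # weighting of the quadratic form L^dagger L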
+        ret[prefix + (slice(2, None),)] *= \
+            np.sqrt(fwd_dist)[prefix + (slice(1, None),)]
+        return Field(self.domain, val=ret).weight(power=-0.5, spaces=spaces)
+
+    def _adjoint_times(self, x, spaces):
+        spaces = utilities.cast_axis_to_tuple(spaces, len(x.domain))
+        if spaces is None:
+            # this case means that x lives on only one space, which is
+            # identical to the space in the domain of `self`. Otherwise the
+            # input check of LinearOperator would have failed.
+            axes = x.domain_axes[0]
+        else:
+            axes = x.domain_axes[spaces[0]]
+        axis = axes[0]
+        prefix = (slice(None),) * axis
+        fwd_dist = self.fwd_dist.reshape((1,)*axis + self.fwd_dist.shape)
+        positions = self.positions.reshape((1,)*axis + self.positions.shape)
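+        # adjoint of _times: the weighted input is scattered back onto the
+        # grid with the transposed finite-difference stencil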
+        y = x.copy().weight(power=0.5).val
+        y[prefix + (slice(2, None),)] *= \
+            np.sqrt(fwd_dist)[prefix + (slice(1, None),)]
+        y[prefix + (slice(0, 2),)] = 0
+        y[prefix + (slice(-1, None),)] = 0
+        ret = y.copy_empty()
+        # initialise to zero so that the in-place additions below only
+        # accumulate the stencil contributions
+        ret[prefix + (slice(None),)] = 0.
+        y[prefix + (slice(1, -1),)] /= \
+            (positions[prefix + (slice(2, None),)] -
+             positions[prefix + (slice(None, -2),)])
+        y *= 2
+        ret[prefix + (slice(1, -1),)] = \
+            (-y[prefix + (slice(1, -1),)]/fwd_dist[prefix + (slice(0, -1),)] -
+             y[prefix + (slice(1, -1),)]/fwd_dist[prefix + (slice(1, None),)])
+        ret[prefix + (slice(0, -2),)] += \
+            y[prefix + (slice(1, -1),)] / fwd_dist[prefix + (slice(0, -1),)]
+        ret[prefix + (slice(2, None),)] += \
+            y[prefix + (slice(1, -1),)] / fwd_dist[prefix + (slice(1, None),)]
+        return Field(self.domain, val=ret).weight(-1, spaces=spaces)
+
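+    # plain 1-D numpy implementations of the stencil and its transpose,
+    # without the additional weighting applied in _times/_adjoint_times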
+    def _irregular_laplace(self, x):
+        ret = np.zeros_like(x)
+        ret[1:-1] = (-(x[1:-1] - x[0:-2]) / self.fwd_dist[:-1] +
+                      (x[2:] - x[1:-1]) / self.fwd_dist[1:])
+        ret[1:-1] /= self.positions[2:] - self.positions[:-2]
+        ret *= 2.
+        return ret
+
+    def _irregular_adj_laplace(self, x):
+        ret = np.zeros_like(x)
+        y = x.copy()
+        y[1:-1] /= self.positions[2:] - self.positions[:-2]
+        y *= 2
+        ret[1:-1] = -y[1:-1] / self.fwd_dist[:-1] - y[1:-1] / self.fwd_dist[1:]
+        ret[0:-2] += y[1:-1] / self.fwd_dist[:-1]
+        ret[2:] += y[1:-1] / self.fwd_dist[1:]
+        return ret
diff --git a/nifty/operators/propagator_operator/__init__.py b/nifty/operators/propagator_operator/__init__.py
deleted file mode 100644
index 8c7ca4132d8152498f97b16e24704177861575d3..0000000000000000000000000000000000000000
--- a/nifty/operators/propagator_operator/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Copyright(C) 2013-2017 Max-Planck-Society
-#
-# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
-# and financially supported by the Studienstiftung des deutschen Volkes.
-
-from .propagator_operator import PropagatorOperator
-from .harmonic_propagator_operator import HarmonicPropagatorOperator
diff --git a/nifty/operators/propagator_operator/harmonic_propagator_operator.py b/nifty/operators/propagator_operator/harmonic_propagator_operator.py
deleted file mode 100644
index 221ad2bdecc4a8c097a86fb34b7421f1bde1abc2..0000000000000000000000000000000000000000
--- a/nifty/operators/propagator_operator/harmonic_propagator_operator.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Copyright(C) 2013-2017 Max-Planck-Society
-#
-# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
-# and financially supported by the Studienstiftung des deutschen Volkes.
-
-from nifty.operators import EndomorphicOperator,\
-                            FFTOperator,\
-                            InvertibleOperatorMixin
-
-
-class HarmonicPropagatorOperator(InvertibleOperatorMixin, EndomorphicOperator):
-    """ NIFTY Harmonic Propagator Operator D.
-
-    The propagator operator D, is known from the Wiener Filter.
-    Its inverse functional form might look like:
-    D = (S^(-1) + M)^(-1)
-    D = (S^(-1) + N^(-1))^(-1)
-    D = (S^(-1) + R^(\dagger) N^(-1) R)^(-1)
-    In contrast to the PropagatorOperator the inference is done in the
-    harmonic space.
-
-    Parameters
-    ----------
-        S : LinearOperator
-            Covariance of the signal prior.
-        M : LinearOperator
-            Likelihood contribution.
-        R : LinearOperator
-            Response operator translating signal to (noiseless) data.
-        N : LinearOperator
-            Covariance of the noise prior or the likelihood, respectively.
-        inverter : class to invert explicitly defined operators
-            (default:ConjugateGradient)
-        preconditioner : Field
-            numerical preconditioner to speed up convergence
-        default_spaces : tuple of ints *optional*
-            Defines on which space(s) of a given field the Operator acts by
-            default (default: None)
-
-    Attributes
-    ----------
-    domain : tuple of DomainObjects, i.e. Spaces and FieldTypes
-        The domain on which the Operator's input Field lives.
-    target : tuple of DomainObjects, i.e. Spaces and FieldTypes
-        The domain in which the outcome of the operator lives. As the Operator
-        is endomorphic this is the same as its domain.
-    unitary : boolean
-        Indicates whether the Operator is unitary or not.
-    self_adjoint : boolean
-        Indicates whether the operator is self_adjoint or not.
-
-    Raises
-    ------
-    ValueError
-        is raised if
-            * neither N nor M is given
-
-    Notes
-    -----
-
-    Examples
-    --------
-
-    See Also
-    --------
-    Scientific reference
-    https://arxiv.org/abs/0806.3474
-
-    """
-
-    # ---Overwritten properties and methods---
-
-    def __init__(self, S, M=None, R=None, N=None, inverter=None,
-                 preconditioner=None):
-        """
-            Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
-            and `RN` if required.
-
-            Parameters
-            ----------
-            S : operator
-                Covariance of the signal prior.
-            M : operator
-                Likelihood contribution.
-            R : operator
-                Response operator translating signal to (noiseless) data.
-            N : operator
-                Covariance of the noise prior or the likelihood, respectively.
-
-        """
-        # infer domain, and target
-        if M is not None:
-            self._codomain = M.domain
-            self._likelihood = M.times
-
-        elif N is None:
-            raise ValueError("Either M or N must be given!")
-
-        elif R is not None:
-            self._codomain = R.domain
-            self._likelihood = \
-                lambda z: R.adjoint_times(N.inverse_times(R.times(z)))
-        else:
-            self._codomain = N.domain
-            self._likelihood = lambda z: N.inverse_times(z)
-
-        self._domain = S.domain
-        self._S = S
-        self._fft_S = FFTOperator(self._domain, target=self._codomain)
-
-        super(HarmonicPropagatorOperator, self).__init__(inverter=inverter,
-                                                 preconditioner=preconditioner)
-
-    # ---Mandatory properties and methods---
-
-    @property
-    def domain(self):
-        return self._domain
-
-    @property
-    def self_adjoint(self):
-        return True
-
-    @property
-    def unitary(self):
-        return False
-
-    # ---Added properties and methods---
-    def _likelihood_times(self, x, spaces=None):
-        transformed_x = self._fft_S.times(x, spaces=spaces)
-        y = self._likelihood(transformed_x)
-        transformed_y = self._fft_S.adjoint_times(y, spaces=spaces)
-        result = x.copy_empty()
-        result.set_val(transformed_y, copy=False)
-        return result
-
-    def _inverse_times(self, x, spaces):
-        pre_result = self._S.inverse_times(x, spaces)
-        pre_result += self._likelihood_times(x)
-        result = x.copy_empty()
-        result.set_val(pre_result, copy=False)
-        return result
diff --git a/nifty/operators/propagator_operator/propagator_operator.py b/nifty/operators/propagator_operator/propagator_operator.py
deleted file mode 100644
index afb4495c709e09a47708b74de77a2b7b49c2d83f..0000000000000000000000000000000000000000
--- a/nifty/operators/propagator_operator/propagator_operator.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Copyright(C) 2013-2017 Max-Planck-Society
-#
-# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
-# and financially supported by the Studienstiftung des deutschen Volkes.
-
-from nifty.operators import EndomorphicOperator,\
-                            FFTOperator,\
-                            InvertibleOperatorMixin
-
-
-class PropagatorOperator(InvertibleOperatorMixin, EndomorphicOperator):
-    """ NIFTY Propagator Operator D.
-
-    The propagator operator D, is known from the Wiener Filter.
-    Its inverse functional form might look like:
-    D = (S^(-1) + M)^(-1)
-    D = (S^(-1) + N^(-1))^(-1)
-    D = (S^(-1) + R^(\dagger) N^(-1) R)^(-1)
-
-    Parameters
-    ----------
-        S : LinearOperator
-            Covariance of the signal prior.
-        M : LinearOperator
-            Likelihood contribution.
-        R : LinearOperator
-            Response operator translating signal to (noiseless) data.
-        N : LinearOperator
-            Covariance of the noise prior or the likelihood, respectively.
-        inverter : class to invert explicitly defined operators
-            (default:ConjugateGradient)
-        preconditioner : Field
-            numerical preconditioner to speed up convergence
-        default_spaces : tuple of ints *optional*
-            Defines on which space(s) of a given field the Operator acts by
-            default (default: None)
-
-    Attributes
-    ----------
-    domain : tuple of DomainObjects, i.e. Spaces and FieldTypes
-        The domain on which the Operator's input Field lives.
-    target : tuple of DomainObjects, i.e. Spaces and FieldTypes
-        The domain in which the outcome of the operator lives. As the Operator
-        is endomorphic this is the same as its domain.
-    unitary : boolean
-        Indicates whether the Operator is unitary or not.
-    self_adjoint : boolean
-        Indicates whether the operator is self_adjoint or not.
-
-    Raises
-    ------
-    ValueError
-        is raised if
-            * neither N nor M is given
-
-    Notes
-    -----
-
-    Examples
-    --------
-    >>> x_space = RGSpace(4)
-    >>> k_space = RGRGTransformation.get_codomain(x_space)
-    >>> f = Field(x_space, val=[2, 4, 6, 8])
-    >>> S = create_power_operator(k_space, spec=1)
-    >>> N = DiagonalOperaor(f.domain, diag=1)
-    >>> D = PropagatorOperator(S=S, N=N) # D^{-1} = S^{-1} + N^{-1}
-    >>> D(f).val
-    <distributed_data_object>
-    array([ 1.,  2.,  3.,  4.]
-
-    See Also
-    --------
-    Scientific reference
-    https://arxiv.org/abs/0806.3474
-
-    """
-
-    # ---Overwritten properties and methods---
-
-    def __init__(self, S=None, M=None, R=None, N=None, inverter=None,
-                 preconditioner=None, default_spaces=None):
-        """
-            Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
-            and `RN` if required.
-
-            Parameters
-            ----------
-            S : operator
-                Covariance of the signal prior.
-            M : operator
-                Likelihood contribution.
-            R : operator
-                Response operator translating signal to (noiseless) data.
-            N : operator
-                Covariance of the noise prior or the likelihood, respectively.
-
-        """
-        # infer domain, and target
-        if M is not None:
-            self._domain = M.domain
-            self._likelihood_times = M.times
-
-        elif N is None:
-            raise ValueError("Either M or N must be given!")
-
-        elif R is not None:
-            self._domain = R.domain
-            self._likelihood_times = \
-                lambda z: R.adjoint_times(N.inverse_times(R.times(z)))
-        else:
-            self._domain = N.domain
-            self._likelihood_times = lambda z: N.inverse_times(z)
-
-        self._S = S
-        self._fft_S = FFTOperator(self._domain, target=self._S.domain)
-
-        if preconditioner is None:
-            preconditioner = self._S_times
-
-        super(PropagatorOperator, self).__init__(inverter=inverter,
-                                                 preconditioner=preconditioner,
-                                                 default_spaces=default_spaces)
-
-    # ---Mandatory properties and methods---
-
-    @property
-    def domain(self):
-        return self._domain
-
-    @property
-    def self_adjoint(self):
-        return True
-
-    @property
-    def unitary(self):
-        return False
-
-    # ---Added properties and methods---
-
-    def _S_times(self, x, spaces=None):
-            transformed_x = self._fft_S(x, spaces=spaces)
-            y = self._S(transformed_x, spaces=spaces)
-            transformed_y = self._fft_S.inverse_times(y, spaces=spaces)
-            result = x.copy_empty()
-            result.set_val(transformed_y, copy=False)
-            return result
-
-    def _S_inverse_times(self, x, spaces=None):
-            transformed_x = self._fft_S(x, spaces=spaces)
-            y = self._S.inverse_times(transformed_x, spaces=spaces)
-            transformed_y = self._fft_S.inverse_times(y, spaces=spaces)
-            result = x.copy_empty()
-            result.set_val(transformed_y, copy=False)
-            return result
-
-    def _inverse_times(self, x, spaces):
-        pre_result = self._S_inverse_times(x, spaces)
-        pre_result += self._likelihood_times(x)
-        result = x.copy_empty()
-        result.set_val(pre_result, copy=False)
-        return result
diff --git a/nifty/operators/smoothness_operator/__init__.py b/nifty/operators/smoothness_operator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..24e0f1fdec49ae44ed261c4cf3ba59603b25aeed
--- /dev/null
+++ b/nifty/operators/smoothness_operator/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from .smoothness_operator import SmoothnessOperator
diff --git a/nifty/operators/smoothness_operator/smoothness_operator.py b/nifty/operators/smoothness_operator/smoothness_operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9e35453616ff9df87ed252db3727e245c360343
--- /dev/null
+++ b/nifty/operators/smoothness_operator/smoothness_operator.py
@@ -0,0 +1,88 @@
+
+from nifty.spaces.power_space import PowerSpace
+from nifty.operators.endomorphic_operator import EndomorphicOperator
+from nifty.operators.laplace_operator import LaplaceOperator
+
+
+class SmoothnessOperator(EndomorphicOperator):
+    """An operator measuring the smoothness on an irregular grid with respect
+    to some scale.
+
+    This operator applies the irregular LaplaceOperator and its adjoint to a
+    Field defined over a PowerSpace, which yields a measure of the Field's
+    smoothness, and weights the result with the strength parameter. It is
+    used in the smoothness prior term of the CriticalPowerEnergy. For this
+    purpose the LaplaceOperator uses free boundary conditions, i.e. vanishing
+    curvature at both ends. In addition the first entry, which corresponds to
+    the overall mean of the map, is ignored, so the mean does not enter the
+    smoothness prior.
+
+    Parameters
+    ----------
+    domain : PowerSpace
+        The PowerSpace on which the operator acts.
+    strength : float
+        Specifies the strength of the SmoothnessOperator (default: 1.).
+    logarithmic : boolean
+        Whether smoothness is calculated on a logarithmic or a linear scale
+        (default: True).
+    """
+
+    # ---Overwritten properties and methods---
+
+    def __init__(self, domain, strength=1., logarithmic=True,
+                 default_spaces=None):
+
+        super(SmoothnessOperator, self).__init__(default_spaces=default_spaces)
+
+        self._domain = self._parse_domain(domain)
+        if len(self.domain) != 1:
+            raise ValueError("The domain must contain exactly one PowerSpace.")
+
+        if not isinstance(self.domain[0], PowerSpace):
+            raise TypeError("The domain must contain exactly one PowerSpace.")
+
+        if strength < 0:
+            raise ValueError("The strength must be non-negative.")
+
+        self._strength = strength
+
+        self._laplace = LaplaceOperator(domain=self.domain,
+                                        logarithmic=logarithmic)
+
+    # ---Mandatory properties and methods---
+
+    @property
+    def target(self):
+        return self._domain
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def unitary(self):
+        return False
+
+    @property
+    def symmetric(self):
+        return False
+
+    @property
+    def self_adjoint(self):
+        return False
+
+    def _times(self, x, spaces):
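+        # apply strength**2 * L^dagger L, where L is the irregular
+        # LaplaceOperator; a vanishing strength yields the zero field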
+        if self._strength != 0:
+            result = self._laplace.adjoint_times(self._laplace(x, spaces),
+                                                 spaces)
+            result *= self._strength**2
+        else:
+            result = x.copy_empty()
+            result.val[:] = 0
+        return result
+
+    # ---Added properties and methods---
+
+    @property
+    def strength(self):
+        return self._strength
diff --git a/nifty/sugar.py b/nifty/sugar.py
index a2a7ce0bd268659b4ab8cde5bdefa70414ea0e98..c3b8f9d6f97649ed6ff8decaa4b5c4dcbc9109e8 100644
--- a/nifty/sugar.py
+++ b/nifty/sugar.py
@@ -18,7 +18,8 @@
 
 from . import PowerSpace,\
               Field,\
-              DiagonalOperator
+              DiagonalOperator,\
+              sqrt
 
 __all__ = ['create_power_operator']
 
@@ -50,10 +51,56 @@ def create_power_operator(domain, power_spectrum, dtype=None,
 
     """
 
-    power_domain = PowerSpace(domain,
-                              distribution_strategy=distribution_strategy)
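+    # if the power spectrum is already given as a Field, reuse the PowerSpace
+    # it lives on instead of constructing a new one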
+    if isinstance(power_spectrum, Field):
+        power_domain = power_spectrum.domain
+    else:
+        power_domain = PowerSpace(domain,
+                                  distribution_strategy=distribution_strategy)
+
     fp = Field(power_domain, val=power_spectrum, dtype=dtype,
                distribution_strategy=distribution_strategy)
     f = fp.power_synthesize(mean=1, std=0, real_signal=False)
     f **= 2
     return DiagonalOperator(domain, diagonal=f, bare=True)
+
+
+def generate_posterior_sample(mean, covariance):
+    """ Generates a posterior sample from a Gaussian distribution with given
+    mean and covariance.
+
+    This method generates samples by setting up the observation and
+    reconstruction of a mock signal in order to obtain residuals of the right
+    correlation which are added to the given mean.
+
+    Parameters
+    ----------
+    mean : Field
+        the mean of the posterior Gaussian distribution
+    covariance : WienerFilterCurvature
+        The posterior correlation structure consisting of a
+        response operator, noise covariance and prior signal covariance
+
+    Returns
+    -------
+    sample : Field
+        A sample from the Gaussian with the given mean and covariance.
+
+    """
+
+    S = covariance.S
+    R = covariance.R
+    N = covariance.N
+
+    power = S.diagonal().power_analyze()**.5
+    mock_signal = power.power_synthesize(real_signal=True)
+
+    noise = N.diagonal(bare=True).val
+
+    mock_noise = Field.from_random(random_type="normal", domain=N.domain,
+                                   std=sqrt(noise), dtype=noise.dtype)
+    mock_data = R(mock_signal) + mock_noise
+
+    mock_j = R.adjoint_times(N.inverse_times(mock_data))
+    mock_m = covariance.inverse_times(mock_j)
+    sample = mock_signal - mock_m + mean
+    return sample
diff --git a/test/test_field.py b/test/test_field.py
index 59a00e1bcd9b43f8cfee749c5482ea8a5b3e5cd4..da87059832ab150ba5cb9c8cd902d59850be9812 100644
--- a/test/test_field.py
+++ b/test/test_field.py
@@ -119,3 +119,12 @@ class Test_Functionality(unittest.TestCase):
         assert_allclose(ps2.val.get_full_data()/samples,
                         fp2.val.get_full_data(),
                         rtol=0.1)
+
+    def test_vdot(self):
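+        # vdot should be independent of an explicit spaces argument and
+        # satisfy conjugate symmetry: <f1, f2> = conj(<f2, f1>)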
+        s = RGSpace((10,))
+        f1 = Field.from_random("normal", domain=s, dtype=np.complex128)
+        f2 = Field.from_random("normal", domain=s, dtype=np.complex128)
+        assert_allclose(f1.vdot(f2), f1.vdot(f2, spaces=0))
+        assert_allclose(f1.vdot(f2), np.conj(f2.vdot(f1)))