diff --git a/nifty/minimization/descent_minimizer.py b/nifty/minimization/descent_minimizer.py
index 7814824344c6e0f8fbf3804f3d52206076b1961a..5d224b3687da61842335e5e89587d467f1a9b746 100644
--- a/nifty/minimization/descent_minimizer.py
+++ b/nifty/minimization/descent_minimizer.py
@@ -156,24 +156,20 @@ class DescentMinimizer(Loggable, object):
                                                pk=descend_direction,
                                                f_k_minus_1=f_k_minus_1)
             f_k_minus_1 = energy.value
-            
+
             # check if new energy value is bigger than old energy value
             if (new_energy.value - energy.value) > 0:
-                self.logger.info("Line search algorithm was unsuccessful. "
-                                 "Stopping.")
-                new_energy = energy.at( position=energy.position + 
-                    np.random.rand()/10*(new_energy.position-energy.position) )
-                energy = new_energy
-                convergence = self.convergence_level + 2
+                self.logger.info("Line search algorithm returned a new energy "
+                                 "that was larger than the old one. Stopping.")
                 break
 
             energy = new_energy
-
             # check convergence
             delta = abs(gradient).max() * (step_length/gradient_norm)
-            self.logger.debug("Iteration : %08u   step_length = %3.1E   "
-                              "delta = %3.1E" %
-                              (iteration_number, step_length, delta))
+            self.logger.debug("Iteration:%08u step_length=%3.1E "
+                              "delta=%3.1E energy=%3.1E" %
+                              (iteration_number, step_length, delta,
+                               energy.value))
             if delta == 0:
                 convergence = self.convergence_level + 2
                 self.logger.info("Found minimum according to line-search. "
diff --git a/nifty/minimization/steepest_descent.py b/nifty/minimization/steepest_descent.py
index fae8ab527466048985b7b4c9822823db0ec6faaf..97281588d5b3b63a37c7a7e5e3abb263f6f9312d 100644
--- a/nifty/minimization/steepest_descent.py
+++ b/nifty/minimization/steepest_descent.py
@@ -40,8 +40,4 @@ class SteepestDescent(DescentMinimizer):
         """
 
         descend_direction = energy.gradient
-        norm = descend_direction.norm()
-        if norm != 1:
-            return descend_direction / -norm
-        else:
-            return descend_direction * -1
+        return descend_direction * -1
diff --git a/nifty/minimization/vl_bfgs.py b/nifty/minimization/vl_bfgs.py
index 501b427a7062bc70451db8895aed7046b53d9a31..99591a0a45c2494512d00433657797da60a3ba7a 100644
--- a/nifty/minimization/vl_bfgs.py
+++ b/nifty/minimization/vl_bfgs.py
@@ -25,7 +25,7 @@ from .line_searching import LineSearchStrongWolfe
 class VL_BFGS(DescentMinimizer):
     def __init__(self, line_searcher=LineSearchStrongWolfe(), callback=None,
                  convergence_tolerance=1E-4, convergence_level=3,
-                 iteration_limit=None, max_history_length=10):
+                 iteration_limit=None, max_history_length=5):
 
         super(VL_BFGS, self).__init__(
                                 line_searcher=line_searcher,
@@ -84,9 +84,6 @@ class VL_BFGS(DescentMinimizer):
         for i in xrange(1, len(delta)):
             descend_direction += delta[i] * b[i]
 
-        norm = descend_direction.norm()
-        if norm != 1:
-            descend_direction /= norm
         return descend_direction
 
 
diff --git a/test/test_minimization/quadratic_potential.py b/test/test_minimization/quadratic_potential.py
new file mode 100644
index 0000000000000000000000000000000000000000..06b015fe9384b3df43b93c6a833e4c5f975f641f
--- /dev/null
+++ b/test/test_minimization/quadratic_potential.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+
+from nifty import Energy
+
+
+class QuadraticPotential(Energy):
+    def __init__(self, position, eigenvalues):
+        super(QuadraticPotential, self).__init__(position)
+        self.eigenvalues = eigenvalues
+
+    def at(self, position):
+        return self.__class__(position,
+                              eigenvalues=self.eigenvalues)
+
+    @property
+    def value(self):
+        H = 0.5 * self.position.vdot(
+                    self.eigenvalues(self.position))
+        return H.real
+
+    @property
+    def gradient(self):
+        g = self.eigenvalues(self.position)
+        return g
+
+    @property
+    def curvature(self):
+        return self.eigenvalues
diff --git a/test/test_minimization/test_conjugate_gradient.py b/test/test_minimization/test_conjugate_gradient.py
index 74c38a6961c2000b9d4bbc0ca66a53f74b88e26d..4ab686c0102c11945f99c0f879891854ce945812 100644
--- a/test/test_minimization/test_conjugate_gradient.py
+++ b/test/test_minimization/test_conjugate_gradient.py
@@ -1,57 +1,47 @@
-
 import unittest
 
+import numpy as np
 from numpy.testing import assert_equal, assert_almost_equal
 
-from nifty import *
+from nifty import Field, DiagonalOperator, RGSpace, HPSpace
+from nifty import ConjugateGradient
 
-from itertools import product
 from test.common import expand
-from test.common import generate_spaces
-
-np.random.seed(42)
-
-
-class ConjugateGradient_Tests(unittest.TestCase):
-    spaces = generate_spaces()
-
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x0 = Field.from_random('normal', domain=space)
-        A = DiagonalOperator(space, diagonal = 1.)
-        b = Field(space, val=0.)
-        
-        minimizer = ConjugateGradient(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (position, convergence) = minimizer(A=A, x0=x0, b=b)
-        
-        if position.domain[0] != space:
-            raise TypeError
-        if type(convergence) != int:
-            raise TypeError
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x0 = Field.from_random('normal', domain=space)
-        test_x = Field(space, val = 1.)
-        A = DiagonalOperator(space, diagonal = 1.)
-        b = Field(space, val=1.)
-        
-        minimizer = ConjugateGradient(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (position, convergence) = minimizer(A=A, x0=x0, b=b)
-        
-        assert_almost_equal(position.val.get_full_data(), 
-                            test_x.val.get_full_data(), decimal=3)
-        assert_equal(convergence, convergence_level+1)
 
+spaces = [RGSpace([1024, 1024], distances=0.123), HPSpace(32)]
+
+
+class Test_ConjugateGradient(unittest.TestCase):
+
+    def test_interface(self):
+        iteration_limit = 100
+        convergence_level = 4
+        convergence_tolerance = 1E-6
+        callback = lambda z: z
+        minimizer = ConjugateGradient(
+                                iteration_limit=iteration_limit,
+                                convergence_tolerance=convergence_tolerance,
+                                convergence_level=convergence_level,
+                                callback=callback)
+
+        assert_equal(minimizer.iteration_limit, iteration_limit)
+        assert_equal(minimizer.convergence_level, convergence_level)
+        assert_equal(minimizer.convergence_tolerance, convergence_tolerance)
+        assert(minimizer.callback is callback)
+
+    @expand([[space] for space in spaces])
+    def test_minimization(self, space):
+        np.random.seed(42)
+        starting_point = Field.from_random('normal', domain=space)*10
+        covariance_diagonal = Field.from_random('uniform', domain=space) + 0.5
+        covariance = DiagonalOperator(space, diagonal=covariance_diagonal)
+        required_result = Field(space, val=1.)
+
+        minimizer = ConjugateGradient()
+
+        (position, convergence) = minimizer(A=covariance, x0=starting_point,
+                                            b=required_result)
 
+        assert_almost_equal(position.val.get_full_data(),
+                            1./covariance_diagonal.val.get_full_data(),
+                            decimal=3)
diff --git a/test/test_minimization/test_descent_minimizers.py b/test/test_minimization/test_descent_minimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcffa73d7698faec737e979f35faf5a1a4404898
--- /dev/null
+++ b/test/test_minimization/test_descent_minimizers.py
@@ -0,0 +1,51 @@
+import unittest
+
+import numpy as np
+from numpy.testing import assert_equal, assert_almost_equal
+
+from nifty import Field, DiagonalOperator, RGSpace, HPSpace
+from nifty import SteepestDescent, RelaxedNewton, VL_BFGS
+
+from itertools import product
+from test.common import expand
+
+from quadratic_potential import QuadraticPotential  # NOTE(review): implicit relative import (Python 2 only) -- confirm test runner has this directory on sys.path
+
+from nifty import logger  # NOTE(review): `logger` is never used in this module -- remove, or use it in place of print-style debugging
+
+minimizers = [SteepestDescent, RelaxedNewton, VL_BFGS]
+spaces = [RGSpace([1024, 1024], distances=0.123), HPSpace(32)]
+
+
+class Test_DescentMinimizers(unittest.TestCase):
+
+    @expand([[minimizer] for minimizer in minimizers])
+    def test_interface(self, minimizer):
+        iteration_limit = 100
+        convergence_level = 4
+        convergence_tolerance = 1E-6
+        callback = lambda z: z
+        minimizer = minimizer(iteration_limit=iteration_limit,
+                              convergence_tolerance=convergence_tolerance,
+                              convergence_level=convergence_level,
+                              callback=callback)
+
+        assert_equal(minimizer.iteration_limit, iteration_limit)
+        assert_equal(minimizer.convergence_level, convergence_level)
+        assert_equal(minimizer.convergence_tolerance, convergence_tolerance)
+        assert(minimizer.callback is callback)
+
+    @expand(product(minimizers, spaces))
+    def test_minimization(self, minimizer_class, space):
+        np.random.seed(42)
+        starting_point = Field.from_random('normal', domain=space)*10
+        covariance_diagonal = Field.from_random('uniform', domain=space) + 0.5
+        covariance = DiagonalOperator(space, diagonal=covariance_diagonal)
+        energy = QuadraticPotential(position=starting_point,
+                                    eigenvalues=covariance)
+        minimizer = minimizer_class(iteration_limit=30)
+
+        (energy, convergence) = minimizer(energy)
+
+        assert_almost_equal(energy.value, 0, decimal=5)
+        assert_almost_equal(energy.position.val.get_full_data(), 0., decimal=5)
diff --git a/test/test_minimization/test_relaxed_newton.py b/test/test_minimization/test_relaxed_newton.py
deleted file mode 100644
index b59ee1eb0260b34bb5bd7414de115bd071ace7bd..0000000000000000000000000000000000000000
--- a/test/test_minimization/test_relaxed_newton.py
+++ /dev/null
@@ -1,86 +0,0 @@
-
-import unittest
-
-from numpy.testing import assert_equal, assert_almost_equal
-
-from nifty import *
-
-from itertools import product
-from test.common import expand
-from test.common import generate_spaces
-
-np.random.seed(42)
-
-
-class QuadraticPot(Energy):
-    def __init__(self, position, N):
-        super(QuadraticPot, self).__init__(position)
-        self.N = N
-        
-    def at(self, position):
-        return self.__class__(position, N = self.N)
-
-
-    @property
-    def value(self):
-        H = 0.5 *self.position.dot(self.N.inverse_times(self.position))
-        return H.real
-
-    @property
-    def gradient(self):
-        g = self.N.inverse_times(self.position)
-        return_g = g.copy_empty(dtype=np.float)
-        return_g.val = g.val.real
-        return return_g
-            
-    @property
-    def curvature(self):
-        return self.N
-
-
-
-class RelaxedNewton_Tests(unittest.TestCase):
-    spaces = generate_spaces()
-
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x = Field.from_random('normal', domain=space)
-        N = DiagonalOperator(space, diagonal = 1.)
-        energy = QuadraticPot(position=x , N=N)
-        
-        minimizer = RelaxedNewton(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (energy, convergence) = minimizer(energy)
-        
-        if energy.position.domain[0] != space:
-            raise TypeError
-        if type(convergence) != int:
-            raise TypeError
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x = Field.from_random('normal', domain=space)
-        test_x = Field(space, val = 0.)
-        N = DiagonalOperator(space, diagonal = 1.)
-        energy = QuadraticPot(position=x , N=N)
-        
-        minimizer = RelaxedNewton(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (energy, convergence) = minimizer(energy)
-        
-        assert_almost_equal(energy.value, 0, decimal=3)
-        assert_almost_equal(energy.position.val.get_full_data(), 
-                            test_x.val.get_full_data(), decimal=3)
-        assert_equal(convergence, convergence_level+2)
-
-
-
diff --git a/test/test_minimization/test_steepest_descent.py b/test/test_minimization/test_steepest_descent.py
deleted file mode 100644
index 59bfe3efacbc5c4b79e2fb173f89938b65f9de65..0000000000000000000000000000000000000000
--- a/test/test_minimization/test_steepest_descent.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import unittest
-
-from numpy.testing import  assert_equal, assert_almost_equal
-
-from nifty import *
-
-from itertools import product
-from test.common import expand
-from test.common import generate_spaces
-
-np.random.seed(42)
-
-
-class QuadraticPot(Energy):
-    def __init__(self, position, N):
-        super(QuadraticPot, self).__init__(position)
-        self.N = N
-        
-    def at(self, position):
-        return self.__class__(position, N = self.N)
-
-
-    @property
-    def value(self):
-        H = 0.5 *self.position.dot(self.N.inverse_times(self.position))
-        return H.real
-
-    @property
-    def gradient(self):
-        g = self.N.inverse_times(self.position)
-        return_g = g.copy_empty(dtype=np.float)
-        return_g.val = g.val.real
-        return return_g
-            
-    @property
-    def curvature(self):
-        return self.N
-
-
-
-class SteepestDescent_Tests(unittest.TestCase):
-    spaces = generate_spaces()
-
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x = Field.from_random('normal', domain=space)
-        N = DiagonalOperator(space, diagonal = 1.)
-        energy = QuadraticPot(position=x , N=N)
-        
-        minimizer = SteepestDescent(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (energy, convergence) = minimizer(energy)
-        
-        if energy.position.domain[0] != space:
-            raise TypeError
-        if type(convergence) != int:
-            raise TypeError
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x = Field.from_random('normal', domain=space)
-        test_x = Field(space, val = 0.)
-        N = DiagonalOperator(space, diagonal = 1.)
-        energy = QuadraticPot(position=x , N=N)
-        
-        minimizer = SteepestDescent(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (energy, convergence) = minimizer(energy)
-        
-        assert_almost_equal(energy.value, 0, decimal=3)
-        assert_almost_equal(energy.position.val.get_full_data(), 
-                            test_x.val.get_full_data(), decimal=3)
-        assert_equal(convergence, convergence_level+2)
diff --git a/test/test_minimization/test_vl_bfgs.py b/test/test_minimization/test_vl_bfgs.py
deleted file mode 100644
index 2eff36431e63ffe979f08085f4215adc449964f5..0000000000000000000000000000000000000000
--- a/test/test_minimization/test_vl_bfgs.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import unittest
-
-from numpy.testing import assert_equal, assert_almost_equal
-
-from nifty import *
-
-from itertools import product
-from test.common import expand
-from test.common import generate_spaces
-
-np.random.seed(42)
-
-
-class QuadraticPot(Energy):
-    def __init__(self, position, N):
-        super(QuadraticPot, self).__init__(position)
-        self.N = N
-        
-    def at(self, position):
-        return self.__class__(position, N = self.N)
-
-
-    @property
-    def value(self):
-        H = 0.5 *self.position.dot(self.N.inverse_times(self.position))
-        return H.real
-
-    @property
-    def gradient(self):
-        g = self.N.inverse_times(self.position)
-        return_g = g.copy_empty(dtype=np.float)
-        return_g.val = g.val.real
-        return return_g
-            
-    @property
-    def curvature(self):
-        return self.N
-
-
-
-class VL_BFGS_Tests(unittest.TestCase):
-    spaces = generate_spaces()
-
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x = Field.from_random('normal', domain=space)
-        N = DiagonalOperator(space, diagonal = 1.)
-        energy = QuadraticPot(position=x , N=N)
-        
-        minimizer = VL_BFGS(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (energy, convergence) = minimizer(energy)
-        
-        if energy.position.domain[0] != space:
-            raise TypeError
-        if type(convergence) != int:
-            raise TypeError
-
-    @expand(product(spaces, [10,  100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
-    def test_property(self, space, iteration_limit, convergence_tolerance, 
-                      convergence_level):
-        
-        x = Field.from_random('normal', domain=space)
-        test_x = Field(space, val = 0.)
-        N = DiagonalOperator(space, diagonal = 1.)
-        energy = QuadraticPot(position=x , N=N)
-        
-        minimizer = VL_BFGS(iteration_limit=iteration_limit,
-                                    convergence_tolerance=convergence_tolerance, 
-                                    convergence_level=convergence_level)
-                                    
-        (energy, convergence) = minimizer(energy)
-        
-        assert_almost_equal(energy.value, 0, decimal=3)
-        assert_almost_equal(energy.position.val.get_full_data(), 
-                            test_x.val.get_full_data(), decimal=3)
-        assert_equal(convergence, convergence_level+2)
-