diff --git a/nifty/minimization/conjugate_gradient.py b/nifty/minimization/conjugate_gradient.py
index e3b71168c70b45c587f98af31833dead1185028e..3470b4744642a74b09288d37f9a39d5223915254 100644
--- a/nifty/minimization/conjugate_gradient.py
+++ b/nifty/minimization/conjugate_gradient.py
@@ -40,7 +40,6 @@ class ConjugateGradient(Minimizer):
     ----------
     Jorge Nocedal & Stephen Wright, "Numerical Optimization", Second Edition,
     2006, Springer-Verlag New York
-
     """
 
     def __init__(self, controller, preconditioner=None):
@@ -62,9 +61,7 @@ class ConjugateGradient(Minimizer):
             state at last point of the iteration
         status : integer
             Can be controller.CONVERGED or controller.ERROR
-
         """
-
         if preconditioner is None:
             preconditioner = self._preconditioner
 
diff --git a/nifty/minimization/descent_minimizer.py b/nifty/minimization/descent_minimizer.py
index 76f5eb6a4135c23b8d23e20705ca9ac14a063cc0..7e55e674a50478db4e68913edc7f5d00d61db4f8 100644
--- a/nifty/minimization/descent_minimizer.py
+++ b/nifty/minimization/descent_minimizer.py
@@ -18,7 +18,6 @@
 
 from __future__ import division
 import abc
-
 from .minimizer import Minimizer
 from .line_searching import LineSearchStrongWolfe
 
@@ -38,7 +37,6 @@ class DescentMinimizer(Minimizer):
     line_searcher : callable *optional*
         Function which infers the step size in the descent direction
         (default : LineSearchStrongWolfe()).
-
     """
 
     def __init__(self, controller, line_searcher=LineSearchStrongWolfe()):
@@ -68,9 +66,7 @@ class DescentMinimizer(Minimizer):
             * the controller returns controller.CONVERGED or controller.ERROR,
             * a perfectly flat point is reached,
             * according to the line-search the minimum is found,
-
         """
-
         f_k_minus_1 = None
         controller = self._controller
         status = controller.start(energy)
@@ -82,22 +78,17 @@ class DescentMinimizer(Minimizer):
             if energy.gradient_norm == 0:
                 return energy, controller.CONVERGED
 
-            # current position is encoded in energy object
-            descent_direction = self.get_descent_direction(energy)
-            # compute the step length, which minimizes energy.value along the
-            # search direction
+            # compute a step length that reduces energy.value sufficiently
             try:
-                new_energy = \
-                    self.line_searcher.perform_line_search(
-                                                   energy=energy,
-                                                   pk=descent_direction,
-                                                   f_k_minus_1=f_k_minus_1)
+                new_energy = self.line_searcher.perform_line_search(
+                    energy=energy, pk=self.get_descent_direction(energy),
+                    f_k_minus_1=f_k_minus_1)
             except RuntimeError:
                 return energy, controller.ERROR
 
             f_k_minus_1 = energy.value
-            # check if new energy value is bigger than old energy value
-            if (new_energy.value - energy.value) > 0:
+
+            if new_energy.value > energy.value:
                 return energy, controller.ERROR
 
             energy = new_energy
diff --git a/nifty/minimization/iteration_controlling/iteration_controller.py b/nifty/minimization/iteration_controlling/iteration_controller.py
index ccdcb3899c77ddd9ba19fdd9afc149ae022bb291..44db3fae259ad4e6c56ff9493972b7980634878a 100644
--- a/nifty/minimization/iteration_controlling/iteration_controller.py
+++ b/nifty/minimization/iteration_controlling/iteration_controller.py
@@ -55,7 +55,6 @@ class IterationController(with_metaclass(NiftyMeta, type('NewBase',
         -------
         status : integer status, can be CONVERGED, CONTINUE or ERROR
         """
-
         raise NotImplementedError
 
     @abc.abstractmethod
@@ -70,5 +69,4 @@ class IterationController(with_metaclass(NiftyMeta, type('NewBase',
         -------
         status : integer status, can be CONVERGED, CONTINUE or ERROR
         """
-
         raise NotImplementedError
diff --git a/nifty/minimization/line_searching/line_search.py b/nifty/minimization/line_searching/line_search.py
index 73199bd450b43e2a6c6a66036189445011778167..04bfd6af961be66d96e2571cd9f98e77726247d2 100644
--- a/nifty/minimization/line_searching/line_search.py
+++ b/nifty/minimization/line_searching/line_search.py
@@ -36,7 +36,6 @@ class LineSearch(with_metaclass(abc.ABCMeta,
         LineEnergy object from which we can extract energy at a specific point.
     preferred_initial_step_size : float
         Initial guess for the step length.
-
     """
 
     def __init__(self):
diff --git a/nifty/minimization/line_searching/line_search_strong_wolfe.py b/nifty/minimization/line_searching/line_search_strong_wolfe.py
index 3658cad4ad076bca13233e4e05fcc67507aa879d..b94da5c8d4b10d5bff9b09a6eee681fcf66bf5ea 100644
--- a/nifty/minimization/line_searching/line_search_strong_wolfe.py
+++ b/nifty/minimization/line_searching/line_search_strong_wolfe.py
@@ -19,7 +19,6 @@
 from __future__ import division
 from builtins import range
 import numpy as np
-
 from .line_search import LineSearch
 from ...energies import LineEnergy
 from ... import dobj
@@ -62,7 +61,6 @@ class LineSearchStrongWolfe(LineSearch):
         Maximum number of iterations performed by the line search algorithm.
     max_zoom_iterations : integer
         Maximum number of iterations performed by the zoom algorithm.
-
     """
 
     def __init__(self, c1=1e-4, c2=0.9,
@@ -99,9 +97,7 @@ class LineSearchStrongWolfe(LineSearch):
         -------
         energy_star : Energy object
             The new Energy object on the new position.
-
         """
-
         le_0 = LineEnergy(0., energy, pk, 0.)
 
         # initialize the zero phis
@@ -203,7 +199,6 @@ class LineSearchStrongWolfe(LineSearch):
         -------
         energy_star : Energy object
             The new Energy object on the new position.
-
         """
         cubic_delta = 0.2  # cubic interpolant checks
         quad_delta = 0.1  # quadratic interpolant checks
@@ -285,7 +280,6 @@ class LineSearchStrongWolfe(LineSearch):
         xmin : float
             Position of the approximated minimum.
         """
-
         with np.errstate(divide='raise', over='raise', invalid='raise'):
             try:
                 C = fpa
diff --git a/nifty/minimization/minimizer.py b/nifty/minimization/minimizer.py
index 0809da8a32d62970a5988e095469a6a2243ae11a..1c6ff8b94fa252368560a7bd5ca7505c6233c365 100644
--- a/nifty/minimization/minimizer.py
+++ b/nifty/minimization/minimizer.py
@@ -22,8 +22,7 @@ from future.utils import with_metaclass
 
 
 class Minimizer(with_metaclass(NiftyMeta, type('NewBase', (object,), {}))):
-    """ A base class used by all minimizers.
-    """
+    """ A base class used by all minimizers."""
 
     @abc.abstractmethod
     def __call__(self, energy, preconditioner=None):
@@ -44,5 +43,4 @@ class Minimizer(with_metaclass(NiftyMeta, type('NewBase', (object,), {}))):
             Latest `energy` of the minimization.
         status : integer
         """
-
         raise NotImplementedError
diff --git a/nifty/minimization/nonlinear_cg.py b/nifty/minimization/nonlinear_cg.py
index 4aedbcc559b8b5921c5d90850a010b89bfb579cc..adba2a0f7927dfc5ae21c994bf4af26f2f0c68f3 100644
--- a/nifty/minimization/nonlinear_cg.py
+++ b/nifty/minimization/nonlinear_cg.py
@@ -34,7 +34,6 @@ class NonlinearCG(Minimizer):
     ----------
     Jorge Nocedal & Stephen Wright, "Numerical Optimization", Second Edition,
     2006, Springer-Verlag New York
-
     """
 
     def __init__(self, controller, line_searcher=LineSearchStrongWolfe()):
@@ -56,9 +55,7 @@ class NonlinearCG(Minimizer):
             state at last point of the iteration
         status : integer
             Can be controller.CONVERGED or controller.ERROR
-
         """
-
         controller = self._controller
         status = controller.start(energy)
         if status != controller.CONTINUE:
diff --git a/nifty/minimization/relaxed_newton.py b/nifty/minimization/relaxed_newton.py
index 4e9e6ac45194d850483a3e1c5e066600e70c1bf0..77d4649cf46566070b439207b4e9f4c8b294e4d9 100644
--- a/nifty/minimization/relaxed_newton.py
+++ b/nifty/minimization/relaxed_newton.py
@@ -24,7 +24,7 @@ class RelaxedNewton(DescentMinimizer):
     def __init__(self, controller, line_searcher=LineSearchStrongWolfe()):
         super(RelaxedNewton, self).__init__(controller=controller,
                                             line_searcher=line_searcher)
-
+        # FIXME: this does not look idiomatic
         self.line_searcher.preferred_initial_step_size = 1.
 
     def get_descent_direction(self, energy):
@@ -46,6 +46,5 @@ class RelaxedNewton(DescentMinimizer):
         descent_direction : Field
            Returns the descent direction with proposed step length. In a
            quadratic potential this corresponds to the optimal step.
-
         """
         return -energy.curvature.inverse_times(energy.gradient)
diff --git a/nifty/minimization/steepest_descent.py b/nifty/minimization/steepest_descent.py
index 388dd95d19f38cf99843846906a1de77901c5462..1ffbde24249caacc7db6796a0281858ba45a3522 100644
--- a/nifty/minimization/steepest_descent.py
+++ b/nifty/minimization/steepest_descent.py
@@ -26,7 +26,7 @@ class SteepestDescent(DescentMinimizer):
         """ Implementation of the steepest descent minimization scheme.
 
         Also known as 'gradient descent'. This algorithm simply follows the
-        functionals gradient for minization.
+        functional's gradient for minimization.
 
         Parameters
         ----------
@@ -37,8 +37,6 @@ class SteepestDescent(DescentMinimizer):
         Returns
         -------
         descent_direction : Field
-            Returns the descent direction.
-
+            the descent direction.
         """
-
         return -energy.gradient
diff --git a/nifty/minimization/vl_bfgs.py b/nifty/minimization/vl_bfgs.py
index 321281fa06fe98edfe32f88257f19b8c2b6e4eb5..58df5d7815794d2d69fcaa8a68ec6b9ec8b5544b 100644
--- a/nifty/minimization/vl_bfgs.py
+++ b/nifty/minimization/vl_bfgs.py
@@ -20,7 +20,6 @@ from __future__ import division
 from builtins import range
 from builtins import object
 import numpy as np
-
 from .descent_minimizer import DescentMinimizer
 from .line_searching import LineSearchStrongWolfe
 
@@ -28,10 +27,8 @@ from .line_searching import LineSearchStrongWolfe
 class VL_BFGS(DescentMinimizer):
     def __init__(self, controller, line_searcher=LineSearchStrongWolfe(),
                  max_history_length=5):
-
         super(VL_BFGS, self).__init__(controller=controller,
                                       line_searcher=line_searcher)
-
         self.max_history_length = max_history_length
 
     def __call__(self, energy):
@@ -117,7 +114,6 @@ class InformationStore(object):
         2D circular buffer of scalar products between elements of s and y.
     yy : numpy.ndarray
         2D circular buffer of scalar products between different elements of y.
-
     """
     def __init__(self, max_history_length, x0, gradient):
         self.max_history_length = max_history_length
@@ -134,9 +130,7 @@ class InformationStore(object):
 
     @property
     def history_length(self):
-        """Returns the number of currently stored updates.
-
-        """
+        """Returns the number of currently stored updates."""
         return min(self.k, self.max_history_length)
 
     @property
@@ -173,7 +167,6 @@ class InformationStore(object):
         -------
         result : numpy.ndarray
             Scalar matrix.
-
         """
         m = self.history_length
         mmax = self.max_history_length
@@ -216,7 +209,6 @@ class InformationStore(object):
         -------
         delta : List
             List of the new scalar coefficients (deltas).
-
         """
         m = self.history_length
         b_dot_b = self.b_dot_b
@@ -246,7 +238,6 @@ class InformationStore(object):
 
         Calculates the new position and gradient differences and enters them
         into the respective list.
-
         """
         mmax = self.max_history_length
         self.s[self.k % mmax] = x - self.last_x