diff --git a/nifty/minimization/bare_dot.py b/nifty/minimization/bare_dot.py
new file mode 100644
index 0000000000000000000000000000000000000000..31e0864d4f0dc049acc5e965d88421fc9e263bb6
--- /dev/null
+++ b/nifty/minimization/bare_dot.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+
+def bare_dot(a, b):
+    """Inner product that works for NIFTy fields and plain arrays alike.
+
+    Tries a.dot(b, bare=True) first (the NIFTy field interface), then an
+    object-level vdot method, and finally falls back to numpy.vdot.
+    """
+    try:
+        return a.dot(b, bare=True)
+    except (AttributeError, TypeError):
+        pass
+
+    try:
+        return a.vdot(b)
+    except AttributeError:
+        pass
+
+    return np.vdot(a, b)
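
For reference, a quick check of how the fallback chain resolves for plain
numpy arrays (illustrative sketch; assumes numpy versions of this era, where
ndarray.dot() rejects the 'bare' keyword and ndarray has no vdot method):

    import numpy as np
    from nifty.minimization.bare_dot import bare_dot

    a = np.array([1.0, 2.0])
    b = np.array([3.0, 4.0])
    # a.dot(b, bare=True) raises TypeError, a.vdot raises AttributeError,
    # so the call falls through to np.vdot(a, b).
    print(bare_dot(a, b))  # 11.0
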
diff --git a/nifty/minimization/line_searching/line_search.py b/nifty/minimization/line_searching/line_search.py
index 7c0808019dd3051d46dc7b09fe79abd7acd4c9ea..169697512748ab1c2cd8f6b5c11b7e68adbe876e 100644
--- a/nifty/minimization/line_searching/line_search.py
+++ b/nifty/minimization/line_searching/line_search.py
@@ -1,22 +1,8 @@
 import abc
 
-import numpy as np
-
 from keepers import Loggable
 
-
-def bare_dot(a, b):
-    try:
-        return a.dot(b, bare=True)
-    except(AttributeError, TypeError):
-        pass
-
-    try:
-        return a.vdot(b)
-    except(AttributeError):
-        pass
-
-    return np.vdot(a, b)
+from ..bare_dot import bare_dot
 
 
 class LineSearch(object, Loggable):
@@ -78,8 +64,8 @@ class LineSearch(object, Loggable):
 
         """
 
-        self.xk = xk
-        self.pk = pk
+        self.xk = xk.copy()
+        self.pk = pk.copy()
 
         if f_k is None:
             self.f_k = self.f(xk)
@@ -91,6 +77,8 @@ class LineSearch(object, Loggable):
         else:
             self.fprime_k = fprime_k
 
+        if f_k_minus_1 is not None:
+            f_k_minus_1 = f_k_minus_1.copy()
         self.f_k_minus_1 = f_k_minus_1
 
     def _phi(self, alpha):
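
The copies above guard against aliasing: the minimizer updates x in place, so
storing bare references would let the cached line-search state change
underneath it. A minimal sketch of the failure mode, with a plain numpy array
standing in for a NIFTy field:

    import numpy as np

    xk = np.zeros(3)
    stored = xk          # old behaviour: keep a reference only
    xk += 1.0            # the caller updates x in place, as the minimizer does
    print(stored)        # [1. 1. 1.] -- the cached point silently moved
    stored = xk.copy()   # new behaviour: an independent snapshot
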
diff --git a/nifty/minimization/line_searching/line_search_strong_wolfe.py b/nifty/minimization/line_searching/line_search_strong_wolfe.py
index cb7098fa38d544e1985c4a416d59b7bb1e3b04f3..151c1e1300364fea8c8acdd977a054f7fa7a1143 100644
--- a/nifty/minimization/line_searching/line_search_strong_wolfe.py
+++ b/nifty/minimization/line_searching/line_search_strong_wolfe.py
@@ -10,7 +10,7 @@ class LineSearchStrongWolfe(LineSearch):
 
     def __init__(self, c1=1e-4, c2=0.9,
                  max_step_size=50, max_iterations=10,
-                 max_zoom_iterations=100):
+                 max_zoom_iterations=10):
 
         """
         Parameters
@@ -82,7 +82,6 @@ class LineSearchStrongWolfe(LineSearch):
         # start the minimization loop
         for i in xrange(max_iterations):
             phi_alpha1 = self._phi(alpha1)
-
             if alpha1 == 0:
                 self.logger.warn("Increment size became 0.")
                 alpha_star = 0.
@@ -137,6 +136,6 @@ class LineSearchStrongWolfe(LineSearch):
-        cubic_delta = 0.2  # cubic
-        quad_delta = 0.1  # quadratic
+        cubic_delta = 0.0  # safeguard margin for the cubic interpolant
+        quad_delta = 0.0  # safeguard margin for the quadratic interpolant
 
         # initialize the most recent versions (j-1) of phi and alpha
         alpha_recent = 0
         phi_recent = phi_0
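
The deltas act as safeguard margins inside the zoom bracketing interval:
interpolated trial points closer than delta times the interval width to
either endpoint are rejected in favour of the next fall-back, so setting both
to 0.0 accepts any interior interpolant. A standalone version of the
quadratic fall-back (an assumption: this follows the textbook formula such
zoom routines are modeled on, not necessarily this class's private helper):

    def quad_min(a, fa, fpa, b, fb):
        # fit q(x) = fa + fpa*(x - a) + c*(x - a)**2 through
        # q(a) = fa, q'(a) = fpa, q(b) = fb and return its minimizer
        d = b - a
        c = (fb - fa - fpa * d) / d ** 2
        if c <= 0.0:
            return None  # the parabola has no interior minimum
        return a - fpa / (2.0 * c)

    print(quad_min(0.0, 1.0, -1.0, 1.0, 1.0))  # 0.5 for this symmetric case
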
diff --git a/nifty/minimization/quasi_newton_minimizer.py b/nifty/minimization/quasi_newton_minimizer.py
index 4a723d5352d51cfc70442f33b3444ed7f1e9686b..96915424953fcca0004f9ddb8c4b3ba58080fdc2 100644
--- a/nifty/minimization/quasi_newton_minimizer.py
+++ b/nifty/minimization/quasi_newton_minimizer.py
@@ -88,13 +88,14 @@ class QuasiNewtonMinimizer(object, Loggable):
 
             # compute the step length, which minimizes f_k along the
             # search direction = the gradient
-            step_length, f_k = self.line_searcher.perform_line_search(
+            step_length, new_f_k = self.line_searcher.perform_line_search(
                                                xk=x,
                                                pk=descend_direction,
                                                f_k=f_k,
                                                fprime_k=gradient,
                                                f_k_minus_1=f_k_minus_1)
             f_k_minus_1 = f_k
+            f_k = new_f_k
 
             # update x
             x += descend_direction*step_length
@@ -106,7 +107,8 @@ class QuasiNewtonMinimizer(object, Loggable):
                               (iteration_number, step_length, delta))
             if delta == 0:
                 convergence = self.convergence_level + 2
-                self.logger.info("Found minimum. Stopping.")
+                self.logger.info("Found minimum according to line-search. "
+                                 "Stopping.")
                 break
             elif delta < self.convergence_tolerance:
                 convergence += 1
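
The rename above fixes an order-of-update bug: the old code rebound f_k to
the line-search result before saving it, so f_k_minus_1 received the new
value instead of the previous one. The same bug and fix in isolation:

    f_k = 3.0            # value at the current point
    f_k = 2.0            # old order: the line-search result overwrites f_k...
    f_k_minus_1 = f_k    # ...so the history entry becomes 2.0, not 3.0

    f_k = 3.0            # fixed order, as in the patch
    new_f_k = 2.0
    f_k_minus_1 = f_k    # -> 3.0, the previous value
    f_k = new_f_k
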
diff --git a/nifty/minimization/vl_bfgs.py b/nifty/minimization/vl_bfgs.py
index bd5ed83dcb9d837c6892fbfe653756c3fed76d05..5cef3879e8c5f98cfbebf39a108bf7c708fdeff8 100644
--- a/nifty/minimization/vl_bfgs.py
+++ b/nifty/minimization/vl_bfgs.py
@@ -5,6 +5,8 @@ import numpy as np
 from .quasi_newton_minimizer import QuasiNewtonMinimizer
 from .line_searching import LineSearchStrongWolfe
 
+from .bare_dot import bare_dot
+
 
 class VL_BFGS(QuasiNewtonMinimizer):
     def __init__(self, line_searcher=LineSearchStrongWolfe(), callback=None,
@@ -20,6 +22,10 @@ class VL_BFGS(QuasiNewtonMinimizer):
 
         self.max_history_length = max_history_length
 
+    def __call__(self, x0, f, fprime, f_args=()):
+        self._information_store = None  # discard history from a previous run
+        return super(VL_BFGS, self).__call__(x0, f, fprime, f_args=f_args)
+
     def _get_descend_direction(self, x, gradient):
         # initialize the information store if it doesn't already exist
         try:
@@ -36,6 +42,9 @@ class VL_BFGS(QuasiNewtonMinimizer):
         for i in xrange(1, len(delta)):
             descend_direction += delta[i] * b[i]
 
+        norm = np.sqrt(bare_dot(descend_direction, descend_direction))
+        if norm != 0:  # normalize, guarding against a zero direction
+            descend_direction /= norm
         return descend_direction
 
 
@@ -44,8 +53,8 @@ class InformationStore(object):
         self.max_history_length = max_history_length
         self.s = LimitedList(max_history_length)
         self.y = LimitedList(max_history_length)
-        self.last_x = x0
-        self.last_gradient = gradient
+        self.last_x = x0.copy()
+        self.last_gradient = gradient.copy()
         self.k = 0
 
         self._ss_store = {}
@@ -168,8 +177,8 @@ class InformationStore(object):
         new_y = gradient - self.last_gradient
         self.y.add(new_y)
 
-        self.last_x = x
-        self.last_gradient = gradient
+        self.last_x = x.copy()
+        self.last_gradient = gradient.copy()
 
 #
 #        k = self.k
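
In plain numpy terms, the normalization added to _get_descend_direction does
the following (illustrative; d stands in for a NIFTy field, and np.vdot for
what bare_dot resolves to on plain arrays):

    import numpy as np

    d = np.array([3.0, 4.0])
    norm = np.sqrt(np.vdot(d, d))  # Euclidean length, 5.0 here
    if norm != 0:                  # guard against a zero direction
        d /= norm
    print(d)                       # [0.6 0.8], unit length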