diff --git a/demos/critical_filtering.py b/demos/critical_filtering.py
index 776422c081e92ccc1aad67f32530da6fb9885a50..ee45c52e10af499adee692bd9411c8a60b6e81c2 100644
--- a/demos/critical_filtering.py
+++ b/demos/critical_filtering.py
@@ -115,10 +115,13 @@ if __name__ == "__main__":
                                  iteration_limit=3,
                                  callback=convergence_measure)
 
-    minimizer2 = VL_BFGS(convergence_tolerance=0,
-                         iteration_limit=7,
+    minimizer2 = VL_BFGS(convergence_tolerance=1e-3,
+                         iteration_limit=70,
                          callback=convergence_measure,
-                         max_history_length=3)
+                         max_history_length=10)
+    minimizer3 = SteepestDescent(convergence_tolerance=1e-3,
+                                 iteration_limit=70,
+                                 callback=convergence_measure)
 
     # Setting starting position
     flat_power = Field(p_space,val=1e-8)
@@ -140,7 +143,7 @@ if __name__ == "__main__":
 
         # Initializing the power energy with updated parameters
         power_energy = CriticalPowerEnergy(position=t0, m=m0, D=D0, smoothness_prior=10., samples=3)
 
-        (power_energy, convergence) = minimizer1(power_energy)
+        (power_energy, convergence) = minimizer2(power_energy)
 
         # Setting new power spectrum
diff --git a/nifty/minimization/descent_minimizer.py b/nifty/minimization/descent_minimizer.py
index 964489c1460870b05681cd058c9ab5af4ce6313c..f6d5c46326ba0be33c30eb0be3a446b3a7cce7d6 100644
--- a/nifty/minimization/descent_minimizer.py
+++ b/nifty/minimization/descent_minimizer.py
@@ -121,18 +121,18 @@ class DescentMinimizer(Loggable, object):
 
         """
 
-        print "into line search:"
-        print " pos: ",energy.position.val[0]
-        print " ene: ",energy.value
+        #print "into line search:"
+        #print " pos: ",energy.position.val[0]
+        #print " ene: ",energy.value
         convergence = 0
         f_k_minus_1 = None
         step_length = 0
         iteration_number = 1
 
         while True:
-            print "line search next iteration:"
-            print " pos: ",energy.position.val[0]
-            print " ene: ",energy.value
+            #print "line search next iteration:"
+            #print " pos: ",energy.position.val[0]
+            #print " ene: ",energy.value
             if self.callback is not None:
                 try:
                     self.callback(energy, iteration_number)
@@ -153,7 +153,7 @@ class DescentMinimizer(Loggable, object):
 
             # current position is encoded in energy object
             descent_direction = self.get_descent_direction(energy)
-            print "descent direction:",descent_direction.val[0]
+            #print "descent direction:",descent_direction.val[0]
             # compute the step length, which minimizes energy.value along the
             # search direction
             step_length, f_k, new_energy = \
@@ -161,17 +161,17 @@ class DescentMinimizer(Loggable, object):
                                                energy=energy,
                                                pk=descent_direction,
                                                f_k_minus_1=f_k_minus_1)
-            print "out of wolfe:"
-            print " old pos: ",energy.position.val[0]
-            print " old ene: ",energy.value
-            print " new pos: ",new_energy.position.val[0]
-            print " new ene: ",new_energy.value
-            print " f_k: ",f_k
+            #print "out of wolfe:"
+            #print " old pos: ",energy.position.val[0]
+            #print " old ene: ",energy.value
+            #print " new pos: ",new_energy.position.val[0]
+            #print " new ene: ",new_energy.value
+            #print " f_k: ",f_k
             f_k_minus_1 = energy.value
-            print " step length: ", step_length
+            #print " step length: ", step_length
             tx1=energy.position-new_energy.position
-            print " step length 2: ", (energy.position-new_energy.position).norm()
-            print " step length 3: ", new_energy.position.val[0]-energy.position.val[0]
+            #print " step length 2: ", (energy.position-new_energy.position).norm()
+            #print " step length 3: ", new_energy.position.val[0]-energy.position.val[0]
             # check if new energy value is bigger than old energy value
             if (new_energy.value - energy.value) > 0:
                 print "Line search algorithm returned a new energy that was larger than the old one. Stopping."
diff --git a/nifty/minimization/line_searching/line_search_strong_wolfe.py b/nifty/minimization/line_searching/line_search_strong_wolfe.py
index c9424512f18705490d32251d29a547860429c74a..b14cbce78dcf0dcb167bada801d121ee603427ca 100644
--- a/nifty/minimization/line_searching/line_search_strong_wolfe.py
+++ b/nifty/minimization/line_searching/line_search_strong_wolfe.py
@@ -61,6 +61,9 @@ class LineSearchStrongWolfe(LineSearch):
 
     """
 
+#    def __init__(self, c1=1e-4, c2=0.9,
+#                 max_step_size=1000000000, max_iterations=100,
+#                 max_zoom_iterations=100):
     def __init__(self, c1=1e-4, c2=0.9,
                  max_step_size=50, max_iterations=10,
                  max_zoom_iterations=10):
@@ -111,6 +114,7 @@ class LineSearchStrongWolfe(LineSearch):
         le_0 = self.line_energy.at(0)
         phi_0 = le_0.value
         phiprime_0 = le_0.dd
+        assert phiprime_0<0, "input direction must be a descent direction"
 
         if phiprime_0 == 0:
             self.logger.warn("Flat gradient in search direction.")
@@ -133,12 +137,12 @@ class LineSearchStrongWolfe(LineSearch):
 
         # start the minimization loop
         for i in xrange(max_iterations):
-            print "a0a1:",alpha0, alpha1
-            print "line search outer iteration", i
+            #print "a0a1:",alpha0, alpha1
+            #print "line search outer iteration", i
             le_alpha1 = self.line_energy.at(alpha1)
-            print "position:", le_alpha1.energy.position.val[0]
+            #print "position:", le_alpha1.energy.position.val[0]
             phi_alpha1 = le_alpha1.value
-            print "energy:", le_alpha1.value
+            #print "energy:", le_alpha1.value
             if alpha1 == 0:
                 self.logger.warn("Increment size became 0.")
                 alpha_star = 0.
@@ -175,6 +179,7 @@ class LineSearchStrongWolfe(LineSearch):
             # update alphas
             alpha0, alpha1 = alpha1, min(2*alpha1, max_step_size)
             if alpha1 == max_step_size:
+                print "bailout"
                 alpha_star = alpha1
                 phi_star = phi_alpha1
                 le_star = le_alpha1
@@ -235,10 +240,10 @@ class LineSearchStrongWolfe(LineSearch):
             The new Energy object on the new position.
 
         """
-        print "entering zoom"
-        print alpha_lo, alpha_hi
-        print "pos1:",self.line_energy.at(alpha_lo).energy.position.val[0]
-        print "pos2:",self.line_energy.at(alpha_hi).energy.position.val[0]
+        #print "entering zoom"
+        #print alpha_lo, alpha_hi
+        #print "pos1:",self.line_energy.at(alpha_lo).energy.position.val[0]
+        #print "pos2:",self.line_energy.at(alpha_hi).energy.position.val[0]
         max_iterations = self.max_zoom_iterations
         # define the cubic and quadratic interpolant checks
         cubic_delta = 0.2  # cubic
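Note on the assert added in perform_line_search(): the c1/c2 arguments of LineSearchStrongWolfe are the usual strong Wolfe constants, and the search is only well defined when the directional derivative phiprime_0 at step length zero is negative, i.e. when pk is a descent direction. The sketch below spells out the two conditions in the standard textbook form (Armijo sufficient decrease plus strong curvature); the helper name and signature are illustrative only, not part of the NIFTY API.

    def satisfies_strong_wolfe(phi_0, phiprime_0, phi_alpha, phiprime_alpha,
                               alpha, c1=1e-4, c2=0.9):
        # phi(alpha) is the energy restricted to the search direction,
        # phi(alpha) = E(position + alpha*pk); phiprime is its derivative.
        # A descent direction means phi'(0) < 0, which is what the new
        # assert in perform_line_search() checks before searching.
        assert phiprime_0 < 0, "pk must be a descent direction"

        # 1) Sufficient decrease (Armijo) condition, controlled by c1.
        sufficient_decrease = phi_alpha <= phi_0 + c1*alpha*phiprime_0
        # 2) Strong curvature condition, controlled by c2: the slope at
        #    alpha must have flattened out relative to the slope at 0.
        curvature = abs(phiprime_alpha) <= c2*abs(phiprime_0)
        return sufficient_decrease and curvature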