diff --git a/nifty5/domains/rg_space.py b/nifty5/domains/rg_space.py
index d3db6a98f08910c18ef5351f2a0637805887cb01..181bc4359e3070ae4d2120eb2d8ff393ca03b353 100644
--- a/nifty5/domains/rg_space.py
+++ b/nifty5/domains/rg_space.py
@@ -141,9 +141,7 @@ class RGSpace(StructuredDomain):
     @staticmethod
     def _kernel(x, sigma):
         from ..sugar import exp
-        tmp = x*x
-        tmp *= -2.*np.pi*np.pi*sigma*sigma
-        return exp(tmp)
+        return exp(x*x * (-2.*np.pi*np.pi*sigma*sigma))
 
     def get_fft_smoothing_kernel_function(self, sigma):
         if (not self.harmonic):
diff --git a/nifty5/extra/energy_and_model_tests.py b/nifty5/extra/energy_and_model_tests.py
index 8f49f1499bce9ebf1af7700111c0e9ce69e09184..fbd1e6132b61123582386c559d88c9017d960d92 100644
--- a/nifty5/extra/energy_and_model_tests.py
+++ b/nifty5/extra/energy_and_model_tests.py
@@ -31,7 +31,7 @@ def _get_acceptable_model(M):
         raise ValueError('Initial Model value must be finite')
     dir = from_random("normal", M.position.domain)
     dirder = M.jacobian(dir)
-    dir *= val/(dirder).norm()*1e-5
+    dir = dir * val * (1e-5/dirder.norm())
     # Find a step length that leads to a "reasonable" Model
     for i in range(50):
         try:
@@ -40,7 +40,7 @@
             break
         except FloatingPointError:
             pass
-        dir *= 0.5
+        dir = dir*0.5
     else:
         raise ValueError("could not find a reasonable initial step")
     return M2
@@ -52,7 +52,7 @@
         raise ValueError('Initial Energy must be finite')
     dir = from_random("normal", E.position.domain)
     dirder = E.gradient.vdot(dir)
-    dir *= np.abs(val)/np.abs(dirder)*1e-5
+    dir = dir * (np.abs(val)/np.abs(dirder)*1e-5)
     # Find a step length that leads to a "reasonable" energy
     for i in range(50):
         try:
@@ -61,7 +61,7 @@
             break
         except FloatingPointError:
             pass
-        dir *= 0.5
+        dir = dir*0.5
     else:
         raise ValueError("could not find a reasonable initial step")
     return E2
@@ -92,7 +92,7 @@
             xtol = tol*Emid.gradient_norm
             if abs(numgrad-dirder) < xtol:
                 break
-            dir *= 0.5
+            dir = dir*0.5
             dirnorm *= 0.5
             E2 = Emid
         else:
@@ -117,7 +117,7 @@
             if abs((E2.value-val)/dirnorm - dirder) < xtol and \
               (abs((E2.gradient-E.gradient)/dirnorm-dgrad) < xtol).all():
                 break
-            dir *= 0.5
+            dir = dir*0.5
             dirnorm *= 0.5
             E2 = Emid
         else:
diff --git a/nifty5/field.py b/nifty5/field.py
index 8b5b2344b396e0f4ad4cd38e5b53a8022625da44..4e85babf7b373acf1d79759ce5976ea292aff530 100644
--- a/nifty5/field.py
+++ b/nifty5/field.py
@@ -670,3 +670,12 @@ for op in ["__add__", "__radd__",
             return NotImplemented
         return func2
     setattr(Field, op, func(op))
+
+for op in ["__iadd__", "__isub__", "__imul__", "__idiv__",
+           "__itruediv__", "__ifloordiv__", "__ipow__"]:
+    def func(op):
+        def func2(self, other):
+            raise TypeError(
+                "In-place operations are deliberately not supported")
+        return func2
+    setattr(Field, op, func(op))
diff --git a/nifty5/minimization/conjugate_gradient.py b/nifty5/minimization/conjugate_gradient.py
index 94cd8991e9f74bf908df26f923f79d6c2ed03251..43f4e07819aa07de1b51cc5272e11819319570bf 100644
--- a/nifty5/minimization/conjugate_gradient.py
+++ b/nifty5/minimization/conjugate_gradient.py
@@ -84,8 +84,7 @@ class ConjugateGradient(Minimizer):
                 logger.error("Error: ConjugateGradient: alpha<0.")
                 return energy, controller.ERROR
 
-            q *= -alpha
-            r = r + q
+            r = r - q*alpha
 
             energy = energy.at_with_grad(energy.position - alpha*d, r)
 
@@ -103,7 +102,6 @@
             if status != controller.CONTINUE:
                 return energy, status
 
-            d *= max(0, gamma/previous_gamma)
-            d += s
+            d = d * max(0, gamma/previous_gamma) + s
 
             previous_gamma = gamma
diff --git a/nifty5/minimization/l_bfgs.py b/nifty5/minimization/l_bfgs.py
index fda0e19404677fb65ae4db2c6a730214a6248e34..e2a75b2a1f5d4e8130883d0ba22216d90105e9fc 100644
--- a/nifty5/minimization/l_bfgs.py
+++ b/nifty5/minimization/l_bfgs.py
@@ -61,16 +61,16 @@ class L_BFGS(DescentMinimizer):
         for i in range(k-1, k-nhist-1, -1):
             idx = i % maxhist
             alpha[idx] = s[idx].vdot(p)/s[idx].vdot(y[idx])
-            p -= alpha[idx]*y[idx]
+            p = p - alpha[idx]*y[idx]
         idx = (k-1) % maxhist
         fact = s[idx].vdot(y[idx]) / y[idx].vdot(y[idx])
         if fact <= 0.:
             logger.error("L-BFGS curvature not positive definite!")
-        p *= fact
+        p = p*fact
         for i in range(k-nhist, k):
             idx = i % maxhist
             beta = y[idx].vdot(p) / s[idx].vdot(y[idx])
-            p += (alpha[idx]-beta)*s[idx]
+            p = p + (alpha[idx]-beta)*s[idx]
         self._lastx = x
         self._lastgrad = gradient
         self._k += 1
diff --git a/nifty5/minimization/vl_bfgs.py b/nifty5/minimization/vl_bfgs.py
index 67e67ca925dba209b8124805a40b52cd606f07b7..6ebde78248497f600431b3dc13576e6d83fa1434 100644
--- a/nifty5/minimization/vl_bfgs.py
+++ b/nifty5/minimization/vl_bfgs.py
@@ -67,7 +67,7 @@ class VL_BFGS(DescentMinimizer):
 
         descent_direction = delta[0] * b[0]
         for i in range(1, len(delta)):
-            descent_direction += delta[i] * b[i]
+            descent_direction = descent_direction + delta[i]*b[i]
 
         return descent_direction
 
diff --git a/nifty5/multi/multi_field.py b/nifty5/multi/multi_field.py
index 18011add3443c9d73c16ebc94b4dadc5bcb95c7e..cf2e5393cf40d187a2d9df0852a394992779db7d 100644
--- a/nifty5/multi/multi_field.py
+++ b/nifty5/multi/multi_field.py
@@ -220,3 +220,12 @@ for op in ["__add__", "__radd__",
             return MultiField(result_val)
         return func2
     setattr(MultiField, op, func(op))
+
+for op in ["__iadd__", "__isub__", "__imul__", "__idiv__",
+           "__itruediv__", "__ifloordiv__", "__ipow__"]:
+    def func(op):
+        def func2(self, other):
+            raise TypeError(
+                "In-place operations are deliberately not supported")
+        return func2
+    setattr(MultiField, op, func(op))
diff --git a/nifty5/operators/diagonal_operator.py b/nifty5/operators/diagonal_operator.py
index b619083e9bec89fd91609c9996590bc1732c42bd..3412b562f5ce7da485a9f1335c11f1cace7fa305 100644
--- a/nifty5/operators/diagonal_operator.py
+++ b/nifty5/operators/diagonal_operator.py
@@ -185,7 +185,6 @@ class DiagonalOperator(EndomorphicOperator):
         res = Field.from_random(random_type="normal", domain=self._domain,
                                 dtype=dtype)
         if from_inverse:
-            res /= np.sqrt(self._ldiag)
+            return res/np.sqrt(self._ldiag)
         else:
-            res *= np.sqrt(self._ldiag)
-        return res
+            return res*np.sqrt(self._ldiag)
diff --git a/nifty5/operators/fft_operator.py b/nifty5/operators/fft_operator.py
index 781b072f5bdf652fbc410ad7746b8af326ea64a7..be91ef5fad3ffbcab88c6ab696bda977ce686153 100644
--- a/nifty5/operators/fft_operator.py
+++ b/nifty5/operators/fft_operator.py
@@ -118,10 +118,7 @@ class FFTOperator(LinearOperator):
             fct = self._domain[self._space].scalar_dvol
         else:
             fct = self._target[self._space].scalar_dvol
-        if fct != 1:
-            Tval *= fct
-
-        return Tval
+        return Tval if fct == 1 else Tval*fct
 
     @property
     def domain(self):
diff --git a/nifty5/operators/sum_operator.py b/nifty5/operators/sum_operator.py
index 62c22bb8cb8c1842cc3d62acdc242127f6dd9f9a..5cbe8eb3de943eff8a0ddcd9f6afe855674f6f84 100644
--- a/nifty5/operators/sum_operator.py
+++ b/nifty5/operators/sum_operator.py
@@ -172,14 +172,15 @@ class SumOperator(LinearOperator):
 
     def apply(self, x, mode):
         self._check_mode(mode)
-        for i, op in enumerate(self._ops):
-            if i == 0:
-                res = -op.apply(x, mode) if self._neg[i] else op.apply(x, mode)
+        res = None
+        for op, neg in zip(self._ops, self._neg):
+            if res is None:
+                res = -op.apply(x, mode) if neg else op.apply(x, mode)
             else:
-                if self._neg[i]:
-                    res -= op.apply(x, mode)
+                if neg:
+                    res = res - op.apply(x, mode)
                 else:
-                    res += op.apply(x, mode)
+                    res = res + op.apply(x, mode)
         return res
 
     def draw_sample(self, from_inverse=False, dtype=np.float64):
diff --git a/nifty5/probing/utils.py b/nifty5/probing/utils.py
index 72f653f18298a7dd6046f74988a6d738c3454e46..a1c909a07335b155b2b1420e66b24eb92d2e59c6 100644
--- a/nifty5/probing/utils.py
+++ b/nifty5/probing/utils.py
@@ -31,9 +31,9 @@ class StatCalculator(object):
             self._M2 = 0.*value
         else:
             delta = value - self._mean
-            self._mean += delta*(1./self._count)
+            self._mean = self.mean + delta*(1./self._count)
             delta2 = value - self._mean
-            self._M2 += delta*delta2
+            self._M2 = self._M2 + delta*delta2
 
     @property
     def mean(self):
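
The __i*__ overrides added to Field and MultiField above make augmented assignment fail
loudly instead of silently mutating data that other objects may still reference; all
arithmetic now goes through the out-of-place operators. A minimal sketch of the resulting
behaviour, assuming a NIFTy5 checkout with this patch applied:

    import nifty5 as ift

    f = ift.full(ift.RGSpace(8), 1.)   # a Field is treated as immutable
    g = f + f                          # out-of-place arithmetic is unaffected
    try:
        f += g                         # dispatches to the new Field.__iadd__
    except TypeError as err:
        print(err)                     # "In-place operations are deliberately not supported"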