Commit bbf22449 authored by Martin Reinecke

Model.gradient -> Model.jacobian

parent c15669d0
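In short, the derivative of a Model is now exposed as .jacobian instead of .gradient (it is still a LinearOperator, and Energy objects keep their .gradient), and a few operator applications are spelled out explicitly as inverse_times/adjoint_times. A two-line sketch of what this means for downstream code; "m" is a placeholder Model instance, not taken from this commit:

# before this commit:  deriv = m.gradient    # LinearOperator: derivative at m.position
# after this commit:   deriv = m.jacobian    # same operator, new name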
......@@ -30,7 +30,7 @@ def _get_acceptable_model(M):
     if not np.isfinite(val.sum()):
         raise ValueError('Initial Model value must be finite')
     dir = from_random("normal", M.position.domain)
-    dirder = M.gradient(dir)
+    dirder = M.jacobian(dir)
     dir *= val/(dirder).norm()*1e-5
     # Find a step length that leads to a "reasonable" Model
     for i in range(50):
......@@ -82,7 +82,7 @@ def check_value_gradient_consistency(E, tol=1e-8, ntries=100):
         if isinstance(E, Energy):
             dirder = Emid.gradient.vdot(dir)/dirnorm
         else:
-            dirder = Emid.gradient(dir)/dirnorm
+            dirder = Emid.jacobian(dir)/dirnorm
         numgrad = (E2.value-val)/dirnorm
         if isinstance(E, Model):
             xtol = tol * dirder.norm() / np.sqrt(dirder.size)
......
......@@ -5,4 +5,4 @@ def ApplyData(data, var, model_data):
     from ..sugar import sqrt
     sqrt_n = DiagonalOperator(sqrt(var))
     data = Constant(model_data.position, data)
-    return sqrt_n.inverse(model_data - data)
+    return sqrt_n.inverse_times(model_data - data)
......@@ -49,18 +49,20 @@ class GaussianEnergy(Energy):
     def value(self):
         if self._cov is None:
             return .5 * self.residual.vdot(self.residual).real
-        return .5 * self.residual.vdot(self._cov.inverse(self.residual)).real
+        return .5 * self.residual.vdot(
+            self._cov.inverse_times(self.residual)).real

     @property
     @memo
     def gradient(self):
         if self._cov is None:
-            return self._inp.gradient.adjoint(self.residual)
-        return self._inp.gradient.adjoint(self._cov.inverse(self.residual))
+            return self._inp.jacobian.adjoint_times(self.residual)
+        return self._inp.jacobian.adjoint_times(
+            self._cov.inverse_times(self.residual))

     @property
     @memo
     def curvature(self):
         if self._cov is None:
-            return SandwichOperator.make(self._inp.gradient, None)
-        return SandwichOperator.make(self._inp.gradient, self._cov.inverse)
+            return SandwichOperator.make(self._inp.jacobian, None)
+        return SandwichOperator.make(self._inp.jacobian, self._cov.inverse)
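For orientation, the GaussianEnergy code above computes the standard Gaussian negative log-likelihood pieces. A small numpy sketch with dense stand-ins (J for self._inp.jacobian, N for self._cov, r for self.residual; these names and the toy sizes are illustrative, not from the commit):

import numpy as np

rng = np.random.default_rng(0)
J = rng.normal(size=(5, 3))            # stand-in for self._inp.jacobian
N = np.diag(rng.uniform(1., 2., 5))    # stand-in for self._cov
r = rng.normal(size=5)                 # stand-in for self.residual

value = 0.5 * r @ np.linalg.solve(N, r)      # .5 * residual.vdot(cov.inverse_times(residual))
gradient = J.T @ np.linalg.solve(N, r)       # jacobian.adjoint_times(cov.inverse_times(residual))
curvature = J.T @ np.linalg.inv(N) @ J       # SandwichOperator.make(jacobian, cov.inverse)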
......@@ -40,11 +40,11 @@ class PoissonianEnergy(Energy):
         self._value = lamb_val.sum() - d.vdot(log(lamb_val))
         if isnan(self._value):
             self._value = inf
-        self._gradient = self._lamb.gradient.adjoint_times(1 - d/lamb_val)
+        self._gradient = self._lamb.jacobian.adjoint_times(1 - d/lamb_val)

         # metric = makeOp(d/lamb_val/lamb_val)
         metric = makeOp(1./lamb_val)
-        self._curvature = SandwichOperator.make(self._lamb.gradient, metric)
+        self._curvature = SandwichOperator.make(self._lamb.jacobian, metric)

     def at(self, position):
         return self.__class__(self._lamb.at(position), self._d)
......
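The PoissonianEnergy hunk above only renames the Jacobian accessor; the underlying formulas are unchanged. A numpy stand-in for the three assignments (lamb_val, d and J are illustrative dense arrays, not NIFTy objects):

import numpy as np

rng = np.random.default_rng(0)
J = rng.normal(size=(4, 2))                # stand-in for self._lamb.jacobian
lamb_val = rng.uniform(0.5, 2.0, 4)        # expected counts lambda(position)
d = rng.poisson(lamb_val).astype(float)    # observed counts

value = lamb_val.sum() - d @ np.log(lamb_val)    # lamb_val.sum() - d.vdot(log(lamb_val))
gradient = J.T @ (1.0 - d / lamb_val)            # jacobian.adjoint_times(1 - d/lamb_val)
curvature = J.T @ np.diag(1.0 / lamb_val) @ J    # SandwichOperator.make(jacobian, makeOp(1./lamb_val))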
......@@ -42,7 +42,7 @@ class ScalarMul(Model):
         self._factor = factor

         self._value = self._factor * self._model.value
-        self._gradient = self._factor * self._model.gradient
+        self._jacobian = self._factor * self._model.jacobian

     def at(self, position):
         return self.__class__(self._factor, self._model.at(position))
......@@ -57,7 +57,7 @@ class Add(Model):
         self._model2 = model2.at(position)

         self._value = self._model1.value + self._model2.value
-        self._gradient = self._model1.gradient + self._model2.gradient
+        self._jacobian = self._model1.jacobian + self._model2.jacobian

     @staticmethod
     def make(model1, model2):
......@@ -87,8 +87,8 @@ class Mul(Model):
         self._model2 = model2.at(position)

         self._value = self._model1.value * self._model2.value
-        self._gradient = (makeOp(self._model1.value) * self._model2.gradient +
-                          makeOp(self._model2.value) * self._model1.gradient)
+        self._jacobian = (makeOp(self._model1.value) * self._model2.jacobian +
+                          makeOp(self._model2.value) * self._model1.jacobian)

     @staticmethod
     def make(model1, model2):
......
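The ScalarMul, Add and Mul hunks above encode linearity and the product rule for Jacobians; for Mul, the Jacobian of model1*model2 is diag(value1)*J2 + diag(value2)*J1, which is what the makeOp composition expresses. A dense numpy illustration (all names are placeholders):

import numpy as np

v1, v2 = np.array([1., 2.]), np.array([3., 4.])    # model1.value, model2.value
J1, J2 = np.eye(2), 2. * np.eye(2)                 # model1.jacobian, model2.jacobian

J_mul = np.diag(v1) @ J2 + np.diag(v2) @ J1        # makeOp(v1)*J2 + makeOp(v2)*J1
J_add = J1 + J2                                    # Add: Jacobians simply sum
J_scaled = 0.5 * J1                                # ScalarMul: factor * jacobian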
......@@ -32,7 +32,7 @@ class Constant(Model):
     -----
     Since there is no model-function associated:
     - Position has no influence on value.
-    - There is no gradient.
+    - There is no Jacobian.
     """
     # TODO Remove position
     def __init__(self, position, constant):
......@@ -40,7 +40,7 @@ class Constant(Model):
         self._constant = constant

         self._value = self._constant
-        self._gradient = 0.
+        self._jacobian = 0.

     def at(self, position):
         return self.__class__(position, self._constant)
......@@ -34,7 +34,7 @@ class LinearModel(Model):
         -------
         Model with linear Operator applied:
         - Model.value = LinOp (inp.value) [key-wise]
-        - Gradient = LinOp * inp.gradient
+        - Jacobian = LinOp * inp.jacobian
         """
         from ..operators.linear_operator import LinearOperator
         super(LinearModel, self).__init__(inp.position)
......@@ -49,7 +49,7 @@ class LinearModel(Model):
                                          self._lin_op._key)

         self._value = self._lin_op(self._inp.value)
-        self._gradient = self._lin_op*self._inp.gradient
+        self._jacobian = self._lin_op*self._inp.jacobian

     def at(self, position):
         return self.__class__(self._inp.at(position), self._lin_op)
......@@ -27,7 +27,7 @@ class LocalModel(Model):
"""
Computes nonlinearity(inp)
- LocalModel.value = nonlinearity(value) (pointwise)
- LocalModel.gradient = Outer Product of gradients
- LocalModel.jacobian = Outer Product of Jacobians
Parameters
----------
......@@ -40,9 +40,9 @@ class LocalModel(Model):
         self._inp = inp
         self._nonlinearity = nonlinearity
         self._value = nonlinearity(self._inp.value)
-        d_inner = self._inp.gradient
+        d_inner = self._inp.jacobian
         d_outer = makeOp(self._nonlinearity.derivative(self._inp.value))
-        self._gradient = d_outer * d_inner
+        self._jacobian = d_outer * d_inner

     def at(self, position):
         return self.__class__(self._inp.at(position), self._nonlinearity)
......
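The LocalModel hunk above is the chain rule: the Jacobian of nonlinearity(inp) is the diagonal operator of the pointwise derivative composed with the inner Jacobian (d_outer * d_inner). A numpy sketch, with tanh as an arbitrary example nonlinearity:

import numpy as np

inp_value = np.array([0.1, -0.3, 0.7])             # stand-in for self._inp.value
d_inner = np.eye(3)                                # stand-in for self._inp.jacobian
d_outer = np.diag(1.0 - np.tanh(inp_value)**2)     # makeOp(nonlinearity.derivative(inp.value))

value = np.tanh(inp_value)                         # nonlinearity(inp.value), applied pointwise
jacobian = d_outer @ d_inner                       # d_outer * d_inner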
......@@ -28,7 +28,7 @@ class Model(NiftyMetaBase()):
     The Model object is an implementation of a * which knows:
     - position in parameterspace. (Field, MulitField)
     - value according to its modelfunction A. A(position)
-    - gradient of the modelfunction at the current position.
+    - Jacobian of the model function at the current position.

     Parameters
     ----------
......@@ -37,9 +37,9 @@ class Model(NiftyMetaBase()):
     Notes
     -----
-    An instance of the model class knows its position, value and gradient.
+    An instance of the model class knows its position, value and Jacobian.
     One can 'jump' to a new position, with the help of the 'at' method, whereby
-    one automatically gets the value and gradient of the model. The 'at' method
+    one automatically gets the value and Jacobian of the model. The 'at' method
     creates a new instance of the class.
     """

     def __init__(self, position):
......@@ -65,7 +65,7 @@ class Model(NiftyMetaBase()):
"""
Field or MultiField: selected location in parameter space.
The location in parameter space where value and gradient are
The location in parameter space where value and Jacobian are
evaluated.
"""
return self._position
......@@ -80,11 +80,11 @@ class Model(NiftyMetaBase()):
         return self._value

     @property
-    def gradient(self):
+    def jacobian(self):
         """
         LinearOperator : The derivative of the model at given `position`.
         """
-        return self._gradient
+        return self._jacobian

     def __getitem__(self, key):
         sel = SelectionOperator(self.value.domain, key)
......
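The Model hunks above define the central contract: a Model knows its position, its value and, after this commit, its jacobian, and at() returns a new instance at another position. A self-contained toy illustration of that contract (ToyModel is invented for illustration; in NIFTy the jacobian is a LinearOperator rather than a plain number):

class ToyModel:
    """Mimics the position/value/jacobian/at contract described above."""

    def __init__(self, position):
        self._position = position
        self._value = position**2          # model function A(x) = x**2
        self._jacobian = 2.*position       # derivative of A at x (a number here)

    def at(self, position):
        # 'jump' to a new position; returns a fresh instance
        return ToyModel(position)

    @property
    def position(self):
        return self._position

    @property
    def value(self):
        return self._value

    @property
    def jacobian(self):                    # formerly 'gradient'
        return self._jacobian


m = ToyModel(3.).at(4.)
print(m.value, m.jacobian)                 # 16.0 8.0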
......@@ -34,7 +34,8 @@ class MultiModel(Model):
         if not isinstance(val.domain, DomainTuple):
             raise TypeError
         self._value = MultiField({key: val})
-        self._gradient = MultiAdaptor(self.value.domain) * self._model.gradient
+        self._jacobian = (MultiAdaptor(self.value.domain) *
+                          self._model.jacobian)

     def at(self, position):
         return self.__class__(self._model.at(position), self._key)
......@@ -32,7 +32,7 @@ class Variable(Model):
         super(Variable, self).__init__(position)

         self._value = position
-        self._gradient = ScalingOperator(1., position.domain)
+        self._jacobian = ScalingOperator(1., position.domain)

     def at(self, position):
         return self.__class__(position)