Commit ce7c2ceb authored by Neel Shah

rename operator as TensorDotOperator

parent e5019b2f
Pipeline #106972 canceled with stages
......@@ -23,23 +23,26 @@ from ..domains.rg_space import RGSpace
from ..field import Field
from .linear_operator import LinearOperator
class MatrixProductOperator(LinearOperator):
"""Matrix multiplication with input field.
class TensorDotOperator(LinearOperator):
"""Contraction of the last few dimensions
of a tensor with selected dimensions of the input field.
This operator supports scipy.sparse matrices and numpy arrays
as the matrix to be applied.
as the tensor to be contracted with. Its output coincides with that
of the endomorphic MatrixProductOperator constructed with the same
input arguments whenever the latter is applicable.
For numpy array matrices, can apply the matrix over any subspaces
For numpy arrays, it can contract the tensor with any subspaces
of the input.
If the input arrays have more than one dimension, for
scipy.sparse matrices the `flatten` keyword argument must be
set to True. This means that the input field will be flattened
before applying the matrix and reshaped to its original shape
before contracting with the tensor and reshaped to its original shape
afterwards. Flattening is only supported when the domain and target
are the same, and a target can't be specified if flatten=True.
Matrices are tested regarding their compatibility with the
Arrays are checked for compatibility with the
called-for application method.
Flattening and subspace application are mutually exclusive.
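As a rough orientation, a minimal sketch of what is computed (all
shapes below are hypothetical examples; `op(field)` denotes the usual
NIFTy operator call):

    tensor.shape == (a, b, m, n)      # last two axes match the domain
    domain.shape == (m, n)
    # spaces=None, flatten=False:
    #     op(field).val == np.tensordot(tensor, field.val, axes=2)   # shape (a, b)
    # flatten=True with a square matrix of shape (m*n, m*n):
    #     op(field).val == matrix.dot(field.val.ravel()).reshape((m, n))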
......@@ -54,32 +57,31 @@ class MatrixProductOperator(LinearOperator):
When applied to specific subspaces of the domain, the
non-participating subspaces of the domain retain their positions
in the target space. The remaining axes of the target space are
the matrix's axes in their original order. This convention is for
the tensor's axes in their original order. This convention is for
preserving the space's shape when the application is endomorphic.
A technicality related to this point: the absolute positions of the
non-participating subspaces of the domain must stay the same in the
target space, thus the matrix must have enough unsummed axes to stand
target space, thus the tensor must have enough unsummed axes to stand
in the places of summed-over axes of the domain, if those summed-over
axes are followed by any unsummed (inactive) axes. An example to make
this clear: if the first 2 spaces of the domain are summed over in
the matrix multiplication and the 3rd space doesn't participate, the
matrix must have (at least) 2 axes that don't participate in the
the contraction and the 3rd space doesn't participate, the
tensor must have (at least) 2 axes that don't participate in the
multiplication, so that these 2 axes can come before the 3rd subspace
of the domain takes its position in the target space. Otherwise an
error will be raised when the operator is applied, informing that
the matrix has too few extra dimensions.
the tensor has too few extra dimensions.
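A concrete (hypothetical) illustration of this convention:

    domain subspace shapes: ((2, 3), (4,), (5,)),  spaces=(0, 1)
    tensor.shape == (7, 8, 9, 2, 3, 4)   # last three axes match subspaces 0 and 1
    # The inactive third subspace keeps its position (index 2) in the
    # target; the tensor's three unsummed axes fill the remaining
    # positions in their original order:
    # target shape == (7, 8, 5, 9)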
Parameters
----------
domain: :class:`Domain` or :class:`DomainTuple`
Domain of the operator.
If :class:`Domain`, it is assumed to have only one subspace.
matrix: scipy.sparse matrix or numpy array
Quadratic matrix of shape `(domain.shape, domain.shape)`
(if `not flatten`) that supports `matrix.transpose()`.
If it is not a numpy array, needs to be applicable to the val
array of input fields by `matrix.dot()`.
tensor: scipy.sparse matrix or numpy array
Tensor whose last axes must match the shape of the field axes
that are summed over (if not `flatten`).
If `flatten`, needs to be applicable to the val array of
input fields by `tensor.dot()`.
spaces: int or tuple of int, optional
The subdomain(s) of "domain" which the operator acts on.
If None, it acts on all elements.
......@@ -88,7 +90,7 @@ class MatrixProductOperator(LinearOperator):
mandatory.
flatten: boolean, optional
Whether the input value array should be flattened before
applying the matrix and reshaped to its original shape
contracting with the tensor and reshaped to its original shape
afterwards.
Needed for scipy.sparse matrices if `len(domain) > 1`.
target: :class:`Domain` or :class:`DomainTuple`, optional
......@@ -96,11 +98,11 @@ class MatrixProductOperator(LinearOperator):
parameters like domain type and distances are flexible. The
default is an RGSpace with the default distances convention.
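A minimal usage sketch (illustrative values only; it relies on the
classes imported above and the usual NIFTy call syntax `op(field)`):

    dom = RGSpace((5, 6))
    t = np.random.rand(3, 4, 5, 6)            # last two axes match the domain
    op = TensorDotOperator(dom, t)            # default target shape: (3, 4)
    f = Field(DomainTuple.make(dom), np.random.rand(5, 6))
    res = op(f)                               # res.val.shape == (3, 4)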
"""
def __init__(self, domain, matrix, spaces=None, flatten=False, target=None):
def __init__(self, domain, tensor, spaces=None, flatten=False, target=None):
self._capability = self.TIMES | self.ADJOINT_TIMES
self._domain = DomainTuple.make(domain)
mat_dim = len(matrix.shape)
tensor_dim = len(tensor.shape)
domain_shape = domain.shape
domain_dim = len(domain_shape)
......@@ -116,17 +118,17 @@ class MatrixProductOperator(LinearOperator):
domain_shape = (utilities.my_product(domain_shape), )
target_space_shape = domain_shape
target_dim = len(target_space_shape)
mat_inactive_axes_dim = mat_dim - len(domain_shape)
if mat_inactive_axes_dim < 0:
raise ValueError("Domain has more dimensions than matrix.")
target_space_shape = matrix.shape[:mat_inactive_axes_dim]
target_dim = mat_inactive_axes_dim
tensor_inactive_dim = tensor_dim - len(domain_shape)
if tensor_inactive_dim < 0:
raise ValueError("Domain has more dimensions than tensor.")
target_space_shape = tensor.shape[:tensor_inactive_dim]
target_dim = tensor_inactive_dim
else:
if flatten:
raise ValueError(
"Cannot flatten input AND apply to a subspace")
if not isinstance(matrix, np.ndarray):
if not isinstance(tensor, np.ndarray):
raise ValueError(
"Application to subspaces only supported for numpy array matrices.")
......@@ -142,31 +144,31 @@ class MatrixProductOperator(LinearOperator):
domain_shape = tuple(domain_shape)
self._active_axes = tuple(active_axes)
self._inactive_axes = tuple(self._inactive_axes)
mat_inactive_axes_dim = mat_dim - len(domain_shape)
if mat_inactive_axes_dim < 0:
raise ValueError("Domain has more dimensions than matrix.")
tensor_inactive_dim = tensor_dim - len(domain_shape)
if tensor_inactive_dim < 0:
raise ValueError("Domain has more dimensions than tensor.")
target_dim = mat_inactive_axes_dim + len(self._inactive_axes)
target_dim = tensor_inactive_dim + len(self._inactive_axes)
domain_dim = len(domain_shape)
target_space_shape = []
matrix_shape_idx = 0
tensor_shape_idx = 0
for i in range(target_dim):
if i in tuple(self._inactive_axes):
for j in range(len(self._domain[i].shape)):
target_space_shape.append(self._domain[i].shape[j])
else:
target_space_shape.append(matrix.shape[matrix_shape_idx])
matrix_shape_idx += 1
target_space_shape.append(tensor.shape[tensor_shape_idx])
tensor_shape_idx += 1
target_space_shape = tuple(target_space_shape)
self._mat_last_n = tuple([-domain_dim + i for i in range(domain_dim)])
self._mat_first_n = np.arange(domain_dim)
self._tensor_last_n = tuple([-domain_dim + i for i in range(domain_dim)])
self._tensor_first_n = np.arange(domain_dim)
self._target_last_n = tuple([-len(self._inactive_axes) + i for
i in range(len(self._inactive_axes))])
# mat_last_m is needed for adjoint application even if spaces = None
self._mat_last_m = tuple([-mat_inactive_axes_dim + i for i in
range(mat_inactive_axes_dim)])
# tensor_last_m is needed for adjoint application even if spaces = None
self._tensor_last_m = tuple([-tensor_inactive_dim + i for i in
range(tensor_inactive_dim)])
self._target_axes = tuple(range(len(target_space_shape)))
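# Illustration of the axis bookkeeping above for hypothetical sizes
# (two contracted field axes, tensor_inactive_dim == 3, one inactive
# subspace):
#     _tensor_last_n  == (-2, -1)        # tensor axes contracted in TIMES
#     _tensor_first_n == array([0, 1])   # leading axes of the adjoint result, moved back to the active field axes
#     _target_last_n  == (-1,)           # result axes moved onto the inactive subspaces
#     _tensor_last_m  == (-3, -2, -1)    # axes of the transposed tensor contracted in ADJOINT_TIMES
#     _target_axes    == (0, 1, ..., len(target_space_shape) - 1)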
if spaces is not None:
self._field_axes = list(self._target_axes)
......@@ -193,59 +195,59 @@ class MatrixProductOperator(LinearOperator):
elif target.shape == target_space_shape:
self._target = DomainTuple.make(target)
else:
raise ValueError(f"Target space has invalid shape.\n"
+"Its shape should be {target_space_shape}.")
raise ValueError("Target space has invalid shape.\n"
+f"Its shape should be {target_space_shape}.")
matrix_appl_shape = matrix.shape[mat_inactive_axes_dim:]
if matrix_appl_shape != domain_shape:
raise ValueError("Matrix doesn't fit with the domain.\n" +
f"Shape of matrix axes used in summation: {matrix_appl_shape},\n " +
tensor_appl_shape = tensor.shape[tensor_inactive_dim:]
if tensor_appl_shape != domain_shape:
raise ValueError("Tensor doesn't fit with the domain.\n" +
f"Shape of tensor axes used in summation: {tensor_appl_shape},\n " +
f"Shape of domain axes used in summation: {domain_shape}.")
self._mat = matrix
self._mat_tr = matrix.transpose().conjugate()
self._tensor = tensor
self._tensor_tr = tensor.transpose().conjugate()
self._flatten = flatten
def apply(self, x, mode):
self._check_input(x, mode)
times = (mode == self.TIMES)
m = self._mat if times else self._mat_tr
t = self._tensor if times else self._tensor_tr
target = self._target if times else self._domain
if self._spaces is None:
if not self._flatten:
if times:
res = np.tensordot(m, x.val, axes = len(self._domain.shape))
res = np.tensordot(t, x.val, axes=len(self._domain.shape))
else:
mat_axes = np.flip(self._mat_last_m)
mat_axes = np.flip(self._tensor_last_m)
field_axes = self._target_axes
res = np.tensordot(m, x.val, axes=(mat_axes, field_axes))
res = np.tensordot(t, x.val, axes=(mat_axes, field_axes))
res = res.transpose()
else:
res = m.dot(x.val.ravel()).reshape(self._domain.shape)
res = t.dot(x.val.ravel()).reshape(self._domain.shape)
return Field(target, res)
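# Sketch of the adjoint branch above for spaces=None and flatten=False
# (hypothetical shapes): with tensor.shape == (a, b, m, n) and a domain of
# shape (m, n), the conjugate-transposed tensor has shape (n, m, b, a) and
# the adjoint application amounts to
#     res = np.tensordot(t_tr, y.val, axes=((-1, -2), (0, 1)))   # shape (n, m)
#     res = res.transpose()                                      # shape (m, n)
# which maps a field of the target shape (a, b) back to the domain.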
if times:
mat_axes = self._mat_last_n
mat_axes = self._tensor_last_n
move_axes = self._target_last_n
res = np.tensordot(m, x.val, axes=(mat_axes, self._active_axes))
res = np.tensordot(t, x.val, axes=(mat_axes, self._active_axes))
try:
res = np.moveaxis(res, move_axes, self._inactive_axes)
except np.AxisError:
raise ValueError("The matrix has too few extra dimensions.\n" +
"Number of dimensions in matrix:" +
f"{len(m.shape)}\n")
raise ValueError("The tensor has too few extra dimensions.\n" +
"Number of dimensions in tensor:" +
f"{len(t.shape)}\n")
else:
mat_axes = np.flip(self._mat_last_m)
move_axes = np.flip(self._mat_first_n)
mat_axes = np.flip(self._tensor_last_m)
move_axes = np.flip(self._tensor_first_n)
field_axes = self._field_axes
try:
res = np.tensordot(m, x.val, axes=(mat_axes, field_axes))
res = np.tensordot(t, x.val, axes=(mat_axes, field_axes))
except ValueError as e:
if e.args[0] == "shape-mismatch for sum":
raise ValueError("The matrix has too few extra dimensions.\n" +
"Number of dimensions in matrix: " +
f"{len(self._mat.shape)}\n")
raise ValueError("The tensor has too few extra dimensions.\n" +
"Number of dimensions in tensor: " +
f"{len(self._tensor.shape)}\n")
else:
raise e
res = np.moveaxis(res, move_axes, self._active_axes)
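# Sketch of the subspace TIMES branch above, matching the convention in the
# class docstring (hypothetical shapes): domain subspace shapes
# ((2, 3), (4,), (5,)), spaces=(0, 1), tensor.shape == (7, 8, 9, 2, 3, 4):
#     res = np.tensordot(tensor, x.val, axes=((-3, -2, -1), (0, 1, 2)))  # (7, 8, 9, 5)
#     res = np.moveaxis(res, (-1,), (2,))                                 # (7, 8, 5, 9)
# i.e. the untouched third subspace (size 5) is moved back to its original
# position, giving the target shape (7, 8, 5, 9).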
......