Commit b3dffa01 authored by Neel Shah

renamed new operator as MatrixProductOperator

parent 8dd3b1a6
Pipeline #105955 canceled
@@ -19,29 +19,56 @@ import numpy as np
from .. import utilities
from ..domain_tuple import DomainTuple
from ..domains.rg_space import RGSpace
from ..field import Field
from .endomorphic_operator import EndomorphicOperator
from .linear_operator import LinearOperator
class MatrixProductOperator(EndomorphicOperator):
"""Endomorphic matrix multiplication with input field.
class MatrixProductOperator(LinearOperator):
"""Matrix multiplication with input field.
This operator supports scipy.sparse matrices and numpy arrays
as the matrix to be applied.
For numpy array matrices, can apply the matrix over a subspace
For numpy array matrices, the matrix can be applied over any
subspaces of the input.
If the input field has more than one dimension, the `flatten`
keyword argument must be set to True for scipy.sparse matrices.
This means that the input field will be flattened
before applying the matrix and reshaped to its original shape
afterwards.
afterwards. Flattening is only supported when the domain and target
are identical, so a target cannot be specified if flatten=True.
Matrices are checked for compatibility with the requested
application scheme.
Flattening and subspace application are mutually exclusive.
The target space type and distances can be specified. If left
unspecified, the target defaults to an RGSpace with the default
distance convention, except when the target shape equals the
domain's shape, in which case the domain itself is used as the
default target. Either a single domain or a DomainTuple of the
valid shape can be passed as the target (see the Examples section
below).
When the operator is applied to specific subspaces of the domain,
the non-participating subspaces retain their positions in the
target space. The remaining target axes are the matrix's unsummed
axes, in their original order. This convention preserves the
space's shape when the application is endomorphic.
A technicality follows from this: since the absolute positions of
the non-participating subspaces cannot change in the target space,
the matrix must have enough unsummed axes to stand in the places of
the summed-over axes of the domain whenever those summed-over axes
are followed by unsummed (inactive) axes. For example, if the first
two spaces of the domain are summed over in the matrix
multiplication and the third space does not participate, the matrix
must have at least two axes that do not take part in the
multiplication, so that these axes can come before the third
subspace of the domain in the target space. Otherwise, an error is
raised when the operator is applied, stating that the matrix has
too few extra dimensions.
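For instance (an illustrative sketch with hypothetical shapes): for
a domain with subspace shapes (2,), (3,) and (4,) and
spaces=(0, 1), a matrix of shape (5, 6, 2, 3) is compatible and
yields a target of shape (5, 6, 4), whereas a matrix of shape
(5, 2, 3) raises the 'too few extra dimensions' error when the
operator is applied, because only one unsummed axis is available to
stand in front of the third subspace.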
Parameters
----------
@@ -64,17 +91,18 @@ class MatrixProductOperator(EndomorphicOperator):
applying the matrix and reshaped to its original shape
afterwards.
Needed for scipy.sparse matrices if `len(domain) > 1`.
target: :class:`Domain` or :class:`DomainTuple`, optional
Target of the operator. It must have the valid shape; other
properties such as the domain type and distances are flexible.
The default is an RGSpace with the default distances convention.
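Examples
--------
A minimal, illustrative sketch: the shapes are hypothetical and it
is assumed that RGSpace, DomainTuple and MatrixProductOperator are
in scope.
>>> import numpy as np
>>> import scipy.sparse as sp
>>> dom = DomainTuple.make((RGSpace(2), RGSpace(3), RGSpace(4)))
>>> mat = np.ones((5, 6, 2, 3))
>>> op = MatrixProductOperator(dom, mat, spaces=(0, 1))
>>> op.target.shape  # unsummed matrix axes, then the untouched space
(5, 6, 4)
>>> smat = sp.identity(2 * 3 * 4)
>>> op_flat = MatrixProductOperator(dom, smat, flatten=True)
>>> op_flat.target is op_flat.domain  # flattening is endomorphic
True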
"""
def __init__(self, domain, matrix, spaces=None, flatten=False):
def __init__(self, domain, matrix, spaces=None, flatten=False, target=None):
self._capability = self.TIMES | self.ADJOINT_TIMES
self._domain = DomainTuple.make(domain)
mat_dim = len(matrix.shape)
if mat_dim % 2 != 0 or \
matrix.shape != (matrix.shape[:mat_dim//2] + matrix.shape[:mat_dim//2]):
raise ValueError("Matrix must be quadratic.")
appl_dim = mat_dim // 2 # matrix application space dimension
domain_shape = domain.shape
domain_dim = len(domain_shape)
# take shortcut for trivial case
if spaces is not None:
@@ -84,36 +112,96 @@
if spaces is None:
self._spaces = None
self._active_axes = utilities.my_sum(self._domain.axes)
appl_space_shape = self._domain.shape
self._inactive_axes = ()
if flatten:
appl_space_shape = (utilities.my_product(appl_space_shape), )
domain_shape = (utilities.my_product(domain_shape), )
target_space_shape = domain_shape
target_dim = len(target_space_shape)
mat_inactive_axes_dim = mat_dim - len(domain_shape)
if mat_inactive_axes_dim < 0:
raise ValueError("Domain has more dimensions than matrix.")
target_space_shape = matrix.shape[:mat_inactive_axes_dim]
target_dim = mat_inactive_axes_dim
else:
if flatten:
raise ValueError(
"Cannot flatten input AND apply to a subspace")
if not isinstance(matrix, np.ndarray):
raise ValueError(
"Application to subspaces only supported for numpy array matrices."
)
"Application to subspaces only supported for numpy array matrices.")
self._spaces = utilities.parse_spaces(spaces, len(self._domain))
appl_space_shape = []
active_axes = []
self._inactive_axes = list(range(len(self._domain)))
domain_shape = []
for space_idx in spaces:
appl_space_shape += self._domain[space_idx].shape
domain_shape += self._domain[space_idx].shape
active_axes += self._domain.axes[space_idx]
appl_space_shape = tuple(appl_space_shape)
if space_idx in self._inactive_axes:
self._inactive_axes.remove(space_idx)
domain_shape = tuple(domain_shape)
self._active_axes = tuple(active_axes)
self._inactive_axes = tuple(self._inactive_axes)
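# _active_axes holds the axis indices of the subspaces the matrix is
# applied to; _inactive_axes holds the indices of the subspaces that
# are left untouched.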
mat_inactive_axes_dim = mat_dim - len(domain_shape)
if mat_inactive_axes_dim < 0:
raise ValueError("Domain has more dimensions than matrix.")
target_dim = mat_inactive_axes_dim + len(self._inactive_axes)
domain_dim = len(domain_shape)
target_space_shape = []
matrix_shape_idx = 0
for i in range(target_dim):
if i in tuple(self._inactive_axes):
for j in range(len(self._domain[i].shape)):
target_space_shape.append(self._domain[i].shape[j])
else:
target_space_shape.append(matrix.shape[matrix_shape_idx])
matrix_shape_idx += 1
target_space_shape = tuple(target_space_shape)
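# Axis bookkeeping (negative entries index axes from the end):
# _mat_last_n   : trailing matrix axes, contracted with the active domain axes
# _mat_first_n  : leading axes of the adjoint result, moved back onto the
#                 active domain axes
# _target_last_n: trailing axes of the forward result, moved back to the
#                 positions of the inactive subspaces
# _mat_last_m   : trailing axes of the transposed matrix, contracted with the
#                 target axes in the adjoint application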
self._mat_last_n = tuple([-domain_dim + i for i in range(domain_dim)])
self._mat_first_n = np.arange(domain_dim)
self._target_last_n = tuple([-len(self._inactive_axes) + i for
i in range(len(self._inactive_axes))])
# mat_last_m is needed for adjoint application even if spaces = None
self._mat_last_m = tuple([-mat_inactive_axes_dim + i for i in
range(mat_inactive_axes_dim)])
self._target_axes = tuple(range(len(target_space_shape)))
if spaces is not None:
self._field_axes = list(self._target_axes)
for i in list(self._target_axes):
if i in self._inactive_axes:
self._field_axes.remove(i)
self._field_axes = tuple(self._field_axes)
if target is None:
if flatten:
self._target = self._domain
else:
if target_space_shape == self._domain.shape:
default_target = self._domain
elif target_dim != 0:
default_target = DomainTuple.make(RGSpace(shape=target_space_shape))
else:
default_target = DomainTuple.make(None)
self._target = default_target
elif flatten:
raise ValueError("Flattening is supported only for endomorphic application,"
+ " and you can't specify a target.")
elif target.shape == target_space_shape:
self._target = DomainTuple.make(target)
else:
raise ValueError(f"Target space has invalid shape.\n"
+"Its shape should be {target_space_shape}.")
self._mat_last_n = tuple([-appl_dim + i for i in range(appl_dim)])
self._mat_first_n = np.arange(appl_dim)
# Test if the matrix and the array it will be applied to fit
if matrix.shape[:appl_dim] != appl_space_shape:
raise ValueError(
"Matrix and domain shapes are incompatible under the requested "
+ "application scheme.\n" +
f"Matrix appl shape: {matrix.shape[:appl_dim]}, " +
f"appl_space_shape: {appl_space_shape}.")
matrix_appl_shape = matrix.shape[mat_inactive_axes_dim:]
if matrix_appl_shape != domain_shape:
raise ValueError("Matrix doesn't fit with the domain.\n" +
f"Shape of matrix axes used in summation: {matrix_appl_shape},\n " +
f"Shape of domain axes used in summation: {domain_shape}.")
self._mat = matrix
self._mat_tr = matrix.transpose().conjugate()
@@ -123,22 +211,43 @@
self._check_input(x, mode)
times = (mode == self.TIMES)
m = self._mat if times else self._mat_tr
target = self._target if times else self._domain
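# Three application paths below: full-domain tensordot (spaces is None,
# no flattening), flattened matrix-vector product (flatten=True) and
# application to a subset of the domain's subspaces (spaces given).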
if self._spaces is None:
if not self._flatten:
if times:
res = np.tensordot(m,x.val,axes=len(x.shape))
res = np.tensordot(m, x.val, axes=len(x.domain.shape))
else:
mat_axes = np.flip(self._mat_last_n)
field_axes = self._mat_first_n
res = np.tensordot(m,x.val,axes=(mat_axes,field_axes))
mat_axes = np.flip(self._mat_last_m)
field_axes = self._target_axes
res = np.tensordot(m, x.val, axes=(mat_axes, field_axes))
res = res.transpose()
else:
res = m.dot(x.val.flatten()).reshape(self._domain.shape)
return Field(self._domain, res)
mat_axes = self._mat_last_n if times else np.flip(self._mat_last_n)
move_axes = self._mat_first_n if times else np.flip(self._mat_first_n)
res = np.tensordot(m, x.val, axes=(mat_axes, self._active_axes))
res = np.moveaxis(res, move_axes, self._active_axes)
return Field(self._domain, res)
return Field(target, res)
if times:
mat_axes = self._mat_last_n
move_axes = self._target_last_n
res = np.tensordot(m, x.val, axes=(mat_axes, self._active_axes))
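# moveaxis below fails if the matrix lacks enough unsummed axes to fill
# the positions in front of the inactive subspaces (see class docstring)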
try:
res = np.moveaxis(res, move_axes, self._inactive_axes)
except np.AxisError:
raise ValueError("The matrix has too few extra dimensions.\n" +
"Number of dimensions in matrix:" +
f"{len(m.shape)}\n")
else:
mat_axes = np.flip(self._mat_last_m)
move_axes = np.flip(self._mat_first_n)
field_axes = self._field_axes
try:
res = np.tensordot(m, x.val, axes=(mat_axes, field_axes))
except ValueError as e:
if e.args[0] == "shape-mismatch for sum":
raise ValueError("The matrix has too few extra dimensions.\n" +
"Number of dimensions in matrix: " +
f"{len(self._mat.shape)}\n")
else:
raise e
res = np.moveaxis(res, move_axes, self._active_axes)
return Field(target, res)