Commit a272d7c5 authored by Martin Reinecke's avatar Martin Reinecke

more renamings

parent 914f053a
......@@ -427,12 +427,12 @@
"\n",
"mask = np.full(s_space.shape, 1.)\n",
"mask[l:h] = 0\n",
"mask = ift.Field.from_arr(s_space, mask)\n",
"mask = ift.Field.from_raw(s_space, mask)\n",
"\n",
"R = ift.DiagonalOperator(mask)(HT)\n",
"n = n.val.copy()\n",
"n = n.val_rw()\n",
"n[l:h] = 0\n",
"n = ift.Field.from_arr(s_space, n)\n",
"n = ift.Field.from_raw(s_space, n)\n",
"\n",
"d = R(sh) + n"
]
......@@ -501,7 +501,7 @@
"m_data = HT(m).val\n",
"m_var_data = m_var.val\n",
"uncertainty = np.sqrt(m_var_data)\n",
"d_data = d.val.copy()\n",
"d_data = d.val_rw()\n",
"\n",
"# Set lost data to NaN for proper plotting\n",
"d_data[d_data == 0] = np.nan"
......@@ -583,12 +583,12 @@
"\n",
"mask = np.full(s_space.shape, 1.)\n",
"mask[l:h,l:h] = 0.\n",
"mask = ift.Field.from_arr(s_space, mask)\n",
"mask = ift.Field.from_raw(s_space, mask)\n",
"\n",
"R = ift.DiagonalOperator(mask)(HT)\n",
"n = n.val.copy()\n",
"n = n.val_rw()\n",
"n[l:h, l:h] = 0\n",
"n = ift.Field.from_arr(s_space, n)\n",
"n = ift.Field.from_raw(s_space, n)\n",
"curv = Curvature(R=R, N=N, Sh=Sh)\n",
"D = curv.inverse\n",
"\n",
......
%% Cell type:markdown id: tags:
# A NIFTy demonstration
%% Cell type:markdown id: tags:
## IFT: Big Picture
IFT starting point:
$$d = Rs+n$$
Typically, $s$ is a continuous field and $d$ a discrete data vector; in particular, $R$ is not invertible.
IFT aims at **inverting** the above non-invertible problem in the **best possible way** using Bayesian statistics.
## NIFTy
NIFTy (Numerical Information Field Theory) is a Python framework in which IFT problems can be tackled easily.
Main Interfaces:
- **Spaces**: Cartesian, 2-spheres (HEALPix, Gauss-Legendre) and their respective harmonic spaces.
- **Fields**: Defined on spaces.
- **Operators**: Acting on fields.
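Below is a minimal sketch (not part of the original notebook) of how these three interfaces fit together; it reuses only calls that appear elsewhere in this commit (`RGSpace`, `Field.from_raw`, `DiagonalOperator`), and the numbers are arbitrary.
``` python
import numpy as np
import nifty6 as ift

# A space: a regular 1D grid with 8 pixels
space = ift.RGSpace(8)

# A field: data defined on that space, built from a plain numpy array
field = ift.Field.from_raw(space, np.arange(8, dtype=np.float64))

# An operator: here a diagonal operator that doubles every pixel
doubler = ift.DiagonalOperator(ift.Field.from_raw(space, np.full(8, 2.)))
print(doubler(field).val)
```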
%% Cell type:markdown id: tags:
## Wiener Filter: Formulae
### Assumptions
- $d=Rs+n$, with $R$ a linear operator.
- $\mathcal P (s) = \mathcal G (s,S)$, $\mathcal P (n) = \mathcal G (n,N)$ where $S, N$ are positive definite matrices.
### Posterior
The posterior is given by:
$$\mathcal P (s|d) \propto P(s,d) = \mathcal G(d-Rs,N) \,\mathcal G(s,S) \propto \mathcal G (s-m,D) $$
where
$$\begin{align}
m &= Dj \\
D^{-1} &= S^{-1} + R^\dagger N^{-1} R\\
j &= R^\dagger N^{-1} d
\end{align}$$
Let us implement this in NIFTy!
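Before the NIFTy version, here is a toy, numpy-only sketch of the same equations (not part of the original notebook; the matrix sizes and values are made up purely for illustration):
``` python
import numpy as np

rng = np.random.default_rng(0)
npix, ndata = 4, 3

R = rng.normal(size=(ndata, npix))      # linear response
S = np.diag([2., 1., .5, .25])          # prior signal covariance
N = 0.2 * np.eye(ndata)                 # noise covariance

s = rng.multivariate_normal(np.zeros(npix), S)     # draw a signal
n = rng.multivariate_normal(np.zeros(ndata), N)    # draw noise
d = R @ s + n                                      # synthetic data

j = R.T @ np.linalg.inv(N) @ d                                    # information source
D = np.linalg.inv(np.linalg.inv(S) + R.T @ np.linalg.inv(N) @ R)  # propagator
m = D @ j                                                         # posterior mean
```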
%% Cell type:markdown id: tags:
## Wiener Filter: Example
- We assume statistical homogeneity and isotropy. Therefore the signal covariance $S$ is diagonal in harmonic space, and is described by a one-dimensional power spectrum, assumed here as $$P(k) = P_0\,\left(1+\left(\frac{k}{k_0}\right)^2\right)^{-\gamma /2},$$
with $P_0 = 0.2, k_0 = 5, \gamma = 4$.
- $N = 0.2 \cdot \mathbb{1}$.
- Number of data points $N_{pix} = 512$.
- Reconstruction in harmonic space.
- Response operator:
$$R = FFT_{\text{harmonic} \rightarrow \text{position}}$$
%% Cell type:code id: tags:
``` python
N_pixels = 512 # Number of pixels
def pow_spec(k):
    P0, k0, gamma = [.2, 5, 4]
    return P0 / ((1. + (k/k0)**2)**(gamma / 2))
```
%% Cell type:markdown id: tags:
## Wiener Filter: Implementation
%% Cell type:markdown id: tags:
### Import Modules
%% Cell type:code id: tags:
``` python
import numpy as np
np.random.seed(40)
import nifty6 as ift
import matplotlib.pyplot as plt
%matplotlib inline
```
%% Cell type:markdown id: tags:
### Implement Propagator
%% Cell type:code id: tags:
``` python
def Curvature(R, N, Sh):
    IC = ift.GradientNormController(iteration_limit=50000,
                                    tol_abs_gradnorm=0.1)
    # WienerFilterCurvature is (R.adjoint*N.inverse*R + Sh.inverse) plus some
    # handy helper methods.
    return ift.WienerFilterCurvature(R, N, Sh, iteration_controller=IC,
                                     iteration_controller_sampling=IC)
```
%% Cell type:markdown id: tags:
### Conjugate Gradient Preconditioning
- $D$ is defined via:
$$D^{-1} = S_h^{-1} + R^\dagger N^{-1} R.$$
In the end, we want to apply $D$ to $j$, i.e. we need the inverse action of $D^{-1}$. This is done numerically (algorithm: *Conjugate Gradient*).
<!--
- One can define the *condition number* of a non-singular and normal matrix $A$:
$$\kappa (A) := \frac{|\lambda_{\text{max}}|}{|\lambda_{\text{min}}|},$$
where $\lambda_{\text{max}}$ and $\lambda_{\text{min}}$ are the largest and smallest eigenvalue of $A$, respectively.
- The larger $\kappa$ the slower Conjugate Gradient.
- By default, conjugate gradient solves: $D^{-1} m = j$ for $m$, where $D^{-1}$ can be badly conditioned. If one knows a non-singular matrix $T$ for which $TD^{-1}$ is better conditioned, one can solve the equivalent problem:
$$\tilde A m = \tilde j,$$
where $\tilde A = T D^{-1}$ and $\tilde j = Tj$.
- In our case $S^{-1}$ is responsible for the bad conditioning of $D$ depending on the chosen power spectrum. Thus, we choose
$$T = \mathcal F^\dagger S_h^{-1} \mathcal F.$$
-->
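As a side note, the sketch below (not part of the original notebook) shows what "applying $D$" boils down to numerically: solving $D^{-1}m = j$ with a conjugate-gradient solver, here scipy's, on a small stand-in symmetric positive definite matrix.
``` python
import numpy as np
from scipy.sparse.linalg import cg

rng = np.random.default_rng(1)
A = rng.normal(size=(6, 6))
D_inv = A @ A.T + np.eye(6)   # stand-in for S_h^{-1} + R^dagger N^{-1} R (SPD)
j = rng.normal(size=6)

m, info = cg(D_inv, j)        # info == 0 signals convergence
assert info == 0
print(np.linalg.norm(D_inv @ m - j))
```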
%% Cell type:markdown id: tags:
### Generate Mock Data
- Generate fields $s$ and $n$ with the given covariances.
- Calculate $d$.
%% Cell type:code id: tags:
``` python
s_space = ift.RGSpace(N_pixels)
h_space = s_space.get_default_codomain()
HT = ift.HarmonicTransformOperator(h_space, target=s_space)
# Operators
Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
R = HT #*ift.create_harmonic_smoothing_operator((h_space,), 0, 0.02)
# Fields and data
sh = Sh.draw_sample()
noiseless_data = R(sh)
noise_amplitude = np.sqrt(0.2)
N = ift.ScalingOperator(noise_amplitude**2, s_space)
n = ift.Field.from_random(domain=s_space, random_type='normal',
                          std=noise_amplitude, mean=0)
d = noiseless_data + n
j = R.adjoint_times(N.inverse_times(d))
curv = Curvature(R=R, N=N, Sh=Sh)
D = curv.inverse
```
%% Cell type:markdown id: tags:
### Run Wiener Filter
%% Cell type:code id: tags:
``` python
m = D(j)
```
%% Cell type:markdown id: tags:
### Signal Reconstruction
%% Cell type:code id: tags:
``` python
# Get signal data and reconstruction data
s_data = HT(sh).val
m_data = HT(m).val
d_data = d.val
plt.figure(figsize=(15,10))
plt.plot(s_data, 'r', label="Signal", linewidth=3)
plt.plot(d_data, 'k.', label="Data")
plt.plot(m_data, 'k', label="Reconstruction",linewidth=3)
plt.title("Reconstruction")
plt.legend()
plt.show()
```
%% Cell type:code id: tags:
``` python
plt.figure(figsize=(15,10))
plt.plot(s_data - s_data, 'r', label="Signal", linewidth=3)
plt.plot(d_data - s_data, 'k.', label="Data")
plt.plot(m_data - s_data, 'k', label="Reconstruction",linewidth=3)
plt.axhspan(-noise_amplitude,noise_amplitude, facecolor='0.9', alpha=.5)
plt.title("Residuals")
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
### Power Spectrum
%% Cell type:code id: tags:
``` python
s_power_data = ift.power_analyze(sh).val
m_power_data = ift.power_analyze(m).val
plt.figure(figsize=(15,10))
plt.loglog()
plt.xlim(1, int(N_pixels/2))
ymin = min(m_power_data)
plt.ylim(ymin, 1)
xs = np.arange(1,int(N_pixels/2),.1)
plt.plot(xs, pow_spec(xs), label="True Power Spectrum", color='k',alpha=0.5)
plt.plot(s_power_data, 'r', label="Signal")
plt.plot(m_power_data, 'k', label="Reconstruction")
plt.axhline(noise_amplitude**2 / N_pixels, color="k", linestyle='--', label="Noise level", alpha=.5)
plt.axhspan(noise_amplitude**2 / N_pixels, ymin, facecolor='0.9', alpha=.5)
plt.title("Power Spectrum")
plt.legend()
plt.show()
```
%% Cell type:markdown id: tags:
## Wiener Filter on Incomplete Data
%% Cell type:code id: tags:
``` python
# Operators
Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
N = ift.ScalingOperator(noise_amplitude**2,s_space)
# R is defined below
# Fields
sh = Sh.draw_sample()
s = HT(sh)
n = ift.Field.from_random(domain=s_space, random_type='normal',
                          std=noise_amplitude, mean=0)
```
%% Cell type:markdown id: tags:
### Partially Lose Data
%% Cell type:code id: tags:
``` python
l = int(N_pixels * 0.2)
h = int(N_pixels * 0.2 * 2)
mask = np.full(s_space.shape, 1.)
mask[l:h] = 0
mask = ift.Field.from_raw(s_space, mask)
R = ift.DiagonalOperator(mask)(HT)
n = n.val_rw()
n[l:h] = 0
n = ift.Field.from_raw(s_space, n)
d = R(sh) + n
```
%% Cell type:code id: tags:
``` python
curv = Curvature(R=R, N=N, Sh=Sh)
D = curv.inverse
j = R.adjoint_times(N.inverse_times(d))
m = D(j)
```
%% Cell type:markdown id: tags:
### Compute Uncertainty
%% Cell type:code id: tags:
``` python
m_mean, m_var = ift.probe_with_posterior_samples(curv, HT, 200)
```
%% Cell type:markdown id: tags:
### Get data
%% Cell type:code id: tags:
``` python
# Get signal data and reconstruction data
s_data = s.val
m_data = HT(m).val
m_var_data = m_var.val
uncertainty = np.sqrt(m_var_data)
d_data = d.val_rw()
# Set lost data to NaN for proper plotting
d_data[d_data == 0] = np.nan
```
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(15,10))
plt.axvspan(l, h, facecolor='0.8',alpha=0.5)
plt.fill_between(range(N_pixels), m_data - uncertainty, m_data + uncertainty, facecolor='0.5', alpha=0.5)
plt.plot(s_data, 'r', label="Signal", alpha=1, linewidth=3)
plt.plot(d_data, 'k.', label="Data")
plt.plot(m_data, 'k', label="Reconstruction", linewidth=3)
plt.title("Reconstruction of incomplete data")
plt.legend()
```
%% Cell type:markdown id: tags:
# 2d Example
%% Cell type:code id: tags:
``` python
N_pixels = 256 # Number of pixels
sigma2 = 2. # Noise variance
def pow_spec(k):
    P0, k0, gamma = [.2, 2, 4]
    return P0 * (1. + (k/k0)**2)**(-gamma/2)
s_space = ift.RGSpace([N_pixels, N_pixels])
```
%% Cell type:code id: tags:
``` python
h_space = s_space.get_default_codomain()
HT = ift.HarmonicTransformOperator(h_space,s_space)
# Operators
Sh = ift.create_power_operator(h_space, power_spectrum=pow_spec)
N = ift.ScalingOperator(sigma2,s_space)
# Fields and data
sh = Sh.draw_sample()
n = ift.Field.from_random(domain=s_space, random_type='normal',
                          std=np.sqrt(sigma2), mean=0)
# Lose some data
l = int(N_pixels * 0.33)
h = int(N_pixels * 0.33 * 2)
mask = np.full(s_space.shape, 1.)
mask[l:h,l:h] = 0.
mask = ift.Field.from_raw(s_space, mask)
R = ift.DiagonalOperator(mask)(HT)
n = n.val_rw()
n[l:h, l:h] = 0
n = ift.Field.from_raw(s_space, n)
curv = Curvature(R=R, N=N, Sh=Sh)
D = curv.inverse
d = R(sh) + n
j = R.adjoint_times(N.inverse_times(d))
# Run Wiener filter
m = D(j)
# Uncertainty
m_mean, m_var = ift.probe_with_posterior_samples(curv, HT, 20)
# Get data
s_data = HT(sh).val
m_data = HT(m).val
m_var_data = m_var.val
d_data = d.val
uncertainty = np.sqrt(np.abs(m_var_data))
```
%% Cell type:code id: tags:
``` python
cm = ['magma', 'inferno', 'plasma', 'viridis'][1]
mi = np.min(s_data)
ma = np.max(s_data)
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
data = [s_data, d_data]
caption = ["Signal", "Data"]
for ax in axes.flat:
    im = ax.imshow(data.pop(0), interpolation='nearest', cmap=cm, vmin=mi,
                   vmax=ma)
    ax.set_title(caption.pop(0))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
```
%% Cell type:code id: tags:
``` python
mi = np.min(s_data)
ma = np.max(s_data)
fig, axes = plt.subplots(3, 2, figsize=(15, 22.5))
sample = HT(curv.draw_sample(from_inverse=True)+m).val
post_mean = (m_mean + HT(m)).val
data = [s_data, m_data, post_mean, sample, s_data - m_data, uncertainty]
caption = ["Signal", "Reconstruction", "Posterior mean", "Sample", "Residuals", "Uncertainty Map"]
for ax in axes.flat:
    im = ax.imshow(data.pop(0), interpolation='nearest', cmap=cm, vmin=mi, vmax=ma)
    ax.set_title(caption.pop(0))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
```
%% Cell type:markdown id: tags:
### Is the uncertainty map reliable?
%% Cell type:code id: tags:
``` python
precise = (np.abs(s_data-m_data) < uncertainty)
print("Error within uncertainty map bounds: " + str(np.sum(precise) * 100 / N_pixels**2) + "%")
plt.figure(figsize=(15,10))
plt.imshow(precise.astype(float), cmap="brg")
plt.colorbar()
```
%% Cell type:markdown id: tags:
# Start Coding
## NIFTy Repository + Installation guide
https://gitlab.mpcdf.mpg.de/ift/NIFTy
NIFTy v5 is **more or less stable!**
......
......@@ -63,7 +63,7 @@ if __name__ == '__main__':
mock_position = ift.from_random('normal', harmonic_space)
tmp = p(mock_position).val.astype(np.float64)
data = np.random.binomial(1, tmp)
data = ift.Field.from_arr(R.target, data)
data = ift.Field.from_raw(R.target, data)
# Compute likelihood and Hamiltonian
position = ift.from_random('normal', harmonic_space)
......
......@@ -57,7 +57,7 @@ if __name__ == '__main__':
for _ in range(n_samps):
fld = pspec(ift.from_random('normal', pspec.domain))
klengths = fld.domain[0].k_lengths
ycoord = fld.val.copy()
ycoord = fld.val_rw()
ycoord[0] = ycoord[1]
ax.plot(klengths, ycoord, alpha=1)
......
......@@ -95,7 +95,7 @@ if __name__ == '__main__':
# and harmonic transformation
# Masking operator to model that parts of the field have not been observed
mask = ift.Field.from_arr(position_space, mask)
mask = ift.Field.from_raw(position_space, mask)
Mask = ift.MaskOperator(mask)
# The response operator consists of
......
......@@ -40,7 +40,7 @@ def exposure_2d():
exposure[:, x_shape*4//5:x_shape] *= .1
exposure[:, x_shape//2:x_shape*3//2] *= 3.
return ift.Field.from_arr(position_space, exposure)
return ift.Field.from_raw(position_space, exposure)
if __name__ == '__main__':
......@@ -95,7 +95,7 @@ if __name__ == '__main__':
mock_position = ift.from_random('normal', domain)
data = lamb(mock_position)
data = np.random.poisson(data.val.astype(np.float64))
data = ift.Field.from_arr(d_space, data)
data = ift.Field.from_raw(d_space, data)
likelihood = ift.PoissonianEnergy(data)(lamb)
# Settings for minimization
......
......@@ -71,7 +71,7 @@ class PolynomialResponse(ift.LinearOperator):
def apply(self, x, mode):
self._check_input(x, mode)
val = x.val.copy()
val = x.val_rw()
if mode == self.TIMES:
# FIXME Use polynomial() here
out = self._mat.dot(val)
......
......@@ -87,7 +87,7 @@ class LMSpace(StructuredDomain):
for m in range(1, mmax+1):
ldist[idx:idx+2*(lmax+1-m)] = tmp[2*m:]
idx += 2*(lmax+1-m)
return Field.from_arr(self, ldist)
return Field.from_raw(self, ldist)
def get_unique_k_lengths(self):
return np.arange(self.lmax+1, dtype=np.float64)
......@@ -123,7 +123,7 @@ class LMSpace(StructuredDomain):
lm0 = gl.get_default_codomain()
theta = pyHealpix.GL_thetas(gl.nlat)
# evaluate the kernel function at the required thetas
kernel_sphere = Field.from_arr(gl, func(theta))
kernel_sphere = Field.from_raw(gl, func(theta))
# normalize the kernel such that the integral over the sphere is 4pi
kernel_sphere = kernel_sphere * (4 * np.pi / kernel_sphere.integrate())
# compute the spherical harmonic coefficients of the kernel
......@@ -131,7 +131,7 @@ class LMSpace(StructuredDomain):
kernel_lm = op.adjoint_times(kernel_sphere.weight(1)).val
# evaluate the k lengths of the harmonic space
k_lengths = self.get_k_length_array().val.astype(np.int)
return Field.from_arr(self, kernel_lm[k_lengths])
return Field.from_raw(self, kernel_lm[k_lengths])
@property
def lmax(self):
......
......@@ -240,7 +240,7 @@ class PowerSpace(StructuredDomain):
@property
def pindex(self):
"""data_object : bin indices
"""numpy.ndarray : bin indices
Bin index for every pixel in the harmonic partner.
"""
......
......@@ -97,14 +97,14 @@ class RGSpace(StructuredDomain):
res = np.arange(self.shape[0], dtype=np.float64)
res = np.minimum(res, self.shape[0]-res)*self.distances[0]
if len(self.shape) == 1:
return Field.from_arr(self, res)
return Field.from_raw(self, res)
res *= res
for i in range(1, len(self.shape)):
tmp = np.arange(self.shape[i], dtype=np.float64)
tmp = np.minimum(tmp, self.shape[i]-tmp)*self.distances[i]
tmp *= tmp
res = np.add.outer(res, tmp)
return Field.from_arr(self, np.sqrt(res))
return Field.from_raw(self, np.sqrt(res))
def get_k_length_array(self):
if (not self.harmonic):
......
......@@ -32,8 +32,8 @@ class Field(object):
----------
domain : DomainTuple
The domain of the new Field.
val : data_object
This object's global shape must match the domain shape
val : numpy.ndarray
This object's shape must match the domain shape
After construction, the object will no longer be writeable!
Notes
......@@ -93,7 +93,7 @@ class Field(object):
return Field(domain, val)
@staticmethod
def from_arr(domain, arr):
def from_raw(domain, arr):
"""Returns a Field constructed from `domain` and `arr`.
Parameters
......@@ -148,15 +148,19 @@ class Field(object):
@property
def val(self):
"""numpy.ndarray : the data object storing the field's entries.
"""numpy.ndarray : the array storing the field's entries.
Notes
-----
This property is intended for low-level, internal use only. Do not use
from outside of NIFTy's core; there should be better alternatives.
The returned array is read-only.
"""
return self._val
def val_rw(self):
"""numpy.ndarray : a copy of the array storing the field's entries.
"""
return self._val.copy()
@property
def dtype(self):
"""type : the data type of the field's entries"""
......@@ -241,7 +245,7 @@ class Field(object):
Field
The weighted field.
"""
aout = self.val.copy()
aout = self.val_rw()
spaces = utilities.parse_spaces(spaces, len(self._domain))
......
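For reference, a short usage sketch of the renamed `Field` accessors introduced by this commit (not taken from the repository itself; it relies only on the behaviour documented in the docstrings above):
``` python
import numpy as np
import nifty6 as ift

space = ift.RGSpace(4)
fld = ift.Field.from_raw(space, np.zeros(4))  # formerly Field.from_arr
ro = fld.val        # read-only reference to the field's data
rw = fld.val_rw()   # writable copy, replaces fld.val.copy()
rw[0] = 1.          # fine, rw is a private copy
# ro[0] = 1. would raise, because the backing array is locked
```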
......@@ -179,7 +179,7 @@ class _TwoLogIntegrations(LinearOperator):
res[from_third] = (res[from_third] + res[no_border])/2*self._log_vol[extender_sl] + x[first]
res[from_third] = np.cumsum(res[from_third], axis=axis)
else:
x = x.val.copy()
x = x.val_rw()
res = np.zeros(self._domain.shape)
x[from_third] = np.cumsum(x[from_third][reverse], axis=axis)[reverse]
res[first] += x[from_third]
......@@ -199,7 +199,7 @@ class _Normalization(Operator):
pd = PowerDistributor(hspace,
power_space=self._domain[space],
space=space)
mode_multiplicity = pd.adjoint(full(pd.target, 1.)).val.copy()
mode_multiplicity = pd.adjoint(full(pd.target, 1.)).val_rw()
zero_mode = (slice(None),)*self._domain.axes[space][0] + (0,)
mode_multiplicity[zero_mode] = 0
self._mode_multiplicity = makeField(self._domain,
......
......@@ -172,12 +172,11 @@ class LOSResponse(LinearOperator):
"getting negative distances")
real_ends = starts + diffs*real_distances
dist = np.array(self.domain[0].distances).reshape((-1, 1))
localized_pixel_starts = starts/dist + 0.5
localized_pixel_ends = real_ends/dist + 0.5
pixel_starts = starts/dist + 0.5
pixel_ends = real_ends/dist + 0.5
# get the shape of the local data slice
w_i = _comp_traverse(localized_pixel_starts,
localized_pixel_ends,
w_i = _comp_traverse(pixel_starts,
pixel_ends,
self.domain[0].shape,
np.array(self.domain[0].distances),
1./(1./difflen+truncation*sigmas),
......@@ -229,6 +228,6 @@ class LOSResponse(LinearOperator):
if mode == self.TIMES:
result_arr = self._smat.matvec(x.val.reshape(-1))
return Field(self._target, result_arr)
local_input_data = x.val.reshape(-1)
res = self._smat.rmatvec(local_input_data).reshape(self.domain[0].shape)
input_data = x.val.reshape(-1)
res = self._smat.rmatvec(input_data).reshape(self.domain[0].shape)
return Field(self._domain, res)
......@@ -321,7 +321,7 @@ class Linearization(object):
tmp = self._val.sinc()
tmp2 = ((np.pi*self._val).cos()-tmp)/self._val
ind = self._val.val == 0
loc = tmp2.val.copy()
loc = tmp2.val_rw()
loc[ind] = 0
tmp2 = Field(tmp.domain, loc)
return self.new(tmp, makeOp(tmp2)(self._jac))
......@@ -371,7 +371,7 @@ class Linearization(object):
tmp2 = self._val.sign()
ind = self._val.val == 0
loc = tmp2.val.copy().astype(float)
loc = tmp2.val_rw().astype(float)
loc[ind] = np.nan
tmp2 = Field(tmp.domain, loc)
......
......@@ -188,7 +188,7 @@ class MetricGaussianKL_MPI(Energy):
for s in self._samples:
tmp = self._hamiltonian(self._lin+s)
if v is None:
v = tmp.val.val.copy()
v = tmp.val.val_rw()
g = tmp.gradient
else:
v += tmp.val.val
......
......@@ -48,7 +48,7 @@ def _toArray(fld):
def _toArray_rw(fld):
if isinstance(fld, Field):
return fld.val.copy().reshape(-1)
return fld.val_rw().reshape(-1)
return _multiToArray(fld)
......
......@@ -132,12 +132,17 @@ class MultiField(object):
return MultiField(domain, tuple(Field(dom, val)
for dom in domain._domains))
def to_global_data(self):
@property
def val(self):
return {key: val.val
for key, val in zip(self._domain.keys(), self._val)}
def val_rw(self):
return {key: val.val_rw()
for key, val in zip(self._domain.keys(), self._val)}
@staticmethod
def from_global_data(domain, arr):
def from_raw(domain, arr):
return MultiField(
domain, tuple(Field(domain[key], arr[key])
for key in domain.keys()))
......
......@@ -56,7 +56,7 @@ class _DomRemover(LinearOperator):
def apply(self, x, mode):
self._check_input(x, mode)
self._check_float_dtype(x)
x = x.to_global_data()
x = x.val
if isinstance(self._domain, DomainTuple):
res = x.ravel() if mode == self.TIMES else x.reshape(
self._domain.shape)
......
......@@ -74,7 +74,7 @@ class DOFDistributor(LinearOperator):
wgt = np.bincount(dofdex.val.ravel(), minlength=nbin)
wgt = wgt*partner.scalar_dvol
else:
dvol = Field.from_arr(partner, partner.dvol).val
dvol = Field.from_raw(partner, partner.dvol).val
wgt = np.bincount(dofdex.val.ravel(),
minlength=nbin, weights=dvol)
# The explicit conversion to float64 is necessary because bincount
......@@ -108,7 +108,7 @@ class DOFDistributor(LinearOperator):
oarr = np.zeros(self._hshape, dtype=x.dtype)
oarr = special_add_at(oarr, 1, self._dofdex, arr)
oarr = oarr.reshape(self._domain.shape)
res = Field.from_arr(self._domain, oarr)
res = Field.from_raw(self._domain, oarr)
return res
def _times(self, x):
......
......@@ -310,7 +310,7 @@ def _plot1D(f, ax, **kwargs):
plt.yscale(kwargs.pop("yscale", "log"))
xcoord = dom.k_lengths
for i, fld in enumerate(f):
ycoord = fld.val.copy()
ycoord = fld.val_rw()
ycoord[0] = ycoord[1]
plt.plot(xcoord, ycoord, label=label[i],
linewidth=linewidth[i], alpha=alpha[i])
......
......@@ -144,7 +144,7 @@ def approximation2endo(op, nsamples):
approx = sc.var
dct = approx.to_dict()
for kk in dct:
foo = dct[kk].to_global_data_rw()
foo = dct[kk].val_rw()
foo[foo == 0] = 1
dct[kk] = makeField(dct[kk].domain, foo)
return MultiField.from_dict(dct)