Commit 3d2ae5e6 authored by Martin Reinecke

updates

parent be306136
@@ -94,8 +94,20 @@ template<typename T, size_t ndim> class mav
{ return d; }
const T *data() const
{ return d; }
bool contiguous() const
{
ptrdiff_t stride=1;
for (size_t i=0; i<ndim; ++i)
{
if (str[ndim-1-i]!=stride) return false;
stride *= shp[ndim-1-i];
}
return true;
}
};
template<typename T, size_t ndim> using const_mav = mav<const T, ndim>;
//
// basic utilities
//
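The contiguous() method added above accepts a view only if each stride equals the product of all later axis lengths, i.e. a dense row-major layout with no gaps. A standalone restatement of that test with two tiny cases follows; the helper name is invented for illustration and is not part of the commit.

#include <array>
#include <cstddef>
#include <cstdio>
using std::size_t; using std::ptrdiff_t;

// Restatement of mav::contiguous(): strides must be the running product
// of the trailing extents (dense row-major layout).
template<size_t ndim> bool is_contiguous(const std::array<size_t,ndim> &shp,
  const std::array<ptrdiff_t,ndim> &str)
  {
  ptrdiff_t stride=1;
  for (size_t i=0; i<ndim; ++i)
    {
    if (str[ndim-1-i]!=stride) return false;
    stride *= shp[ndim-1-i];
    }
  return true;
  }

int main()
  {
  printf("%d\n", is_contiguous<2>({3,4}, {4,1})); // dense 3x4 view -> 1
  printf("%d\n", is_contiguous<2>({3,2}, {4,2})); // every 2nd column -> 0
  }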
@@ -285,7 +297,7 @@ template<typename T> class Baselines
size_t nrows, nchan;
public:
- Baselines(const mav<const T,2> &coord_, const mav<const T,1> &freq)
+ Baselines(const const_mav<T,2> &coord_, const const_mav<T,1> &freq)
{
constexpr double speedOfLight = 299792458.;
myassert(coord_.shape(1)==3, "dimension mismatch");
@@ -457,18 +469,15 @@ template<typename T> class GridderConfig
dirty(i,j) = tmp(i2,j2)*cfu[i]*cfv[j];
}
}
- #if 0
- pyarr_c<complex<T>> dirty2grid_c(const pyarr_c<complex<T>> &dirty) const
-   {
-   checkArray(dirty, "dirty", {nx_dirty, ny_dirty});
-   auto pdirty = dirty.data();
-   auto tmp = makeArray<complex<T>>({nu, nv});
-   auto ptmp = tmp.mutable_data();
-   pocketfft::stride_t strides{tmp.strides(0),tmp.strides(1)};
-   {
-   py::gil_scoped_release release;
-   for (size_t i=0; i<nu*nv; ++i)
-     ptmp[i] = 0.;
+ void dirty2grid_c(const const_mav<complex<T>,2> &dirty,
+   mav<complex<T>,2> &grid) const
+   {
+   checkShape(dirty.shape(), {nx_dirty, ny_dirty});
+   checkShape(grid.shape(), {nu, nv});
+   for (size_t i=0; i<nu; ++i)
+     for (size_t j=0; j<nv; ++j)
+       grid(i,j)=0.;
for (size_t i=0; i<nx_dirty; ++i)
  for (size_t j=0; j<ny_dirty; ++j)
    {
@@ -476,14 +485,14 @@ template<typename T> class GridderConfig
if (i2>=nu) i2-=nu;
size_t j2 = nv-ny_dirty/2+j;
if (j2>=nv) j2-=nv;
- ptmp[nv*i2+j2] = pdirty[ny_dirty*i + j]*cfu[i]*cfv[j];
+ grid(i2,j2) = dirty(i,j)*cfu[i]*cfv[j];
}
+ constexpr auto sc = sizeof(complex<T>);
+ pocketfft::stride_t strides{grid.stride(0)*sc,grid.stride(1)*sc};
pocketfft::c2c({nu,nv}, strides, strides, {0,1}, pocketfft::FORWARD,
- ptmp, ptmp, T(1), nthreads);
+ grid.data(), grid.data(), T(1), nthreads);
- }
- return tmp;
}
- #endif
inline void getpix(T u_in, T v_in, T &u, T &v, int &iu0, int &iv0) const
{
u=fmodulo(u_in*psx, T(1))*nu,
@@ -493,20 +502,15 @@ template<typename T> class GridderConfig
iv0 = int(v-supp*0.5 + 1 + nv) - nv;
if (iv0+supp>nv+nsafe) iv0 = nv+nsafe-supp;
}
- #if 0
- pyarr_c<complex<T>> apply_wscreen(const pyarr_c<complex<T>> &dirty_, double w, bool adjoint) const
+ void apply_wscreen(const const_mav<complex<T>,2> &dirty,
+   mav<complex<T>,2> &dirty2, double w, bool adjoint) const
{
- checkArray(dirty_, "dirty", {nx_dirty, ny_dirty});
- auto dirty = dirty_.data();
- auto res_ = makeArray<complex<T>>({nx_dirty, ny_dirty});
- auto res = res_.mutable_data();
+ checkShape(dirty.shape(), {nx_dirty, ny_dirty});
+ checkShape(dirty2.shape(), {nx_dirty, ny_dirty});
double x0 = -0.5*nx_dirty*psx,
       y0 = -0.5*ny_dirty*psy;
- {
- py::gil_scoped_release release;
- #pragma omp parallel num_threads(nthreads)
- {
- #pragma omp for schedule(static)
+ #pragma omp parallel for num_threads(nthreads) schedule(static)
for (size_t i=0; i<=nx_dirty/2; ++i)
{
double fx = x0+i*psx;
@@ -515,23 +519,20 @@ template<typename T> class GridderConfig
{
double fy = y0+j*psy;
auto ws = wscreen(fx, fy*fy, w, adjoint);
- res[ny_dirty*i+j] = dirty[ny_dirty*i+j]*ws; // lower left
+ dirty2(i,j) = dirty(i,j)*ws; // lower left
size_t i2 = nx_dirty-i, j2 = ny_dirty-j;
if ((i>0)&&(i<i2))
{
- res[ny_dirty*i2+j] = dirty[ny_dirty*i2+j]*ws; // lower right
+ dirty2(i2,j) = dirty(i2,j)*ws; // lower right
if ((j>0)&&(j<j2))
- res[ny_dirty*i2+j2] = dirty[ny_dirty*i2+j2]*ws; // upper right
+ dirty2(i2,j2) = dirty(i2,j2)*ws; // upper right
}
if ((j>0)&&(j<j2))
- res[ny_dirty*i+j2] = dirty[ny_dirty*i+j2]*ws; // upper left
+ dirty2(i,j2) = dirty(i,j2)*ws; // upper left
}
}
- }
- }
- return res_;
}
- #endif
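The replacement apply_wscreen only iterates over the quadrant i<=nx_dirty/2, j<=ny_dirty/2 and reuses each w-screen value for the three mirrored pixels, which works because the screen is evaluated on the squared coordinates. A small self-contained check (illustrative only, not part of the commit) that the mirroring touches every pixel of an even-sized image exactly once:

#include <cassert>
#include <cstddef>
#include <vector>
using std::size_t;

int main()
  {
  size_t nx=8, ny=6;                         // even image dimensions, as above
  std::vector<int> hits(nx*ny, 0);
  for (size_t i=0; i<=nx/2; ++i)
    for (size_t j=0; j<=ny/2; ++j)
      {
      ++hits[ny*i+j];                        // lower left
      size_t i2 = nx-i, j2 = ny-j;
      if ((i>0)&&(i<i2))
        {
        ++hits[ny*i2+j];                     // lower right
        if ((j>0)&&(j<j2)) ++hits[ny*i2+j2]; // upper right
        }
      if ((j>0)&&(j<j2)) ++hits[ny*i+j2];    // upper left
      }
  for (auto h : hits) assert(h==1);          // each pixel visited exactly once
  }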
};
constexpr int logsquare=4;
@@ -634,6 +635,315 @@ template<typename T, typename T2=complex<T>> class Helper
}
};
template<typename T> void vis2grid_c
(const Baselines<T> &baselines, const GridderConfig<T> &gconf,
const const_mav<uint32_t,1> &idx, const const_mav<complex<T>,1> &vis,
mav<complex<T>,2> &grid, const const_mav<T,1> &wgt)
{
size_t nvis = vis.shape(0);
checkShape(idx.shape(), {nvis});
bool have_wgt = wgt.size()!=0;
if (have_wgt) checkShape(wgt.shape(), {nvis});
size_t nu=gconf.Nu(), nv=gconf.Nv();
checkShape(grid.shape(), {nu, nv});
myassert(grid.contiguous(), "grid is not contiguous");
T beta = gconf.Beta();
size_t supp = gconf.Supp();
#pragma omp parallel num_threads(nthreads)
{
Helper<T> hlp(gconf, nullptr, grid.data());
T emb = exp(-2*beta);
int jump = hlp.lineJump();
const T * ku = hlp.kernel.data();
const T * kv = hlp.kernel.data()+supp;
// Loop over sampling points
#pragma omp for schedule(guided,100)
for (size_t ipart=0; ipart<nvis; ++ipart)
{
UVW<T> coord = baselines.effectiveCoord(idx(ipart));
hlp.prep(coord.u, coord.v);
auto * ptr = hlp.p0w;
auto v(vis(ipart)*emb);
if (have_wgt)
v*=wgt(ipart);
for (size_t cu=0; cu<supp; ++cu)
{
complex<T> tmp(v*ku[cu]);
for (size_t cv=0; cv<supp; ++cv)
ptr[cv] += tmp*kv[cv];
ptr+=jump;
}
}
} // end of parallel region
}
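vis2grid_c scatters each visibility onto a supp x supp patch of the oversampled uv grid; because the kernel is separable, the inner loops accumulate the outer product of the 1-D kernel slices ku and kv, advancing by lineJump() between grid lines. A minimal sketch of that accumulation pattern, with made-up kernel values and patch geometry standing in for hlp.p0w and the real kernel:

#include <complex>
#include <cstddef>
#include <vector>
using std::size_t;

int main()
  {
  const size_t supp=4, jump=16;              // assumed support and line stride
  const double ku[]={0.1,0.4,0.4,0.1};       // made-up 1-D kernel slice in u
  const double kv[]={0.2,0.3,0.3,0.2};       // made-up 1-D kernel slice in v
  std::vector<std::complex<double>> patch(supp*jump, 0.);
  std::complex<double> v(1.0,-0.5);          // one (weighted) visibility
  auto *ptr = patch.data();                  // plays the role of hlp.p0w
  for (size_t cu=0; cu<supp; ++cu)
    {
    std::complex<double> tmp(v*ku[cu]);
    for (size_t cv=0; cv<supp; ++cv)
      ptr[cv] += tmp*kv[cv];                 // outer product ku[cu]*kv[cv]
    ptr += jump;                             // advance to the next grid line
    }
  }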
template<typename T> void ms2grid_c
(const Baselines<T> &baselines, const GridderConfig<T> &gconf,
const const_mav<uint32_t,1> &idx, const const_mav<complex<T>,2> &ms,
mav<complex<T>,2> &grid, const const_mav<T,2> &wgt)
{
auto nrows = baselines.Nrows();
auto nchan = baselines.Nchannels();
size_t nvis = idx.shape(0);
checkShape(ms.shape(), {nrows, nchan});
bool have_wgt = wgt.size()!=0;
if (have_wgt) checkShape(wgt.shape(), {nrows, nchan});
size_t nu=gconf.Nu(), nv=gconf.Nv();
checkShape(grid.shape(), {nu, nv});
myassert(grid.contiguous(), "grid is not contiguous");
T beta = gconf.Beta();
size_t supp = gconf.Supp();
#pragma omp parallel num_threads(nthreads)
{
Helper<T> hlp(gconf, nullptr, grid.data());
T emb = exp(-2*beta);
int jump = hlp.lineJump();
const T * ku = hlp.kernel.data();
const T * kv = hlp.kernel.data()+supp;
// Loop over sampling points
#pragma omp for schedule(guided,100)
for (size_t ipart=0; ipart<nvis; ++ipart)
{
auto tidx = idx(ipart);
auto row = tidx/nchan;
auto chan = tidx-row*nchan;
UVW<T> coord = baselines.effectiveCoord(tidx);
hlp.prep(coord.u, coord.v);
auto * ptr = hlp.p0w;
auto v(ms(row,chan)*emb);
if (have_wgt)
v*=wgt(row,chan);
for (size_t cu=0; cu<supp; ++cu)
{
complex<T> tmp(v*ku[cu]);
for (size_t cv=0; cv<supp; ++cv)
ptr[cv] += tmp*kv[cv];
ptr+=jump;
}
}
} // end of parallel region
}
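ms2grid_c (and grid2ms_c below) address a measurement-set cell through a single flattened index, tidx = row*nchan + chan, which is undone by row = tidx/nchan and chan = tidx - row*nchan. A tiny round-trip check of that decoding, with an arbitrary channel count:

#include <cassert>
#include <cstdint>
using std::uint32_t;

int main()
  {
  const uint32_t nchan = 64;                 // arbitrary channel count
  for (uint32_t row=0; row<5; ++row)
    for (uint32_t chan=0; chan<nchan; ++chan)
      {
      uint32_t tidx = row*nchan + chan;      // flattened (row,channel) index
      uint32_t r = tidx/nchan;               // integer division -> row
      uint32_t c = tidx - r*nchan;           // remainder -> channel
      assert(r==row && c==chan);
      }
  }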
template<typename T> void grid2vis_c
(const Baselines<T> &baselines, const GridderConfig<T> &gconf,
const const_mav<uint32_t,1> &idx, const const_mav<complex<T>,2> &grid,
mav<complex<T>,1> &vis, const const_mav<T,1> &wgt)
{
size_t nvis = vis.shape(0);
checkShape(idx.shape(), {nvis});
bool have_wgt = wgt.size()!=0;
if (have_wgt) checkShape(wgt.shape(), {nvis});
size_t nu=gconf.Nu(), nv=gconf.Nv();
checkShape(grid.shape(), {nu, nv});
myassert(grid.contiguous(), "grid is not contiguous");
T beta = gconf.Beta();
size_t supp = gconf.Supp();
// Loop over sampling points
#pragma omp parallel num_threads(nthreads)
{
Helper<T> hlp(gconf, grid.data(), nullptr);
T emb = exp(-2*beta);
int jump = hlp.lineJump();
const T * ku = hlp.kernel.data();
const T * kv = hlp.kernel.data()+supp;
#pragma omp for schedule(guided,100)
for (size_t ipart=0; ipart<nvis; ++ipart)
{
UVW<T> coord = baselines.effectiveCoord(idx(ipart));
hlp.prep(coord.u, coord.v);
complex<T> r = 0;
const auto * ptr = hlp.p0r;
for (size_t cu=0; cu<supp; ++cu)
{
complex<T> tmp(0);
for (size_t cv=0; cv<supp; ++cv)
tmp += ptr[cv] * kv[cv];
r += tmp*ku[cu];
ptr += jump;
}
if (have_wgt) r*=wgt[ipart];
vis[ipart] = r*emb;
}
}
}
template<typename T> void grid2ms_c
(const Baselines<T> &baselines, const GridderConfig<T> &gconf,
const const_mav<uint32_t,1> &idx, const const_mav<complex<T>,2> &grid,
mav<complex<T>,2> &ms, const const_mav<T,2> &wgt)
{
auto nrows = baselines.Nrows();
auto nchan = baselines.Nchannels();
size_t nvis = idx.shape(0);
checkShape(ms.shape(), {nrows, nchan});
bool have_wgt = wgt.size()!=0;
if (have_wgt) checkShape(wgt.shape(), {nrows, nchan});
size_t nu=gconf.Nu(), nv=gconf.Nv();
checkShape(grid.shape(), {nu, nv});
myassert(grid.contiguous(), "grid is not contiguous");
T beta = gconf.Beta();
size_t supp = gconf.Supp();
// Loop over sampling points
#pragma omp parallel num_threads(nthreads)
{
Helper<T> hlp(gconf, grid.data(), nullptr);
T emb = exp(-2*beta);
int jump = hlp.lineJump();
const T * ku = hlp.kernel.data();
const T * kv = hlp.kernel.data()+supp;
#pragma omp for schedule(guided,100)
for (size_t ipart=0; ipart<nvis; ++ipart)
{
auto tidx = idx(ipart);
auto row = tidx/nchan;
auto chan = tidx-row*nchan;
UVW<T> coord = baselines.effectiveCoord(tidx);
hlp.prep(coord.u, coord.v);
complex<T> r = 0;
const auto * ptr = hlp.p0r;
for (size_t cu=0; cu<supp; ++cu)
{
complex<T> tmp(0);
for (size_t cv=0; cv<supp; ++cv)
tmp += ptr[cv] * kv[cv];
r += tmp*ku[cu];
ptr += jump;
}
if (have_wgt) r*=wgt(row,chan);
ms(row,chan) = r*emb;
}
}
}
template<typename T> void vis2dirty_wstack(const Baselines<T> &baselines,
const GridderConfig<T> &gconf, const const_mav<uint32_t,1> &idx,
const const_mav<complex<T>,1> &vis, mav<complex<T>,2> &dirty)
{
auto nx_dirty=gconf.Nxdirty();
auto ny_dirty=gconf.Nydirty();
auto nu=gconf.Nu();
auto nv=gconf.Nv();
auto psx=gconf.Pixsize_x();
auto psy=gconf.Pixsize_y();
size_t nvis = vis.shape(0);
checkShape(idx.shape(),vis.shape());
checkShape(dirty.shape(),{nx_dirty,ny_dirty});
// determine w values for every visibility, and min/max w;
T wmin=T(1e38), wmax=T(-1e38);
vector<T> wval(nvis);
for (size_t ipart=0; ipart<nvis; ++ipart)
{
wval[ipart] = baselines.effectiveCoord(idx(ipart)).w;
wmin = min(wmin,wval[ipart]);
wmax = max(wmax,wval[ipart]);
}
cout << "data w range: " << wmin << " to " << wmax << endl;
double x0 = -0.5*nx_dirty*psx,
y0 = -0.5*ny_dirty*psy;
double nmin = sqrt(max(1.-x0*x0-y0*y0,0.))-1.;
double dw = 0.25/abs(nmin);
double w_eps=1e-7; // FIXME
auto w_supp = get_supp(w_eps);
EC_Kernel_with_correction<T> kernel(w_supp);
wmin -= 0.5*w_supp*dw;
wmax += 0.5*w_supp*dw;
size_t nplanes = size_t((wmax-wmin)/dw)+2;
cout << "nplanes: " << nplanes << endl;
vector<size_t> nvis_plane(nplanes,0);
vector<int> minplane(nvis);
for (size_t ipart=0; ipart<nvis; ++ipart)
{
int plane0 = int((wval[ipart]-wmin)/dw+0.5*w_supp)-int(w_supp-1);
minplane[ipart]=plane0;
for (int i=max<int>(0,plane0); i<min<int>(nplanes,plane0+w_supp); ++i)
++nvis_plane[i];
}
for (ptrdiff_t i=0; i<nx_dirty; ++i)
for (ptrdiff_t j=0; j<ny_dirty; ++j)
dirty(i,j) = 0.;
for (size_t iw=0; iw<nplanes; ++iw)
{
cout << "working on w plane #" << iw << " containing " << nvis_plane[iw]
<< " visibilities" << endl;
if (nvis_plane[iw]==0) continue;
double wcur = wmin+iw*dw;
size_t cnt=0;
vector<complex<T>> vis_loc_ (nvis_plane[iw]);
auto vis_loc = mav<complex<T>,1>(vis_loc_.data(), {nvis_plane[iw]});
vector<uint32_t> idx_loc_ (nvis_plane[iw]);
auto idx_loc = mav<uint32_t,1>(idx_loc_.data(), {nvis_plane[iw]});
for (size_t ipart=0; ipart<nvis; ++ipart)
if ((iw>=minplane[ipart]) && (iw<minplane[ipart]+w_supp))
{
myassert(cnt<nvis_plane[iw],"must not happen");
double x=min(1.,2./(w_supp*dw)*abs(wcur-wval[ipart]));
vis_loc[cnt] = vis[ipart]*kernel(x);
idx_loc[cnt] = idx[ipart];
++cnt;
}
myassert(cnt==nvis_plane[iw],"must not happen 2");
auto grid_=vector<complex<T>>(nu*nv);
auto grid=mav<complex<T>,2>(grid_.data(),{nu, nv});
vis2grid_c(baselines, gconf, const_mav<uint32_t,1>(idx_loc), const_mav<complex<T>,1>(vis_loc), grid, const_mav<T,1>(nullptr,{0}));
auto tdirty_=vector<complex<T>>(nx_dirty*ny_dirty);
auto tdirty=mav<complex<T>,2>(tdirty_.data(),{nx_dirty, ny_dirty});
gconf.grid2dirty_c(grid, tdirty);
gconf.apply_wscreen(tdirty, tdirty, wcur, false);
for (ptrdiff_t i=0; i<nx_dirty; ++i)
for (ptrdiff_t j=0; j<ny_dirty; ++j)
dirty(i,j) += tdirty(i,j);
}
// correct for w gridding
cout << "applying correction for gridding in w direction" << endl;
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for schedule(static)
for (size_t i=0; i<=nx_dirty/2; ++i)
{
double fx = x0+i*psx;
fx *= fx;
for (size_t j=0; j<=ny_dirty/2; ++j)
{
double fy = y0+j*psy;
fy*=fy;
#if 1
auto nm1=cos(sqrt(fx+fy)) -1; // cosine profile
#else
auto nm1 = (-fx-fy)/(sqrt(1.-fx-fy)+1.);
#endif
T fct = kernel.corfac(nm1*dw);
size_t i2 = nx_dirty-i, j2 = ny_dirty-j;
dirty(i,j)*=fct;
if ((i>0)&&(i<i2))
{
dirty(i2,j)*=fct;
if ((j>0)&&(j<j2))
dirty(i2,j2)*=fct;
}
if ((j>0)&&(j<j2))
dirty(i,j2)*=fct;
}
}
}
}
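vis2dirty_wstack assigns every visibility to w_supp consecutive w planes: the plane spacing is dw, the first contributing plane is plane0 = int((w-wmin)/dw + 0.5*w_supp) - (w_supp-1), and the kernel argument on plane iw is min(1, 2/(w_supp*dw)*|wcur-w|). A small numeric illustration of this bookkeeping, with invented wmin, dw, nplanes and w values:

#include <algorithm>
#include <cmath>
#include <iostream>
#include <vector>

int main()
  {
  const double wmin=-10., dw=2.5;            // invented plane origin and spacing
  const int w_supp=4, nplanes=12;
  std::vector<double> wval{-3.7, 0.0, 8.2};  // made-up visibility w values
  for (double w : wval)
    {
    int plane0 = int((w-wmin)/dw + 0.5*w_supp) - (w_supp-1);
    std::cout << "w=" << w << " -> planes";
    for (int i=std::max(0,plane0); i<std::min(nplanes,plane0+w_supp); ++i)
      {
      double wcur = wmin + i*dw;
      double x = std::min(1., 2./(w_supp*dw)*std::abs(wcur-w));
      std::cout << " " << i << " (x=" << x << ")";
      }
    std::cout << "\n";
    }
  }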
} // namespace gridder
#endif
@@ -57,7 +57,7 @@ int : the number of threads used by the module
)"""; )""";
template<typename T> template<typename T>
- using pyarr = py::array_t<T>;
+ using pyarr = py::array_t<T, 0>;
template<typename T> pyarr<T> makeArray(const vector<size_t> &shape)
{ return pyarr<T>(shape); }
@@ -137,7 +137,7 @@ template<size_t ndim, typename T> mav<T,ndim> make_mav(pyarr<T> &in)
}
return mav<T, ndim>(in.mutable_data(),dims,str);
}
- template<size_t ndim, typename T> mav<const T,ndim> make_const_mav(const pyarr<T> &in)
+ template<size_t ndim, typename T> const_mav<T,ndim> make_const_mav(const pyarr<T> &in)
{
myassert(ndim==in.ndim(), "dimension mismatch");
array<size_t,ndim> dims;
@@ -148,7 +148,7 @@ template<size_t ndim, typename T> mav<const T,ndim> make_const_mav(const pyarr<T
str[i]=in.strides(i)/sizeof(T);
myassert(str[i]*ptrdiff_t(sizeof(T))==in.strides(i), "weird strides");
}
- return mav<const T, ndim>(in.data(),dims,str);
+ return const_mav<T, ndim>(in.data(),dims,str);
}
constexpr auto PyBaselines_DS = R"""(
@@ -365,65 +365,25 @@ template<typename T> class PyGridderConfig: public GridderConfig<T>
pyarr<complex<T>> dirty2grid_c(const pyarr<complex<T>> &dirty) const
{
- checkArray(dirty, "dirty", {nx_dirty, ny_dirty});
- auto pdirty = dirty.data();
- auto tmp = makeArray<complex<T>>({nu, nv});
- auto ptmp = tmp.mutable_data();
- pocketfft::stride_t strides{tmp.strides(0),tmp.strides(1)};
+ auto dirty2 = make_const_mav<2>(dirty);
+ auto grid = makeArray<complex<T>>({nu, nv});
+ auto grid2=make_mav<2>(grid);
{
py::gil_scoped_release release;
- for (size_t i=0; i<nu*nv; ++i)
-   ptmp[i] = 0.;
- for (size_t i=0; i<nx_dirty; ++i)
-   for (size_t j=0; j<ny_dirty; ++j)
-     {
-     size_t i2 = nu-nx_dirty/2+i;
-     if (i2>=nu) i2-=nu;
-     size_t j2 = nv-ny_dirty/2+j;
-     if (j2>=nv) j2-=nv;
-     ptmp[nv*i2+j2] = pdirty[ny_dirty*i + j]*cfu[i]*cfv[j];
-     }
- pocketfft::c2c({nu,nv}, strides, strides, {0,1}, pocketfft::FORWARD,
-   ptmp, ptmp, T(1), nthreads);
+ GridderConfig<T>::dirty2grid_c(dirty2, grid2);
}
- return tmp;
+ return grid;
}
- pyarr<complex<T>> apply_wscreen(const pyarr<complex<T>> &dirty_, double w, bool adjoint) const
+ pyarr<complex<T>> apply_wscreen(const pyarr<complex<T>> &dirty, double w, bool adjoint) const
{
- checkArray(dirty_, "dirty", {nx_dirty, ny_dirty});
- auto dirty = dirty_.data();
- auto res_ = makeArray<complex<T>>({nx_dirty, ny_dirty});
- auto res = res_.mutable_data();
- double x0 = -0.5*nx_dirty*psx,
-        y0 = -0.5*ny_dirty*psy;
+ auto dirty2 = make_const_mav<2>(dirty);
+ auto res = makeArray<complex<T>>({nx_dirty, ny_dirty});
+ auto res2 = make_mav<2>(res);
{
py::gil_scoped_release release;
- #pragma omp parallel num_threads(nthreads)
+ GridderConfig<T>::apply_wscreen(dirty2, res2, w, adjoint);
- {
- #pragma omp for schedule(static)
- for (size_t i=0; i<=nx_dirty/2; ++i)
-   {
-   double fx = x0+i*psx;
-   fx *= fx;
-   for (size_t j=0; j<=ny_dirty/2; ++j)
-     {
-     double fy = y0+j*psy;
-     auto ws = wscreen(fx, fy*fy, w, adjoint);
-     res[ny_dirty*i+j] = dirty[ny_dirty*i+j]*ws; // lower left
-     size_t i2 = nx_dirty-i, j2 = ny_dirty-j;
-     if ((i>0)&&(i<i2))
-       {
-       res[ny_dirty*i2+j] = dirty[ny_dirty*i2+j]*ws; // lower right