Commit c9d8f4d8 authored by Martin Reinecke

starting to generalize the gridder

parent 7b579dbf
@@ -34,6 +34,41 @@ namespace py = pybind11;
namespace {
void myassert(bool cond, const char *msg)
{
if (cond) return;
throw runtime_error(msg);
}
/*! Returns the largest integer \a n that fulfills \a 2^n<=arg. */
template<typename I> inline int ilog2 (I arg)
{
#ifdef __GNUC__
if (arg==0) return 0;
if (sizeof(I)==sizeof(int))
return 8*sizeof(int)-1-__builtin_clz(arg);
if (sizeof(I)==sizeof(long))
return 8*sizeof(long)-1-__builtin_clzl(arg);
if (sizeof(I)==sizeof(long long))
return 8*sizeof(long long)-1-__builtin_clzll(arg);
#endif
int res=0;
while (arg > 0xFFFF) { res+=16; arg>>=16; }
if (arg > 0x00FF) { res|=8; arg>>=8; }
if (arg > 0x000F) { res|=4; arg>>=4; }
if (arg > 0x0003) { res|=2; arg>>=2; }
if (arg > 0x0001) { res|=1; }
return res;
}
/*! Returns the number of bits needed to represent \a arg different values.
\a arg must be >=1. */
template<typename I> inline int bits_needed (I arg)
{
myassert(arg>=1, "argument must be >=1");
if (arg==1) return 0;
return ilog2(arg-1)+1;
}
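// Illustrative values for the two helpers above (a sketch):
//   ilog2(1)==0, ilog2(2)==1, ilog2(255)==7, ilog2(256)==8
//   bits_needed(1)==0, bits_needed(3)==2, bits_needed(256)==8
// i.e. an index over n distinct values (rows, channels) fits into bits_needed(n) bits.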
//
// Utilities for converting between Cartesian coordinates and Peano index
//
@@ -123,12 +158,6 @@ uint32_t morton2peano2D_32(uint32_t v, int bits)
return res;
}
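// Usage sketch (illustrative): a 2D grid cell (iu,iv) is first interleaved into
// a Morton code and then converted to a Peano index, e.g.
//   uint32_t p = morton2peano2D_32(coord2morton2D_32(iu, iv), bits);
// with 'bits' the number of bits per dimension (cf. coord2peano() further down).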
void myassert(bool cond, const char *msg)
{
if (cond) return;
throw runtime_error(msg);
}
//
// Utilities for indirect sorting (argsort)
//
@@ -153,7 +182,8 @@ template<typename It, typename T2, typename Comp>
T2 num=end-begin;
idx.resize(num);
for (T2 i=0; i<num; ++i) idx[i] = i;
sort (idx.begin(),idx.end(),IdxComp__<It,Comp>(begin,comp));
// sort (idx.begin(),idx.end(),IdxComp__<It,Comp>(begin,comp));
stable_sort (idx.begin(),idx.end(),IdxComp__<It,Comp>(begin,comp));
}
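// Argsort sketch (illustrative values): for the input {30.,10.,20.} the
// buildIndex() wrapper documented below fills idx with {1,2,0}, so that
// values[idx[i]] is ascending:
//   vector<double> values{30.,10.,20.};
//   vector<size_t> idx;
//   buildIndex(values.begin(), values.end(), idx);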
/*! Performs an indirect sort on the supplied iterator range and returns in
@@ -244,12 +274,16 @@ void legendre_prep(int n, vector<double> &x, vector<double> &w)
// Start of real gridder functionality
//
using a_i_c = py::array_t<int, py::array::c_style | py::array::forcecast>;
using a_d_c = py::array_t<double, py::array::c_style | py::array::forcecast>;
using a_c_c = py::array_t<complex<double>,
py::array::c_style | py::array::forcecast>;
template<typename T>
using pyarr = py::array_t<T>;
template<typename T>
using pyarr_c = py::array_t<T, py::array::c_style | py::array::forcecast>;
using a_u32_c = pyarr_c<uint32_t>;
using a_d_c = pyarr_c<double>;
using a_f_c = pyarr_c<float>;
using a_cd_c = pyarr_c<complex<double>>;
a_i_c peanoindex(const a_d_c &uv_, int nu, int nv)
a_u32_c peanoindex(const a_d_c &uv_, int nu, int nv)
{
myassert(uv_.ndim()==2, "uv array must be 2D");
myassert(uv_.shape(1)==2, "uv.shape[1] must be 2");
@@ -271,7 +305,7 @@ a_i_c peanoindex(const a_d_c &uv_, int nu, int nv)
vector<int> newind;
buildIndex(ipeano.begin(), ipeano.end(), newind);
int odim[] = {nvis};
a_i_c res(odim);
a_u32_c res(odim);
auto iout = res.mutable_data();
for (int i=0; i<nvis; ++i)
iout[i] = newind[i];
@@ -430,7 +464,7 @@ class ReadHelper: public Helper
}
};
a_d_c complex2hartley (const a_c_c &grid_)
a_d_c complex2hartley (const a_cd_c &grid_)
{
myassert(grid_.ndim()==2, "grid array must be 2D");
int nu = grid_.shape(0), nv = grid_.shape(1);
@@ -454,7 +488,7 @@ a_d_c complex2hartley (const a_c_c &grid_)
return res;
}
a_d_c to_grid (const a_d_c &uv_, const a_c_c &vis_,
a_d_c to_grid (const a_d_c &uv_, const a_cd_c &vis_,
int nu, int nv, double epsilon)
{
myassert(uv_.ndim()==2, "uv array must be 2D");
@@ -466,7 +500,7 @@ a_d_c to_grid (const a_d_c &uv_, const a_c_c &vis_,
auto vis = vis_.data();
int odim[] = {nu,nv};
a_c_c res(odim);
a_cd_c res(odim);
auto grid = res.mutable_data();
for (int i=0; i<nu*nv; ++i) grid[i] = 0.;
@@ -497,14 +531,14 @@ a_d_c to_grid (const a_d_c &uv_, const a_c_c &vis_,
return complex2hartley(res);
}
a_c_c hartley2complex (const a_d_c &grid_)
a_cd_c hartley2complex (const a_d_c &grid_)
{
myassert(grid_.ndim()==2, "grid array must be 2D");
int nu = grid_.shape(0), nv = grid_.shape(1);
auto grid = grid_.data();
int odim[] = {nu,nv};
a_c_c res(odim);
a_cd_c res(odim);
auto grid2 = res.mutable_data();
for (int u=0; u<nu; ++u)
{
@@ -522,7 +556,7 @@ a_c_c hartley2complex (const a_d_c &grid_)
return res;
}
a_c_c from_grid (const a_d_c &uv_, const a_d_c &grid0_, double epsilon)
a_cd_c from_grid (const a_d_c &uv_, const a_d_c &grid0_, double epsilon)
{
myassert(uv_.ndim()==2, "uv array must be 2D");
myassert(uv_.shape(1)==2, "uv.shape[1] must be 2");
@@ -536,7 +570,7 @@ a_c_c from_grid (const a_d_c &uv_, const a_d_c &grid0_, double epsilon)
nv=grid_.shape(1);
int odim[] = {nvis};
a_c_c res(odim);
a_cd_c res(odim);
auto vis = res.mutable_data();
// Loop over sampling points
@@ -595,13 +629,164 @@ a_d_c correction_factors (size_t n, size_t nval, double epsilon)
return res;
}
class GridderConfig
{
private:
size_t nx_dirty, ny_dirty;
double epsilon, ucorr, vcorr;
size_t nx_grid, ny_grid;
size_t peano_level;
public:
GridderConfig(size_t nxdirty, size_t nydirty, double epsilon_,
double urange, double vrange)
: nx_dirty(nxdirty), ny_dirty(nydirty), epsilon(epsilon_),
ucorr(1./urange), vcorr(1./vrange),
nx_grid(2*nx_dirty), ny_grid(2*ny_dirty),
peano_level(bits_needed(max(nx_grid, ny_grid)))
{
myassert((nx_dirty&1)==0, "nx_dirty must be even");
myassert((ny_dirty&1)==0, "ny_dirty must be even");
myassert(epsilon>0, "epsilon must be positive");
myassert(urange>0, "urange must be positive");
myassert(vrange>0, "vrange must be positive");
}
template<typename Tcoord> void coord2cell(const Tcoord &coord,
size_t &iu, size_t &iv, double &fu, double &fv) const
{
}
template<typename Tcoord> size_t coord2peano(const Tcoord &coord) const
{
double u=fmodulo(coord.u*ucorr, 1.)*nx_grid,
v=fmodulo(coord.v*vcorr, 1.)*ny_grid;
auto iu = min(nx_grid-1, size_t(u));
auto iv = min(ny_grid-1, size_t(v));
return morton2peano2D_32(coord2morton2D_32(iu,iv),peano_level);
}
template<typename Ti, typename Tbase> pyarr_c<Ti> reorder_indices
(const pyarr_c<Ti> &idx, const Tbase &baselines) const
{
myassert(idx.ndim()==1, "need 1D index array");
vector<size_t> peano(idx.shape(0));
auto pidx = idx.data();
for (size_t i=0; i<peano.size(); ++i)
peano[i] = coord2peano(baselines.EffectiveCoord(pidx[i]));
vector<size_t> newind;
buildIndex(peano.begin(), peano.end(), newind);
peano=vector<size_t>(); // deallocate
int odim[] = {int(idx.shape(0))};
pyarr_c<Ti> res(odim);
auto iout = res.mutable_data();
for (int i=0; i<idx.shape(0); ++i)
iout[i] = pidx[newind[i]];
return res;
}
};
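// Construction sketch (made-up values): a 256x256 dirty image gridded on an
// internal 512x512 grid with target accuracy 1e-7; urange/vrange are the u/v
// extents that get wrapped into [0,1) via ucorr/vcorr:
//   GridderConfig gconf(256, 256, 1e-7, 1., 1.);
//   auto sorted_idx = gconf.reorder_indices(bl.getIndices(), bl);  // bl: a Baselines object (defined below)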
template<typename T> struct UV
{
using dtype = T;
T u, v;
UV () : u(T(0)), v(T(0)) {}
UV (T u_, T v_) : u(u_), v(v_) {}
UV operator* (double fct) const
{ return UV(u*fct, v*fct); }
};
template<typename T> struct UVW
{
using dtype = T;
T u, v, w;
UVW () : u(T(0)), v(T(0)), w(T(0)) {}
UVW (T u_, T v_, T w_) : u(u_), v(v_), w(w_) {}
UVW operator* (double fct) const
{ return UVW(u*fct, v*fct, w*fct); }
};
template<typename Ti, typename Tcoord, typename Ts> class Baselines
{
static_assert(is_integral<Ti>::value, "Ti must be an integral type");
static_assert(is_unsigned<Ti>::value, "Ti must be an unsigned type");
private:
vector<Tcoord> coord;
vector<Ts> scaling;
size_t nrows;
size_t channelbits, channelmask;
public:
Baselines(const vector<Tcoord> &coord_, const vector<Ts> &scaling_)
: coord(coord_), scaling(scaling_), nrows(coord.size()/scaling.size())
{
myassert(nrows*scaling.size()==coord.size(), "bad array dimensions");
channelbits = bits_needed(scaling.size());
channelmask = (size_t(1)<<channelbits)-1;
auto rowbits = bits_needed(nrows);
myassert(rowbits+channelbits<=8*sizeof(Ti), "Ti too small");
}
Baselines(const pyarr_c<typename Tcoord::dtype> &coord_,
const pyarr_c<Ts> &scaling_)
{
myassert(coord_.ndim()==2, "coord array must be 2D");
myassert(coord_.shape(1)==2, "coord.shape[1] must be 2");
myassert(scaling_.ndim()==1, "scaling array must be 1D");
nrows = coord_.shape(0)/scaling_.shape(0);
myassert(nrows*size_t(scaling_.shape(0))==size_t(coord_.shape(0)),
"bad array dimensions");
scaling.resize(scaling_.shape(0));
for (size_t i=0; i<scaling.size(); ++i)
scaling[i] = scaling_.data()[i];
coord.resize(nrows);
for (size_t i=0; i<coord.size(); ++i)
coord[i] = UV<double>(coord_.data()[2*i], coord_.data()[2*i+1]);
channelbits = bits_needed(scaling.size());
channelmask = (size_t(1)<<channelbits)-1;
auto rowbits = bits_needed(nrows);
myassert(rowbits+channelbits<=8*sizeof(Ti), "Ti too small");
}
pyarr_c<Ti> getIndices() const
{
int odim[] = {int(nrows*scaling.size())};
pyarr_c<Ti> res(odim);
auto odata = res.mutable_data();
for (size_t i=0; i<nrows; ++i)
for (size_t j=0; j<scaling.size(); ++j)
odata[j+scaling.size()*i] = (i<<channelbits)+j;
return res;
}
Tcoord EffectiveCoord(Ti index) const
{ return coord[index>>channelbits]*scaling[index&channelmask]; }
};
using Baselines_i32_uv_d = Baselines<uint32_t, UV<double>, double>;
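// Index layout sketch: with e.g. 3 scaling factors (channels), channelbits==2
// and channelmask==3, so getIndices() packs index = (row<<2) + channel and
// EffectiveCoord(index) returns coord[index>>2] * scaling[index&3].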
template<typename Tbase> void blah(const Tbase &baselines)
{
auto x=baselines.getIndices();
auto data = x.data();
auto res = baselines.EffectiveCoord(data[0]);
cout << res.u << " " << res.v << endl;
}
} // unnamed namespace
PYBIND11_MODULE(nifty_gridder, m)
{
using namespace pybind11::literals;
m.def("peanoindex",&peanoindex);
m.def("get_w",&get_w);
m.def("to_grid",&to_grid);
m.def("from_grid",&from_grid);
m.def("correction_factors",&correction_factors);
py::class_<Baselines_i32_uv_d> (m, "Baselines_i32_uv_d")
.def(py::init<a_d_c, a_d_c>(), "coord"_a, "scaling"_a)
.def("getIndices", &Baselines_i32_uv_d::getIndices);
py::class_<GridderConfig> (m, "GridderConfig")
.def(py::init<size_t, size_t, double, double, double>(),"nxdirty"_a,
"nydirty"_a, "epsilon"_a, "urange"_a, "vrange"_a)
.def("reorder_indices", &GridderConfig::reorder_indices<uint32_t, Baselines_i32_uv_d>,
"idx"_a, "baselines"_a);
m.def ("blah",&blah<Baselines_i32_uv_d>);
}