Commit ec85bf2d authored by Martin Reinecke's avatar Martin Reinecke
Browse files

Merge remote-tracking branch 'origin/ducc0' into quaternions

parents e6068f0e d9865983
Pipeline #77357 passed with stages
in 13 minutes and 14 seconds
......@@ -44,7 +44,9 @@ using namespace std;
class Alm_Base
{
protected:
size_t lmax, mmax, tval;
size_t lmax, arrsize;
vector<size_t> mval;
vector<ptrdiff_t> mstart;
public:
/*! Returns the total number of coefficients for maximum quantum numbers
......@@ -55,28 +57,79 @@ class Alm_Base
return ((m+1)*(m+2))/2 + (m+1)*(l-m);
}
Alm_Base (size_t lmax_, const vector<size_t> &mval_,
const vector<ptrdiff_t> &mstart_)
: lmax(lmax_), mval(mval_)
{
MR_assert(mval.size()>0, "no m indices supplied");
MR_assert(mstart_.size()==mval.size(), "mval and mstart have different sizes");
for (size_t i=0; i<mval.size(); ++i)
{
MR_assert(mval[i]<=lmax, "m >= lmax");
if (i>0)
MR_assert(mval[i]>mval[i-1], "m not strictly ascending");
}
mstart.resize(mval.back()+1, -2*lmax);
arrsize=0;
for (size_t i=0; i<mval.size(); ++i)
{
mstart[mval[i]] = mstart_[i];
arrsize = size_t(max(ptrdiff_t(arrsize), mstart_[i]+ptrdiff_t(lmax+1)));
}
}
Alm_Base (size_t lmax_, const vector<size_t> &mval_)
: lmax(lmax_), mval(mval_)
{
MR_assert(mval.size()>0, "no m indices supplied");
for (size_t i=0; i<mval.size(); ++i)
{
MR_assert(mval[i]<=lmax, "m >= lmax");
if (i>0)
MR_assert(mval[i]>mval[i-1], "m not strictly ascending");
}
mstart.resize(mval.back()+1, -2*lmax);
for (size_t i=0, cnt=0; i<mval.size(); ++i, cnt+=lmax-mval[i]+1)
mstart[mval[i]] = ptrdiff_t(cnt)-ptrdiff_t(mval[i]);
arrsize = size_t(mstart.back()+ptrdiff_t(lmax+1));
}
/*! Constructs an Alm_Base object with given \a lmax and \a mmax. */
Alm_Base (size_t lmax_=0, size_t mmax_=0)
: lmax(lmax_), mmax(mmax_), tval(2*lmax+1) {}
Alm_Base (size_t lmax_, size_t mmax_)
: lmax(lmax_), mval(mmax_+1), mstart(mmax_+1)
{
ptrdiff_t idx = 0;
for (size_t m=0; m<=mmax_; ++m)
{
mval[m] = m;
mstart[m] = idx-m;
idx += lmax-m+1;
}
arrsize = Num_Alms(lmax_, mmax_);
}
/*! Returns the maximum \a l */
size_t Lmax() const { return lmax; }
/*! Returns the maximum \a m */
size_t Mmax() const { return mmax; }
size_t Mmax() const { return mval.back(); }
size_t n_entries() const { return arrsize; }
/*! Returns an array index for a given m, from which the index of a_lm
can be obtained by adding l. */
size_t index_l0 (size_t m) const
{ return ((m*(tval-m))>>1); }
{ return mstart[m]; }
/*! Returns the array index of the specified coefficient. */
size_t index (size_t l, size_t m) const
{ return index_l0(m) + l; }
/*! Returns \a true, if both objects have the same \a lmax and \a mmax,
else \a false. */
bool conformable (const Alm_Base &other) const
{ return ((lmax==other.lmax) && (mmax==other.mmax)); }
bool conformable(const Alm_Base &other) const
{
return (lmax==other.lmax) && (mval==other.mval) && (mstart==other.mstart);
}
bool complete() const
{ return mval.size() == lmax+1; }
};
/*! Class for storing spherical harmonic coefficients. */
......@@ -87,21 +140,21 @@ template<typename T> class Alm: public Alm_Base
template<typename Func> void applyLM(Func func)
{
for (size_t m=0; m<=mmax; ++m)
for (auto m: mval)
for (size_t l=m; l<=lmax; ++l)
func(l,m,alm.v(l,m));
func(l,m,alm.v(index(l,m)));
}
public:
/*! Constructs an Alm object with given \a lmax and \a mmax. */
Alm (mav<T,1> &data, size_t lmax_, size_t mmax_)
: Alm_Base(lmax_, mmax_), alm(data)
{ MR_assert(alm.size()==Num_Alms(lmax, mmax), "bad array size"); }
{ MR_assert(alm.size()==Num_Alms(lmax, mmax_), "bad array size"); }
Alm (const mav<T,1> &data, size_t lmax_, size_t mmax_)
: Alm_Base(lmax_, mmax_), alm(data)
{ MR_assert(alm.size()==Num_Alms(lmax, mmax), "bad array size"); }
{ MR_assert(alm.size()==Num_Alms(lmax, mmax_), "bad array size"); }
Alm (size_t lmax_=0, size_t mmax_=0)
: Alm_Base(lmax_,mmax_), alm ({Num_Alms(lmax,mmax)}) {}
: Alm_Base(lmax_,mmax_), alm ({Num_Alms(lmax,mmax_)}) {}
/*! Sets all coefficients to zero. */
void SetToZero ()
......@@ -120,13 +173,16 @@ template<typename T> class Alm: public Alm_Base
/*! \a a(l,m) *= \a factor[m] for all \a l,m. */
template<typename T2> void ScaleM (const mav<T2,1> &factor)
{
MR_assert(factor.size()>size_t(mmax),
MR_assert(factor.size()>size_t(Mmax()),
"alm.ScaleM: factor array too short");
applyLM([&factor](size_t /*l*/, size_t m, T &v){v*=factor(m);});
}
/*! Adds \a num to a_00. */
template<typename T2> void Add (const T2 &num)
{ alm.v(0)+=num; }
{
MR_assert(mval[0]==0, "cannot add a constant: no m=0 mode present");
alm.v(index_l0(0))+=num;
}
/*! Returns a reference to the specified coefficient. */
T &operator() (size_t l, size_t m)
......@@ -229,8 +285,7 @@ template<typename T> void rotate_alm (Alm<complex<T>> &alm,
double psi, double theta, double phi)
{
auto lmax=alm.Lmax();
MR_assert (lmax==alm.Mmax(),
"rotate_alm: lmax must be equal to mmax");
MR_assert (alm.complete(), "rotate_alm: need complete A_lm set");
if (theta!=0)
{
......
......@@ -34,6 +34,7 @@
#include "ducc0/infra/threading.h"
#include "ducc0/infra/useful_macros.h"
#include "ducc0/infra/mav.h"
#include "ducc0/infra/simd.h"
#include "ducc0/math/es_kernel.h"
namespace ducc0 {
......@@ -536,8 +537,11 @@ template<typename T, typename T2=complex<T>> class Helper
public:
const T2 *p0r;
T2 *p0w;
T kernel[64] DUCC0_ALIGNED(64);
static constexpr size_t vlen=native_simd<T>::size();
union {
T scalar[64];
native_simd<T> simd[64/vlen];
} kernel;
Helper(const GridderConfig &gconf_, const T2 *grid_r_, T2 *grid_w_,
vector<std::mutex> &locks_, double w0_=-1, double dw_=-1)
......@@ -551,13 +555,16 @@ template<typename T, typename T2=complex<T>> class Helper
w0(w0_),
xdw(T(1)/dw_),
nexp(2*supp + do_w_gridding),
nvecs(vlen*((nexp+vlen-1)/vlen)),
nvecs((nexp+vlen-1)/vlen),
locks(locks_)
{}
{
MR_assert(2*supp+1<=64, "support too large");
for (auto &v: kernel.simd) v=0;
}
~Helper() { if (grid_w) dump(); }
int lineJump() const { return sv; }
T Wfac() const { return kernel[2*supp]; }
T Wfac() const { return kernel.scalar[2*supp]; }
void prep(const UVW &in)
{
double u, v;
......@@ -567,20 +574,13 @@ template<typename T, typename T2=complex<T>> class Helper
double y0 = xsupp*(iv0-v);
for (int i=0; i<supp; ++i)
{
kernel[i ] = T(x0+i*xsupp);
kernel[i+supp] = T(y0+i*xsupp);
kernel.scalar[i ] = T(x0+i*xsupp);
kernel.scalar[i+supp] = T(y0+i*xsupp);
}
if (do_w_gridding)
kernel[2*supp] = min(T(1), T(xdw*xsupp*abs(w0-in.w)));
for (size_t i=nexp; i<nvecs; ++i)
kernel[i]=0;
for (size_t i=0; i<nvecs; ++i)
{
kernel[i] = T(1) - kernel[i]*kernel[i];
kernel[i] = (kernel[i]<0) ? T(-200.) : beta*(sqrt(kernel[i])-T(1));
}
kernel.scalar[2*supp] = T(xdw*xsupp*abs(w0-in.w));
for (size_t i=0; i<nvecs; ++i)
kernel[i] = exp(kernel[i]);
kernel.simd[i] = esk(kernel.simd[i], beta);
if ((iu0<bu0) || (iv0<bv0) || (iu0+supp>bu0+su) || (iv0+supp>bv0+sv))
{
if (grid_w) { dump(); fill(wbuf.begin(), wbuf.end(), T(0)); }
......@@ -681,8 +681,8 @@ template<typename T, typename Serv> void x2grid_c
{
Helper<T> hlp(gconf, nullptr, grid.vdata(), locks, w0, dw);
int jump = hlp.lineJump();
const T * DUCC0_RESTRICT ku = hlp.kernel;
const T * DUCC0_RESTRICT kv = hlp.kernel+supp;
const T * DUCC0_RESTRICT ku = hlp.kernel.scalar;
const T * DUCC0_RESTRICT kv = hlp.kernel.scalar+supp;
while (auto rng=sched.getNext()) for(auto ipart=rng.lo; ipart<rng.hi; ++ipart)
{
......@@ -729,8 +729,8 @@ template<typename T, typename Serv> void grid2x_c
{
Helper<T> hlp(gconf, grid.data(), nullptr, locks, w0, dw);
int jump = hlp.lineJump();
const T * DUCC0_RESTRICT ku = hlp.kernel;
const T * DUCC0_RESTRICT kv = hlp.kernel+supp;
const T * DUCC0_RESTRICT ku = hlp.kernel.scalar;
const T * DUCC0_RESTRICT kv = hlp.kernel.scalar+supp;
while (auto rng=sched.getNext()) for(auto ipart=rng.lo; ipart<rng.hi; ++ipart)
{
......
......@@ -220,9 +220,13 @@ template<typename T> class Interpolator
mapper[itheta*ncp+iphi].push_back(i);
}
size_t cnt=0;
for (const auto &vec: mapper)
for (auto &vec: mapper)
{
for (auto i:vec)
idx[cnt++] = i;
vec.clear();
vec.shrink_to_fit();
}
return idx;
}
......@@ -434,7 +438,14 @@ template<typename T> class Interpolator
auto idx = getIdx(ptg);
execStatic(idx.size(), nthreads, 0, [&](Scheduler &sched)
{
vector<T> wt(supp), wp(supp);
union {
native_simd<T> simd[64/vl];
T scalar[64];
} kdata;
T *wt(kdata.scalar), *wp(kdata.scalar+supp);
size_t nvec = (2*supp+vl-1)/vl;
for (auto &v: kdata.simd) v=0;
MR_assert(2*supp<=64, "support too large");
vector<T> psiarr(2*kmax+1);
#ifdef SIMD_INTERPOL
vector<native_simd<T>> psiarr2((2*kmax+1+vl-1)/vl);
......@@ -446,11 +457,13 @@ template<typename T> class Interpolator
T f0=T(0.5*supp+ptg(i,0)*xdtheta);
size_t i0 = size_t(f0+T(1));
for (size_t t=0; t<supp; ++t)
wt[t] = kernel((t+i0-f0)*delta - 1);
wt[t] = (t+i0-f0)*delta - 1;
T f1=T(0.5)*supp+ptg(i,1)*xdphi;
size_t i1 = size_t(f1+1.);
for (size_t t=0; t<supp; ++t)
wp[t] = kernel((t+i1-f1)*delta - 1);
wp[t] = (t+i1-f1)*delta - 1;
for (size_t t=0; t<nvec; ++t)
kdata.simd[t] = kernel(kdata.simd[t]);
psiarr[0]=1.;
double psi=ptg(i,2);
double cpsi=cos(psi), spsi=sin(psi);
......@@ -484,25 +497,25 @@ template<typename T> class Interpolator
{
#ifdef SPECIAL_CASING
case 1:
interpol_help0<1,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<1,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 2:
interpol_help0<2,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<2,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 3:
interpol_help0<3,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<3,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 4:
interpol_help0<4,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<4,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 5:
interpol_help0<5,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<5,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 6:
interpol_help0<6,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<6,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 7:
interpol_help0<7,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<7,1>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
#endif
default:
......@@ -549,25 +562,25 @@ template<typename T> class Interpolator
{
#ifdef SPECIAL_CASING
case 1:
interpol_help0<1,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<1,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 2:
interpol_help0<2,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<2,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 3:
interpol_help0<3,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<3,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 4:
interpol_help0<4,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<4,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 5:
interpol_help0<5,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<5,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 6:
interpol_help0<6,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<6,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
case 7:
interpol_help0<7,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), res, i);
interpol_help0<7,3>(wt, wp, p, d0, d1, psiarr2.data(), res, i);
break;
#endif
default:
......@@ -629,7 +642,14 @@ template<typename T> class Interpolator
execStatic(idx.size(), nthreads, 0, [&](Scheduler &sched)
{
size_t b_theta=99999999999999, b_phi=9999999999999999;
vector<T> wt(supp), wp(supp);
union {
native_simd<T> simd[64/vl];
T scalar[64];
} kdata;
T *wt(kdata.scalar), *wp(kdata.scalar+supp);
size_t nvec = (2*supp+vl-1)/vl;
for (auto &v: kdata.simd) v=0;
MR_assert(2*supp<=64, "support too large");
vector<T> psiarr(2*kmax+1);
#ifdef SIMD_INTERPOL
vector<native_simd<T>> psiarr2((2*kmax+1+vl-1)/vl);
......@@ -641,11 +661,13 @@ template<typename T> class Interpolator
T f0=T(0.5)*supp+ptg(i,0)*xdtheta;
size_t i0 = size_t(f0+1.);
for (size_t t=0; t<supp; ++t)
wt[t] = kernel((t+i0-f0)*delta - 1);
wt[t] = (t+i0-f0)*delta - 1;
T f1=T(0.5)*supp+ptg(i,1)*xdphi;
size_t i1 = size_t(f1+1.);
for (size_t t=0; t<supp; ++t)
wp[t] = kernel((t+i1-f1)*delta - 1);
wp[t] = (t+i1-f1)*delta - 1;
for (size_t t=0; t<nvec; ++t)
kdata.simd[t] = kernel(kdata.simd[t]);
psiarr[0]=1.;
double psi=ptg(i,2);
double cpsi=cos(psi), spsi=sin(psi);
......@@ -696,25 +718,25 @@ template<typename T> class Interpolator
{
#ifdef SPECIAL_CASING
case 1:
deinterpol_help0<1,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<1,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 2:
deinterpol_help0<2,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<2,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 3:
deinterpol_help0<3,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<3,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 4:
deinterpol_help0<4,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<4,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 5:
deinterpol_help0<5,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<5,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 6:
deinterpol_help0<6,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<6,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 7:
deinterpol_help0<7,1>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<7,1>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
#endif
default:
......@@ -759,25 +781,25 @@ template<typename T> class Interpolator
{
#ifdef SPECIAL_CASING
case 1:
deinterpol_help0<1,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<1,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 2:
deinterpol_help0<2,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<2,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 3:
deinterpol_help0<3,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<3,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 4:
deinterpol_help0<4,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<4,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 5:
deinterpol_help0<5,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<5,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 6:
deinterpol_help0<6,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<6,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
case 7:
deinterpol_help0<7,3>(wt.data(), wp.data(), p, d0, d1, psiarr2.data(), data, i);
deinterpol_help0<7,3>(wt, wp, p, d0, d1, psiarr2.data(), data, i);
break;
#endif
default:
......
......@@ -25,6 +25,7 @@
#include <cmath>
#include <vector>
#include <array>
#include "ducc0/infra/simd.h"
#include "ducc0/infra/error_handling.h"
#include "ducc0/infra/threading.h"
#include "ducc0/math/constants.h"
......@@ -36,6 +37,28 @@ namespace detail_es_kernel {
using namespace std;
/*! Scalar "exponential of semicircle" gridding kernel
    (Barnett et al. 2018): exp(beta*(sqrt(1-v^2)-1)) for |v|<=1,
    exactly 0 outside that support. */
template<typename T> T esk (T v, T beta)
  {
  const auto arg = (T(1)-v)*(T(1)+v);   // 1 - v^2; negative outside support
  const T supported = arg>=0;           // 1 for |v|<=1, else 0
  // The mask zeroes both the sqrt argument and the final result, so no
  // branch is needed and out-of-support values come out as exact 0.
  return supported*std::exp(beta*(std::sqrt(arg*supported)-T(1)));
  }
/*! Tiny functor wrapping exp(), intended as the per-lane callable for
    native_simd<T>::apply() in the SIMD esk() overload. */
template<typename T> class exponator
  {
  public:
    /*! Returns exp(val).
        const-qualified so the functor can be invoked through a const
        reference/temporary; std-qualified so it does not depend on a
        using-directive being in effect. */
    T operator()(T val) const { return std::exp(val); }
  };
/*! SIMD overload of the "exponential of semicircle" kernel: evaluates
    esk on every lane of \a v at once.
    NOTE(review): assumes native_simd follows the std::experimental::simd
    where()/apply() interface — confirm against ducc0/infra/simd.h. */
template<typename T> native_simd<T> esk (native_simd<T> v, T beta)
{
// 1 - v^2 per lane; negative on lanes outside the support |v|<=1
auto tmp = (T(1)-v)*(T(1)+v);
// lanes outside the kernel support
auto mask = tmp<T(0);
// zero the negative lanes so sqrt() below stays well-defined
where(mask,tmp)*=T(0);
// apply exp() lane-wise via the exponator functor
auto res = (beta*(sqrt(tmp)-T(1))).apply(exponator<T>());
// force an exact 0 on the out-of-support lanes
where (mask,res)*=T(0);
return res;
}
/* This class implements the "exponential of semicircle" gridding kernel
described in https://arxiv.org/abs/1808.06736 */
class ES_Kernel
......@@ -62,11 +85,11 @@ class ES_Kernel
: ES_Kernel(supp_, 2., nthreads){}
template<typename T> T operator()(T v) const
{
auto tmp = (1-v)*(1+v);
auto tmp2 = tmp>=0;
return tmp2*exp(T(beta)*(sqrt(tmp*tmp2)-1));
}
{ return esk(v, T(beta)); }
template<typename T> native_simd<T> operator()(native_simd<T> v) const
{ return esk(v, T(beta)); }
/* Compute correction factors for the ES gridding kernel
This implementation follows eqs. (3.8) to (3.10) of Barnett et al. 2018 */
double corfac(double v) const
......@@ -138,6 +161,7 @@ class ES_Kernel
}
using detail_es_kernel::esk;
using detail_es_kernel::ES_Kernel;
}
......
/*
* This file is part of the MR utility library.
*
* This code is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This code is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this code; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Copyright (C) 2019-2020 Max-Planck-Society
Author: Nathan Reed */
#ifndef DUCC0_ENUMERATE_H
#define DUCC0_ENUMERATE_H
#include <cstddef>
#include <iterator>
#include <tuple>
#include <utility>
// copied from http://reedbeta.com/blog/python-like-enumerate-in-cpp17/
namespace mr {

/*! Python-style enumerate() for range-based for loops.
    Usage: for (auto [i, elem] : enumerate(container)) { ... }
    \a i is the running index (std::size_t, starting at 0) and \a elem
    binds as a reference to the corresponding element of \a iterable.
    Lvalue arguments are held by reference; rvalue arguments are moved
    into the returned wrapper, keeping them alive for the loop. */
template <typename T,
          typename TIter = decltype(std::begin(std::declval<T>())),
          typename = decltype(std::end(std::declval<T>()))>
constexpr auto enumerate(T && iterable)
  {
  struct iterator
    {
    std::size_t i;  // running element index
    TIter iter;     // iterator into the wrapped range
    // Only the underlying iterators are compared; the end sentinel's
    // index value is irrelevant.
    bool operator != (const iterator & other) const { return iter != other.iter; }
    void operator ++ () { ++i; ++iter; }
    // Yields a tuple of (const reference to index, reference to element).
    auto operator * () const { return std::tie(i, *iter); }
    };
  struct iterable_wrapper
    {
    T iterable;  // reference for lvalues, owned copy for rvalues
    auto begin() { return iterator{ 0, std::begin(iterable) }; }
    auto end() { return iterator{ 0, std::end(iterable) }; }
    };
  return iterable_wrapper{ std::forward<T>(iterable) };
  }

}
#endif
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment