Commit 200cbe53 authored by Martin Reinecke

move global variables into static functions

parent 2c80bd37
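
Note: the diff below replaces namespace-scope thread_local variables with accessor functions whose state lives in a function-local thread_local static, so the per-thread object is initialized on first use and callers go through call syntax. A minimal stand-alone sketch of that idiom (hypothetical names, not pocketfft's exact declarations):

#include <cstddef>

// Before: a namespace-scope thread-local variable.
//   thread_local std::size_t counter = 0;

// After: an accessor returning a reference to a function-local
// thread_local static; the variable is constructed on first use
// in each thread.
inline std::size_t &counter()
  {
  static thread_local std::size_t counter_ = 0;
  return counter_;
  }

int main()
  {
  counter() = 5;             // write through the accessor
  return int(counter());     // read back the per-thread value
  }
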
@@ -492,9 +492,8 @@ namespace threading {
 #ifdef POCKETFFT_NO_MULTITHREADING
-constexpr size_t thread_id = 0;
-constexpr size_t num_threads = 1;
-constexpr size_t max_threads = 1;
+constexpr size_t thread_id() { return 0; }
+constexpr size_t num_threads() { return 1; }
 template <typename Func>
 void thread_map(size_t /* nthreads */, Func f)
@@ -502,8 +501,16 @@ void thread_map(size_t /* nthreads */, Func f)
 #else
-thread_local size_t thread_id = 0;
-thread_local size_t num_threads = 1;
+size_t &thread_id()
+  {
+  static thread_local size_t thread_id_=0;
+  return thread_id_;
+  }
+size_t &num_threads()
+  {
+  static thread_local size_t num_threads_=1;
+  return num_threads_;
+  }
 static const size_t max_threads = max(1u, thread::hardware_concurrency());
 class latch
@@ -669,8 +676,8 @@ void thread_map(size_t nthreads, Func f)
     {
     pool.submit(
       [&f, &counter, &ex, &ex_mut, i, nthreads] {
-      thread_id = i;
-      num_threads = nthreads;
+      thread_id() = i;
+      num_threads() = nthreads;
       try { f(); }
       catch (...)
         {
@@ -2760,10 +2767,10 @@ template<size_t N> class multi_iter
         str_i(iarr.stride(idim_)), p_oi(0), str_o(oarr.stride(idim_)),
         idim(idim_), rem(iarr.size()/iarr.shape(idim))
       {
-      auto nshares = threading::num_threads;
+      auto nshares = threading::num_threads();
       if (nshares==1) return;
      if (nshares==0) throw runtime_error("can't run with zero threads");
-      auto myshare = threading::thread_id;
+      auto myshare = threading::thread_id();
       if (myshare>=nshares) throw runtime_error("impossible share requested");
       size_t nbase = rem/nshares;
       size_t additional = rem%nshares;
......
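
For context on the multi_iter hunk above: the constructor divides rem iterations among num_threads() shares so that each thread handles a contiguous block whose size differs by at most one. A hedged sketch of that split (illustrative only; the hunk is truncated before the library's own range computation, and the helper name is hypothetical):

#include <algorithm>
#include <cstddef>
#include <utility>

// Half-open range [lo, hi) of items handled by share 'myshare' when
// 'rem' items are divided among 'nshares' shares; the first
// rem % nshares shares receive one extra item.
std::pair<std::size_t, std::size_t> share_range(std::size_t rem,
  std::size_t nshares, std::size_t myshare)
  {
  std::size_t nbase = rem/nshares;
  std::size_t additional = rem%nshares;
  std::size_t lo = myshare*nbase + std::min(myshare, additional);
  std::size_t hi = lo + nbase + (myshare<additional ? 1 : 0);
  return {lo, hi};
  }
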
@@ -373,7 +373,7 @@ py::array genuine_hartley(const py::array &in, const py::object &axes_,
   }
 // Export good_size in raw C-API to reduce overhead (~4x faster)
-PyObject * good_size(PyObject * self, PyObject * args)
+PyObject * good_size(PyObject * /*self*/, PyObject * args)
   {
   Py_ssize_t n_ = -1;
   int real = false;
......
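
On the last hunk: commenting out the name of a parameter that the CPython calling convention requires but the body never uses (PyObject * /*self*/) silences unused-parameter warnings without changing the function's signature. A hypothetical minimal example of the same pattern:

#include <Python.h>

// METH_VARARGS functions must accept (self, args); here neither is used,
// so only their names are commented out to keep compilers quiet.
static PyObject *noop(PyObject * /*self*/, PyObject * /*args*/)
  { Py_RETURN_NONE; }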