Commit ffa0a769 authored by Cristian Lalescu

Merge branch 'bugfix/issue38' into develop

parents 4438759b c6cc3649
Pipeline #101874 passed with stages in 10 minutes and 20 seconds
@@ -32,18 +32,30 @@
 #include <cstdio>
 #include <omp.h>
 class lock_free_bool_array{
     static const int Available = 0;
     static const int Busy = 1;
     static const int NoOwner = -1;
+#ifdef __INTEL_COMPILER
+    struct Locker {
+        Locker(){
+            omp_init_nest_lock(&lock);
+        }
+        ~Locker(){
+            omp_destroy_nest_lock(&lock);
+        }
+        omp_nest_lock_t lock;
+    };
+#else
     struct Locker {
-        Locker() : lock(Available), ownerId(NoOwner), counter(0) {}
+        Locker() : lock(Available), ownerId(NoOwner), counter(0) {
+        }
         std::atomic_int lock;
         std::atomic_int ownerId;
         int counter;
     };
+#endif
     std::vector<std::unique_ptr<Locker>> keys;
@@ -55,14 +67,39 @@ public:
             k.reset(new Locker());
         }
     }
+#ifndef NDEBUG
+    ~lock_free_bool_array(){
+        for(auto& k : keys){
+#ifdef __INTEL_COMPILER
+#else
+            assert(k->lock.load() == Available);
+            assert(k->ownerId.load() == NoOwner);
+#endif
+        }
+    }
+#endif
+#ifdef __INTEL_COMPILER
+    void lock(const long int inKey){
+        Locker* k = keys[inKey%keys.size()].get();
+        omp_set_nest_lock(&k->lock);
+    }
+    void unlock(const long int inKey){
+        Locker* k = keys[inKey%keys.size()].get();
+        omp_unset_nest_lock(&k->lock);
+    }
+#else
     void lock(const long int inKey){
         Locker* k = keys[inKey%keys.size()].get();
         if(k->ownerId.load() != omp_get_thread_num()){
+            int localBusy = Busy;// Intel complains if we pass a const as last param
             int expected = Available;
-            while(!std::atomic_compare_exchange_strong(&k->lock, &expected, Busy)){
+            while(!std::atomic_compare_exchange_strong(&k->lock, &expected, localBusy)){
                 usleep(1);
+                expected = Available;
             }
+            assert(k->ownerId.load() == NoOwner);
             k->ownerId.store(omp_get_thread_num());
             k->counter = 0; // must remain
         }
@@ -83,6 +120,7 @@ public:
             k->lock.store(Available);
         }
     }
+#endif
 };
 #endif
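For context, the non-Intel branch above protects each key with a compare-and-swap spin loop, and this merge adds the `expected = Available;` reset inside the retry loop. Below is a minimal, self-contained sketch of that same pattern; it is not repository code, and the histogram, bin count, and variable names are invented for illustration only.

// Sketch only: CAS spin lock per bin, mirroring the non-Intel lock()/unlock()
// logic of lock_free_bool_array (Available == 0, Busy == 1).
#include <atomic>
#include <memory>
#include <vector>
#include <cstdio>
#include <unistd.h>   // usleep
#include <omp.h>

int main(){
    const long int nb_bins = 16;
    std::vector<double> histogram(nb_bins, 0.0);
    // one spin-lock flag per bin
    std::vector<std::unique_ptr<std::atomic_int>> locks(nb_bins);
    for(auto& l : locks) l.reset(new std::atomic_int(0));

    #pragma omp parallel for
    for(long int i = 0; i < 100000; ++i){
        const long int bin = i % nb_bins;
        int expected = 0;
        // spin until the CAS flips the flag from Available (0) to Busy (1)
        while(!std::atomic_compare_exchange_strong(locks[bin].get(), &expected, 1)){
            usleep(1);
            expected = 0;   // CAS overwrote expected on failure, reset it (the bugfix)
        }
        histogram[bin] += 1.0;   // critical section for this bin only
        locks[bin]->store(0);    // release: back to Available
    }

    printf("histogram[0] = %f\n", histogram[0]);
    return 0;
}

Without the reset, a failed CAS leaves `expected` holding the observed value (Busy), so the next iteration would compare against the wrong value and could acquire the lock spuriously.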
@@ -231,7 +231,9 @@ public:
         const long double field_section_width_z = spatial_box_width[IDXC_Z]/(long double)(field_grid_dim[IDXC_Z]);
         const long int limite = static_cast<long int>((field_section_width_z*(long double)(partition_interval_offset_per_proc[dest_proc+1])
                                                         - std::numeric_limits<long double>::epsilon())/cutoff_radius);
-        if(limite == nb_cell_levels[IDXC_Z]){
+        if(static_cast<long double>(limite)*cutoff_radius
+                == field_section_width_z*(long double)(partition_interval_offset_per_proc[dest_proc+1])
+            || limite == nb_cell_levels[IDXC_Z]){
             return limite-1;
         }
         return limite;
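This hunk tightens the cell-index clamp: when the section edge is large, subtracting numeric_limits<long double>::epsilon() is absorbed by rounding, the truncated index lands exactly on the boundary, and the added equality test pulls it back by one. A small illustrative sketch of the failure mode; the numbers (cutoff 0.25, edge 1024) are invented, and `section_edge` merely stands in for field_section_width_z*partition_interval_offset_per_proc[dest_proc+1].

// Sketch only: epsilon() is the spacing around 1.0, so subtracting it from a
// value of order 1000 changes nothing, and the truncation hits the boundary.
#include <cstdio>
#include <limits>

int main(){
    const long double cutoff_radius = 0.25L;
    const long double section_edge  = 1024.0L;
    const long double eps = std::numeric_limits<long double>::epsilon();

    // the subtraction is a no-op at this magnitude: prints 0
    printf("changed by eps? %d\n", (section_edge - eps) != section_edge);

    const long int limite = static_cast<long int>((section_edge - eps)/cutoff_radius);
    // limite*cutoff_radius lands exactly on the boundary, so the new check fires: prints 1
    printf("on boundary? %d\n",
           static_cast<long double>(limite)*cutoff_radius == section_edge);
    return 0;
}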