* relaxed atomic_integral

* re-enabled gc
* optimized mco calculation
* added heuristic to *reduce* mco at insertion time
* fixed a signedness bug in update_size_ctrl
This commit is contained in:
joaquintides
2023-12-25 12:48:04 +01:00
parent 71fb7636c5
commit a68b92d1cd

View File

@@ -203,17 +203,6 @@ private:
 template<typename Integral>
 struct atomic_integral
 {
-#if defined(BOOST_UNORDERED_LATCH_FREE)
-  operator Integral()const{return n.load(std::memory_order_acquire);}
-  void operator=(Integral m){n.store(m,std::memory_order_release);}
-  void operator|=(Integral m){n.fetch_or(m);}
-  void operator&=(Integral m){n.fetch_and(m);}
-  atomic_integral& operator=(atomic_integral const& rhs) {
-    n.store(rhs.n.load());
-    return *this;
-  }
-#else
   operator Integral()const{return n.load(std::memory_order_relaxed);}
   void operator=(Integral m){n.store(m,std::memory_order_relaxed);}
   void operator|=(Integral m){n.fetch_or(m,std::memory_order_relaxed);}
@@ -223,7 +212,6 @@ struct atomic_integral
     n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
     return *this;
   }
-#endif
   std::atomic<Integral> n;
 };
@@ -880,15 +868,14 @@ public:
         if(f(cast_for(group_shared{},type_policy::value_from(*p)))){
           // TODO: prove no ABA
           auto pc=reinterpret_cast<unsigned char*>(pg)+n;
-          auto mco=group_type::maybe_caused_overflow(pc);
           if(reinterpret_cast<std::atomic<unsigned char>*>(pc)->exchange(1)!=1){
-#if 1
+#if 0
             auto& v=local_garbage_vector();
             --v.size;
-            v.mcos+=mco;
+            v.mcos+=!pg->is_not_overflowed(hash);
             pg->reset(n);
 #else
-            retire_element(p,mco);
+            retire_element(p,!pg->is_not_overflowed(hash));
 #endif
             res=1;
           }
@@ -1621,7 +1608,9 @@ private:
       }
       pg->set(n,hash);
       insert_counter(pos0)=counter+2;
-      ++local_garbage_vector().size;
+      auto& v=local_garbage_vector();
+      ++v.size;
+      v.mcos-=!pg->is_not_overflowed(hash);
       return 1;
     }
     if(!pbn--)return -1;
@@ -1957,7 +1946,7 @@ private:
     std::atomic<std::size_t> rpos=0;
     std::atomic<bool>        reading=false;
     std::atomic<ssize_t>     size=0;
-    std::atomic<std::size_t> mcos=0;
+    std::atomic<ssize_t>     mcos=0;
   };
   static constexpr std::size_t default_max_probe=3;
@@ -2013,13 +2002,14 @@ private:
   void update_size_ctrl()
   {
+    using ssize_t=std::make_signed<std::size_t>::type;
     for(std::size_t i=0;i<garbage_vectors.size();++i){
       auto &v=garbage_vectors[i];
-      this->size_ctrl.size+=v.size;
-      if(this->size_ctrl.ml>=v.mcos)this->size_ctrl.ml-=v.mcos;
-      else this->size_ctrl.ml=0;
-      v.size=0;
-      v.mcos=0;
+      this->size_ctrl.size+=v.size.exchange(0);
+      auto mcos=v.mcos.exchange(0);
+      if(ssize_t(this->size_ctrl.ml)>=mcos)this->size_ctrl.ml-=mcos;
+      else this->size_ctrl.ml=0;
     }
   }