removed prior experiments and optimized insertion via two-step counter

joaquintides
2023-12-14 12:36:04 +01:00
parent b866bba144
commit ca7120d928


@@ -841,9 +841,9 @@ public:
       auto mco=group_type::maybe_caused_overflow(pc);
       if(reinterpret_cast<std::atomic<unsigned char>*>(pg)[n].
         compare_exchange_strong(expected,1)){
-        super::destroy_element(p);
-        pg->reset(n);
-        //retire_element(static_cast<std::size_t>(p-this->arrays.elements()),mco);
+        //super::destroy_element(p);
+        //pg->reset(n);
+        retire_element(static_cast<std::size_t>(p-this->arrays.elements()),mco);
         res=1;
       }
     }
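The erasure path above stops destroying the element and clearing the slot in place; retire_element instead hands the slot to deferred reclamation (the surrounding code keeps per-thread garbage_vectors and a current_epoch), so concurrent readers that still hold a pointer into the group never observe a destroyed element. A minimal sketch of that retirement idea, with hypothetical names (retired_slot, garbage_vector::retire/collect) that are not the actual Boost.Unordered internals:

// Sketch only: hypothetical epoch-based retirement, not the actual
// boost::unordered implementation.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <vector>

struct retired_slot
{
  std::size_t   pos;   // element slot that was logically erased
  std::uint64_t epoch; // global epoch at retirement time
};

struct garbage_vector
{
  std::vector<retired_slot> slots;

  // Called instead of destroying the element in place: remember the
  // slot together with the epoch at which it was retired.
  void retire(std::size_t pos,std::atomic<std::uint64_t>& current_epoch)
  {
    slots.push_back({pos,current_epoch.load(std::memory_order_relaxed)});
  }

  // Destroy only slots retired before the oldest epoch still announced
  // by any reader; no reader can reach those elements anymore.
  template<typename Destroy>
  void collect(std::uint64_t min_reader_epoch,Destroy destroy)
  {
    auto it=slots.begin();
    while(it!=slots.end()){
      if(it->epoch<min_reader_epoch){destroy(it->pos);it=slots.erase(it);}
      else ++it;
    }
  }
};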
@@ -1036,16 +1036,6 @@ private:
       epoch_type& e;
     };
-    struct group_synchronized_shared_lock_guard
-    {
-      group_synchronized_shared_lock_guard(
-        epoch_type& e_,group_access::shared_lock_guard&& lck_):
-        e{e_},lck{std::move(lck_)}{}
-      ~group_synchronized_shared_lock_guard(){e=0;}
-      epoch_type& e;
-      group_access::shared_lock_guard lck;
-    };
     struct group_exclusive_lock_guard
     {
       group_exclusive_lock_guard(
@@ -1109,11 +1099,6 @@ private:
     /* Tag-dispatched shared/exclusive group access */
     using group_shared=std::false_type;
-#if defined(BOOST_UNORDERED_LATCH_FREE)
-    using group_synchronized_shared=int;
-#endif
     using group_exclusive=std::true_type;
     inline group_shared_lock_guard access(group_shared,std::size_t pos)const
@@ -1127,16 +1112,6 @@ private:
 #endif
     }
-#if defined(BOOST_UNORDERED_LATCH_FREE)
-    inline group_synchronized_shared_lock_guard access(
-      group_synchronized_shared,std::size_t pos)const
-    {
-      auto& e=garbage_vectors[thread_id()%garbage_vectors.size()].epoch;
-      e=current_epoch.load(std::memory_order_relaxed);
-      return {e,this->arrays.group_accesses()[pos].shared_access()};
-    }
-#endif
     inline group_exclusive_lock_guard access(
       group_exclusive,std::size_t pos)const
     {
@@ -1162,11 +1137,6 @@ private:
     static inline const value_type&
     cast_for(group_shared,value_type& x){return x;}
-#if defined(BOOST_UNORDERED_LATCH_FREE)
-    static inline const value_type&
-    cast_for(group_synchronized_shared,value_type& x){return x;}
-#endif
     static inline typename std::conditional<
       std::is_same<key_type,value_type>::value,
       const value_type&,
@@ -1404,13 +1374,8 @@ private:
     template<typename... Args>
     BOOST_FORCEINLINE bool construct_and_emplace(Args&&... args)
     {
-#if 0 && defined(BOOST_UNORDERED_LATCH_FREE)
-      return construct_and_emplace_or_visit(
-        group_synchronized_shared{},[](const value_type&){},std::forward<Args>(args)...);
-#else
       return construct_and_emplace_or_visit(
         group_shared{},[](const value_type&){},std::forward<Args>(args)...);
-#endif
     }
     struct call_construct_and_emplace_or_visit
@@ -1459,14 +1424,8 @@ private:
     template<typename... Args>
     BOOST_FORCEINLINE bool emplace_impl(Args&&... args)
     {
-#if 0 && defined(BOOST_UNORDERED_LATCH_FREE)
-      return emplace_or_visit_impl(
-        group_synchronized_shared{},
-        [](const value_type&){},std::forward<Args>(args)...);
-#else
       return emplace_or_visit_impl(
         group_shared{},[](const value_type&){},std::forward<Args>(args)...);
-#endif
     }
     template<typename GroupAccessMode,typename F,typename... Args>
@@ -1587,16 +1546,13 @@ private:
       for(;;){
       startover:
         boost::uint32_t counter=0;
-        {
-          //auto lck=access(group_exclusive{},pos0);
-          //counter=insert_counter(pos0);
-        }
+        while(BOOST_UNLIKELY((counter=insert_counter(pos0))%2==1)){}
         if(unprotected_visit(
           access_mode,k,pos0,hash,std::forward<F>(f)))return 0;
-        //reserve_size rsize(*this);
-        //if(BOOST_LIKELY(rsize.succeeded())){
-        if(true){
+        reserve_size rsize(*this);
+        if(BOOST_LIKELY(rsize.succeeded())){
           for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
             auto pos=pb.get();
             auto pg=this->arrays.groups()+pos;
@@ -1609,9 +1565,8 @@ private:
             /* slot wasn't empty */
             goto startover;
           }
-          //auto lck=access(group_exclusive{},pos0);
-          //if(BOOST_UNLIKELY(insert_counter(pos0)++!=counter)){
-          if(false){
+          if(BOOST_UNLIKELY(
+            !insert_counter(pos0).compare_exchange_weak(counter,counter+1))){
             /* other thread inserted from pos0, need to start over */
             pg->reset(n);
             goto startover;
@@ -1619,7 +1574,8 @@ private:
           auto p=this->arrays.elements()+pos*N+n;
           this->construct_element(p,std::forward<Args>(args)...);
           pg->set(n,hash);
-          //rsize.commit();
+          ++insert_counter(pos0);
+          rsize.commit();
           return 1;
         }
         pg->mark_overflow(hash);
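The last three hunks implement the two-step counter named in the commit title: an insertion starting at pos0 first spins until the counter is even (no insertion from that position is in flight), then, once an empty slot has been claimed, CASes the counter from the observed even value to the odd counter+1 (a failed CAS means another thread inserted from pos0 first, so the slot is rolled back with pg->reset(n) and the whole operation starts over), and finally increments the counter back to even after the element is constructed. A standalone sketch of that protocol, reduced to a single counter and with hypothetical names (try_insert, insert_with_two_step_counter) rather than the real container code:

// Sketch only: seqlock-style two-step insert counter. The real code
// interleaves these steps with group probing; names are illustrative.
#include <atomic>
#include <cstdint>

std::atomic<std::uint32_t> insert_counter{0};

template<typename TryInsert>
bool insert_with_two_step_counter(TryInsert try_insert)
{
  for(;;){
    // Step 0: wait until no insertion is in flight (counter is even)
    // and remember the value observed.
    std::uint32_t counter;
    while((counter=insert_counter.load(std::memory_order_acquire))%2==1){}

    // Step 1: claim the counter by making it odd. In the real code this
    // happens only after an empty slot has been reserved; a failure means
    // another thread inserted from the same position, so start over.
    if(!insert_counter.compare_exchange_weak(
      counter,counter+1,std::memory_order_acq_rel)){
      continue; // analogous to goto startover above
    }

    bool ok=try_insert();

    // Step 2: release the counter by making it even again.
    insert_counter.fetch_add(1,std::memory_order_release);
    if(ok)return true;
  }
}

Keeping the in-flight flag in the counter's low bit lets competing inserters detect interference with a single CAS and lets waiters spin on a plain load, instead of taking the group's exclusive lock as the commented-out code it replaces did.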