switched from available to size_

joaquintides
2023-03-14 19:09:13 +01:00
committed by Christian Mazakas
parent 5e225fe46c
commit f244ba55de
3 changed files with 33 additions and 42 deletions

View File

@@ -255,9 +255,9 @@ public:
 std::size_t size()const noexcept
 {
 auto lck=shared_access();
-std::size_t ml_=this->ml; /* load order matters */
-std::size_t available_=this->available;
-return ml_-available_;
+std::size_t ml_=this->ml;
+std::size_t size_=this->size_;
+return size_<=ml_?size_:ml_;
 }
 using super::max_size;
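
Note on the new size() above (illustration only, not part of the commit): under the size_ scheme, inserters bump the counter before they know whether the insertion stays within the max load ml (see reserve_size further down), so a concurrent reader can observe a transient overshoot; the clamp size_<=ml_?size_:ml_ caps the reported size at ml. A minimal standalone sketch of that clamped read, with invented names and a plain shared_mutex standing in for the table's shared_access():

// Sketch only: clamp a counter that in-flight insertions may have bumped
// past the max-load bound. Names (clamped_size_view, size_, ml, m) are
// invented for illustration.
#include <cstddef>
#include <shared_mutex>

struct clamped_size_view
{
  std::size_t size_=0;  // may transiently exceed ml during an insertion
  std::size_t ml=0;     // max load for the current arrays
  mutable std::shared_mutex m;

  std::size_t size()const
  {
    std::shared_lock<std::shared_mutex> lck(m); // analogue of shared_access()
    std::size_t s=size_,ml_=ml;
    return s<=ml_?s:ml_;                        // never report more than ml
  }
};
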
@@ -522,7 +522,7 @@ private:
 this->al(),std::forward<Args>(args)...);
 int res=unprotected_norehash_emplace_or_visit(
 std::forward<F>(f),type_policy::move(x.value()));
-if(res>=0)return res!=0;
+if(BOOST_LIKELY(res>=0))return res!=0;
 lck.unlock();
@@ -553,34 +553,30 @@ private:
 auto lck=shared_access();
 int res=unprotected_norehash_emplace_or_visit(
 std::forward<F>(f),std::forward<Args>(args)...);
-if(res>=0)return res!=0;
+if(BOOST_LIKELY(res>=0))return res!=0;
 }
 rehash_if_full();
 }
 }
-struct reserve_available
+struct reserve_size
 {
-reserve_available(concurrent_table& x_):x{x_}
+reserve_size(concurrent_table& x_):x{x_}
 {
-do{
-available=x.available.load(std::memory_order_relaxed);
-}while(
-available&&!x.available.compare_exchange_weak(available,available-1));
+size_=++x.size_;
 }
-~reserve_available()
+~reserve_size()
 {
-if(!commit_&&available){
-x.available.fetch_add(1,std::memory_order_release);
-}
+if(!commit_)--x.size_;
 }
-bool succeeded()const{return available!=0;}
+bool succeeded()const{return size_<=x.ml;}
 void commit(){commit_=true;}
 concurrent_table &x;
-std::size_t available;
+std::size_t size_;
 bool commit_=false;
 };
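
The reserve_available/reserve_size guard above is the RAII piece of the insertion fast path: the old version reserved capacity by CAS-decrementing the shared available counter and gave the slot back if the insertion never committed; the new version optimistically increments size_, treats any value above ml as failure, and decrements on a non-committed destruction. The toy below reproduces that optimistic reserve/commit/rollback shape on a plain counter. It is a sketch of the pattern only: the real guard works on the table's shared counter under a shared lock, and counter_reservation and its members are invented names.

// Toy reserve/commit/rollback guard (invented names, not Boost code).
#include <cstddef>

struct counter_reservation
{
  counter_reservation(std::size_t& size,std::size_t max_load):
    size_{size},reserved_{++size},max_load_{max_load}{}
  ~counter_reservation(){if(!committed_)--size_;} // roll back unless committed

  bool succeeded()const{return reserved_<=max_load_;}
  void commit(){committed_=true;}

  std::size_t& size_;
  std::size_t  reserved_;  // counter value right after our increment
  std::size_t  max_load_;
  bool         committed_=false;
};

int main()
{
  std::size_t n=0;
  {
    counter_reservation r(n,1);
    if(r.succeeded())r.commit();  // fits under max load: n stays at 1
  }
  {
    counter_reservation r(n,1);   // overshoots: succeeded() is false,
  }                               // destructor rolls n back to 1
  return n==1?0:1;
}
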
@@ -595,10 +591,10 @@ private:
 for(;;){
 startover:
 boost::uint32_t counter=insert_counter(pos0);
-if(unprotected_visit(k,pos0,hash,f))return 0;
+if(unprotected_visit(k,pos0,hash,std::forward<F>(f)))return 0;
-reserve_available ra(*this);
-if(BOOST_LIKELY(ra.succeeded())){
+reserve_size rs(*this);
+if(BOOST_LIKELY(rs.succeeded())){
 for(prober pb(pos0);;pb.next(this->arrays.groups_size_mask)){
 auto pos=pb.get();
 auto pg=this->arrays.groups+pos;
@@ -616,7 +612,7 @@ private:
 }
 auto p=this->arrays.elements+pos*N+n;
 this->construct_element(p,std::forward<Args>(args)...);
-ra.commit();
+rs.commit();
 f(type_policy::value_from(*p));
 return 1;
 }
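
Apart from the rename, the hunk at -595 above also forwards the visitation function (std::forward<F>(f)) instead of passing it as an lvalue. The sketch below, with made-up names, shows the kind of caller this preserves: a visitor whose call operator is rvalue-qualified only works if the intermediate layer keeps its value category.

// Sketch (invented names): forwarding a callable preserves its value
// category through an intermediate visit helper.
#include <utility>

struct rvalue_only_visitor
{
  int& out;
  void operator()(int v)&&{out=v;} // callable on rvalues only
};

template<typename F>
void visit_forwarding(F&& f)
{
  std::forward<F>(f)(42);          // plain f(42) would not compile for the
}                                  // rvalue-qualified visitor below

int main()
{
  int seen=0;
  visit_forwarding(rvalue_only_visitor{seen});
  return seen==42?0:1;
}
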
@@ -634,7 +630,7 @@ private:
 {
 auto lck=exclusive_access();
 // TODO: use same mechanism as unchecked_emplace_with_rehash
-if(!this->available)this->super::rehash(super::capacity()+1);
+if(this->size_==this->ml)this->super::rehash(super::capacity()+1);
 }
 shared_lock_guard shared_access()const
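
A way to read the whole commit: the two counters are related by available == ml - size_, so every guard translates mechanically, e.g. available != 0 becomes size_ < ml (insertion may proceed) and available == 0 becomes size_ == ml (table full, as in rehash_if_full above). A tiny self-check of that translation, assuming nothing beyond the invariant itself:

// Self-check of the old/new guard equivalence under available == ml - size_.
#include <cassert>
#include <cstddef>

int main()
{
  std::size_t ml=4; // max load of some hypothetical arrays
  for(std::size_t size_=0;size_<=ml;++size_){
    std::size_t available=ml-size_;
    assert((available!=0)==(size_<ml));  // old vs new insertion guard
    assert((available==0)==(size_==ml)); // old vs new "table is full" test
  }
  return 0;
}
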

View File

@@ -1191,7 +1191,7 @@ public:
 const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
 hash_base{empty_init,h_},pred_base{empty_init,pred_},
 allocator_base{empty_init,al_},arrays(new_arrays(n)),
-ml{initial_max_load()},available{std::size_t(ml)}
+ml{initial_max_load()},size_{0}
 {}
 table_core(const table_core& x):
@@ -1205,11 +1205,11 @@ public:
 hash_base{empty_init,std::move(x.h())},
 pred_base{empty_init,std::move(x.pred())},
 allocator_base{empty_init,std::move(x.al())},
-arrays(x.arrays),ml{std::size_t(x.ml)},available{std::size_t(x.available)}
+arrays(x.arrays),ml{std::size_t(x.ml)},size_{std::size_t(x.size_)}
 {
 x.arrays=x.new_arrays(0);
 x.ml=x.initial_max_load();
-x.available=std::size_t(x.ml);
+x.size_=0;
 }
 table_core(const table_core& x,const Allocator& al_):
@@ -1224,7 +1224,7 @@ public:
 if(al()==x.al()){
 std::swap(arrays,x.arrays);
 std::swap(ml,x.ml);
-std::swap(available,x.available);
+std::swap(size_,x.size_);
 }
 else{
 reserve(x.size());
@@ -1323,7 +1323,7 @@ public:
 move_assign_if<pocma>(al(),x.al());
 swap(arrays,x.arrays);
 swap(ml,x.ml);
-swap(available,x.available);
+swap(size_,x.size_);
 }
 else{
 /* noshrink: favor memory reuse over tightness */
@@ -1349,7 +1349,7 @@ public:
 allocator_type get_allocator()const noexcept{return al();}
 bool empty()const noexcept{return size()==0;}
-std::size_t size()const noexcept{return ml-available;}
+std::size_t size()const noexcept{return size_;}
 std::size_t max_size()const noexcept{return SIZE_MAX;}
 // TODO unify erase?
@@ -1391,7 +1391,7 @@ public:
 swap(pred(),x.pred());
 swap(arrays,x.arrays);
 swap(ml,x.ml);
-swap(available,x.available);
+swap(size_,x.size_);
 }
 void clear()noexcept
@@ -1410,7 +1410,7 @@ public:
 }
 arrays.groups[arrays.groups_size_mask].set_sentinel();
 ml=initial_max_load();
-available=std::size_t(ml);
+size_=0;
 }
 }
@@ -1545,7 +1545,7 @@ public:
 {
 auto res=nosize_unchecked_emplace_at(
 arrays,pos0,hash,std::forward<Args>(args)...);
---available;
+++size_;
 return res;
 }
@@ -1580,7 +1580,7 @@ public:
 /* new_arrays_ lifetime taken care of by unchecked_rehash */
 unchecked_rehash(new_arrays_);
---available;
+++size_;
 return it;
 }
@@ -1649,7 +1649,7 @@ public:
 arrays_type arrays;
 SizeImpl ml;
-SizeImpl available;
+SizeImpl size_;
 private:
 template<typename,typename,typename,typename,typename,typename>
@@ -1712,7 +1712,7 @@ private:
 std::memcpy(
 arrays.groups,x.arrays.groups,
 (arrays.groups_size_mask+1)*sizeof(group_type));
-available=std::size_t(x.available);
+size_=std::size_t(x.size_);
 }
 }
@@ -1775,10 +1775,9 @@ private:
 * that average probe length won't increase unboundedly in repeated
 * insert/erase cycles (drift).
 */
-bool ofw=group_type::maybe_caused_overflow(pc);
+ml-=group_type::maybe_caused_overflow(pc);
 group_type::reset(pc);
-ml-=ofw;
-available+=!ofw;
+--size_;
 }
 void recover_slot(group_type* pg,std::size_t pos)
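
In the erase bookkeeping above, the old code computed ofw once and split its effect between ml (shrunk when the erased element had overflowed its group, the anti-drift measure described in the comment) and available; the new code shrinks ml on overflow and always decrements size_. The check below, with made-up numbers, confirms that both schemes leave the same free capacity ml - size_:

// Equivalence check for the erase bookkeeping; ofw models the result of
// group_type::maybe_caused_overflow(pc). Numbers are arbitrary.
#include <cassert>
#include <cstddef>

int main()
{
  for(std::size_t ofw=0;ofw<2;++ofw){
    std::size_t ml=8,size_=5,available=ml-size_;
    std::size_t old_ml=ml-ofw,old_available=available+(ofw==0); // old scheme
    std::size_t new_ml=ml-ofw,new_size=size_-1;                 // new scheme
    assert(old_ml==new_ml);
    assert(old_available==new_ml-new_size); // same free capacity either way
  }
  return 0;
}
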
@@ -1844,9 +1843,7 @@ private:
 }
 delete_arrays(arrays);
 arrays=new_arrays_;
-auto s=size();
 ml=initial_max_load();
-available=ml-s;
 }
 void noshrink_reserve(std::size_t n)
@@ -1862,9 +1859,7 @@ private:
 auto new_arrays_=new_arrays(n);
 delete_arrays(arrays);
 arrays=new_arrays_;
-auto s=size();
 ml=initial_max_load();
-available=ml-s;
 }
 }
 }
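
The two rehash/reserve hunks above shrink because the stored quantity is now invariant across a rehash: swapping in larger arrays does not change the element count, so only ml needs recomputing, whereas the old code had to rebuild available as ml - size(). A toy illustration, using an invented load-factor formula rather than the library's:

// Toy rehash bookkeeping (invented max_load_for, not the library's formula).
#include <cassert>
#include <cstddef>

constexpr std::size_t max_load_for(std::size_t capacity)
{
  return capacity-capacity/8; // illustrative ~87.5% maximum load
}

int main()
{
  std::size_t size_=100,capacity=128,ml=max_load_for(capacity);
  assert(size_<=ml);
  capacity*=2;                    // grow the arrays
  ml=max_load_for(capacity);      // size_ is untouched, only ml changes
  std::size_t available=ml-size_; // what the old code had to restore
  assert(available==124);         // 224 - 100
  return 0;
}
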

View File

@@ -492,7 +492,7 @@ private:
 if(it!=end()){
 return {it,false};
 }
-if(BOOST_LIKELY(this->available!=0)){
+if(BOOST_LIKELY(this->size_<this->ml)){
 return {
 make_iterator(
 this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...)),