refactored padding into a clean design

joaquintides
2023-06-09 10:03:39 +02:00
committed by Christian Mazakas
parent 3ea8a768ca
commit 518a5d2e1e
3 changed files with 78 additions and 53 deletions

View File

@ -64,6 +64,8 @@ using is_execution_policy=std::false_type;
namespace foa{
+ static constexpr std::size_t cacheline_size=64;
template<typename T,std::size_t N>
class cache_aligned_array
{
@ -76,20 +78,20 @@ public:
T& operator[](std::size_t pos)noexcept{return *data(pos);}
private:
- static constexpr std::size_t cacheline=64;
static constexpr std::size_t element_offset=
- (sizeof(T)+cacheline-1)/cacheline*cacheline;
+ (sizeof(T)+cacheline_size-1)/cacheline_size*cacheline_size;
- BOOST_STATIC_ASSERT(alignof(T)<=cacheline);
+ BOOST_STATIC_ASSERT(alignof(T)<=cacheline_size);
T* data(std::size_t pos)noexcept
{
return reinterpret_cast<T*>(
- (reinterpret_cast<uintptr_t>(&buf)+cacheline-1)/cacheline*cacheline+
- pos*element_offset);
+ (reinterpret_cast<uintptr_t>(&buf)+cacheline_size-1)/
+ cacheline_size*cacheline_size
+ +pos*element_offset);
}
- unsigned char buf[element_offset*N+cacheline-1];
+ unsigned char buf[element_offset*N+cacheline_size-1];
};
template<typename Mutex,std::size_t N>
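The arithmetic above is what guarantees one element per cache line: element_offset is sizeof(T) rounded up to a multiple of cacheline_size, and data() first rounds the raw buffer address up to a cache-line boundary before adding pos*element_offset. A minimal standalone sketch of the same round-up computation (illustrative only, not the library's code):

#include <cstddef>
#include <cstdint>

constexpr std::size_t cacheline_size=64;

/* round x up to the next multiple of cacheline_size */
constexpr std::uintptr_t round_up(std::uintptr_t x)
{
  return (x+cacheline_size-1)/cacheline_size*cacheline_size;
}

static_assert(round_up(1)==64,"");
static_assert(round_up(64)==64,"");
static_assert(round_up(65)==128,"");

/* element pos then lives at round_up(buffer_address)+pos*round_up(sizeof(T)),
 * so no two elements of the array can share a 64-byte line
 * (the assertion alignof(T)<=cacheline_size keeps the cast in data() valid).
 */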
@ -297,6 +299,36 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
group_access *group_accesses;
};
+ struct cache_aligned_size_control
+ {
+ static constexpr auto atomic_size_t_size=sizeof(std::atomic<std::size_t>);
+ BOOST_STATIC_ASSERT(atomic_size_t_size<cacheline_size);
+ cache_aligned_size_control(std::size_t ml_,std::size_t size_):
+ pad0_{},ml{ml_},pad1_{},size{size_}{}
+ unsigned char pad0_[cacheline_size-atomic_size_t_size];
+ std::atomic<std::size_t> ml;
+ unsigned char pad1_[cacheline_size-atomic_size_t_size];
+ std::atomic<std::size_t> size;
+ };
+ /* std::swap can't be used on non-assignable atomics */
+ inline void
+ swap_atomic_size_t(std::atomic<std::size_t>& x,std::atomic<std::size_t>& y)
+ {
+ std::size_t tmp=x;
+ x=static_cast<std::size_t>(y);
+ y=tmp;
+ }
+ inline void swap(cache_aligned_size_control& x,cache_aligned_size_control& y)
+ {
+ swap_atomic_size_t(x.ml,y.ml);
+ swap_atomic_size_t(x.size,y.size);
+ }
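The two pad arrays exist to prevent false sharing: pad1_ places ml and size a full cache line apart, so the frequent atomic increments of size performed by inserting threads don't keep invalidating the line that readers of ml are hitting, and pad0_ similarly distances ml from whatever member precedes the control block inside table_core. A standalone sketch of the same intent expressed with alignas (hypothetical type, not the library's; the commit's pad-byte approach presumably avoids imposing a 64-byte alignment requirement on the enclosing table_core object):

#include <atomic>
#include <cstddef>

struct padded_size_control /* hypothetical illustration */
{
  static constexpr std::size_t cacheline_size=64;

  alignas(cacheline_size) std::atomic<std::size_t> ml;   /* max load      */
  alignas(cacheline_size) std::atomic<std::size_t> size; /* element count */
};

/* each member starts its own 64-byte line */
static_assert(alignof(padded_size_control)==64,"");
static_assert(sizeof(padded_size_control)>=2*64,"");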
/* foa::concurrent_table serves as the foundation for end-user concurrent
* hash containers. The TypePolicy parameter can specify flat/node-based
* map-like and set-like containers, though currently we're only providing
@ -357,7 +389,7 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using concurrent_table_core_impl=table_core<
TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
- std::atomic<std::size_t>,Hash,Pred,Allocator>;
+ cache_aligned_size_control,Hash,Pred,Allocator>;
#include <boost/unordered/detail/foa/ignore_wshadow.hpp>
@ -988,8 +1020,8 @@ private:
std::size_t unprotected_size()const
{
- std::size_t m=this->ml;
- std::size_t s=this->size_;
+ std::size_t m=this->size_ctrl.ml;
+ std::size_t s=this->size_ctrl.size;
return s<=m?s:m;
}
@ -1105,7 +1137,7 @@ private:
if(this->find(k,pos0,hash))return false;
- if(BOOST_LIKELY(this->size_<this->ml)){
+ if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...);
}
else{
@ -1118,15 +1150,15 @@ private:
{
reserve_size(concurrent_table& x_):x{x_}
{
- size_=++x.size_;
+ size_=++x.size_ctrl.size;
}
~reserve_size()
{
- if(!commit_)--x.size_;
+ if(!commit_)--x.size_ctrl.size;
}
- bool succeeded()const{return size_<=x.ml;}
+ bool succeeded()const{return size_<=x.size_ctrl.ml;}
void commit(){commit_=true;}
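reserve_size is an RAII guard around the speculative size bump used by concurrent insertion: the constructor increments size_ctrl.size up front, succeeded() checks the reserved count against size_ctrl.ml, and the destructor rolls the increment back unless commit() was called once the element was actually inserted. A standalone sketch of that commit/rollback shape (hypothetical names, not the library's code):

#include <atomic>
#include <cstddef>

struct size_reservation /* hypothetical illustration */
{
  size_reservation(std::atomic<std::size_t>& size_,std::size_t max_load):
    size{size_},reserved{++size_},ml{max_load}{}
  ~size_reservation(){if(!committed)--size;}  /* roll back unless committed */

  bool succeeded()const{return reserved<=ml;} /* reservation fits under max load */
  void commit(){committed=true;}              /* keep the increment */

  std::atomic<std::size_t>& size;
  std::size_t               reserved;
  std::size_t               ml;
  bool                      committed=false;
};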
@ -1200,7 +1232,9 @@ private:
void rehash_if_full()
{
auto lck=exclusive_access();
- if(this->size_==this->ml)this->unchecked_rehash_for_growth();
+ if(this->size_ctrl.size==this->size_ctrl.ml){
+ this->unchecked_rehash_for_growth();
+ }
}
template<typename GroupAccessMode,typename F>
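rehash_if_full runs after an insertion attempt failed under shared (read) access, so by the time the exclusive lock is acquired another thread may already have grown the table; rechecking size against the maximum load under the exclusive lock avoids a redundant rehash. A generic sketch of that recheck-under-exclusive-lock shape (illustrative only, using std::shared_mutex instead of the library's lock machinery):

#include <cstddef>
#include <mutex>
#include <shared_mutex>

struct growable_table /* hypothetical illustration */
{
  void rehash_if_full()
  {
    std::unique_lock<std::shared_mutex> lck{m};
    if(size==max_load){ /* recheck: another thread may have rehashed already */
      max_load*=2;      /* stand-in for unchecked_rehash_for_growth() */
    }
  }

  std::shared_mutex m;
  std::size_t       size=0;
  std::size_t       max_load=16;
};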

View File

@ -1234,7 +1234,7 @@ alloc_make_insert_type(const Allocator& al,Args&&... args)
template<
typename TypePolicy,typename Group,template<typename...> class Arrays,
- typename SizeImpl,typename Hash,typename Pred,typename Allocator
+ typename SizeControl,typename Hash,typename Pred,typename Allocator
>
class
@ -1258,7 +1258,7 @@ public:
using alloc_traits=boost::allocator_traits<Allocator>;
using element_type=typename type_policy::element_type;
using arrays_type=Arrays<element_type,group_type,size_policy>;
- using size_impl_type=SizeImpl;
+ using size_ctrl_type=SizeControl;
using key_type=typename type_policy::key_type;
using init_type=typename type_policy::init_type;
@ -1279,7 +1279,7 @@ public:
const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
hash_base{empty_init,h_},pred_base{empty_init,pred_},
allocator_base{empty_init,al_},arrays(new_arrays(n)),
- ml{initial_max_load()},size_{0}
+ size_ctrl{initial_max_load(),0}
{}
table_core(const table_core& x):
@ -1293,11 +1293,11 @@ public:
hash_base{empty_init,std::move(x.h())},
pred_base{empty_init,std::move(x.pred())},
allocator_base{empty_init,std::move(x.al())},
- arrays(x.arrays),ml{std::size_t(x.ml)},size_{std::size_t(x.size_)}
+ arrays(x.arrays),size_ctrl{x.size_ctrl}
{
x.arrays=x.new_arrays(0);
- x.ml=x.initial_max_load();
- x.size_=0;
+ x.size_ctrl.ml=x.initial_max_load();
+ x.size_ctrl.size=0;
}
table_core(const table_core& x,const Allocator& al_):
@ -1311,8 +1311,7 @@ public:
{
if(al()==x.al()){
std::swap(arrays,x.arrays);
- swap_size_impl(size_,x.size_);
- swap_size_impl(ml,x.ml);
+ swap(size_ctrl,x.size_ctrl);
}
else{
reserve(x.size());
@ -1410,8 +1409,7 @@ public:
reserve(0);
move_assign_if<pocma>(al(),x.al());
swap(arrays,x.arrays);
- swap_size_impl(ml,x.ml);
- swap_size_impl(size_,x.size_);
+ swap(size_ctrl,x.size_ctrl);
}
else{
/* noshrink: favor memory reuse over tightness */
@ -1437,7 +1435,7 @@ public:
allocator_type get_allocator()const noexcept{return al();}
bool empty()const noexcept{return size()==0;}
- std::size_t size()const noexcept{return size_;}
+ std::size_t size()const noexcept{return size_ctrl.size;}
std::size_t max_size()const noexcept{return SIZE_MAX;}
BOOST_FORCEINLINE
@ -1522,8 +1520,7 @@ public:
swap(h(),x.h());
swap(pred(),x.pred());
swap(arrays,x.arrays);
- swap_size_impl(ml,x.ml);
- swap_size_impl(size_,x.size_);
+ swap(size_ctrl,x.size_ctrl);
}
void clear()noexcept
@ -1541,8 +1538,8 @@ public:
pg->initialize();
}
arrays.groups[arrays.groups_size_mask].set_sentinel();
- ml=initial_max_load();
- size_=0;
+ size_ctrl.ml=initial_max_load();
+ size_ctrl.size=0;
}
}
@ -1562,7 +1559,7 @@ public:
float max_load_factor()const noexcept{return mlf;}
- std::size_t max_load()const noexcept{return ml;}
+ std::size_t max_load()const noexcept{return size_ctrl.ml;}
void rehash(std::size_t n)
{
@ -1678,7 +1675,7 @@ public:
{
auto res=nosize_unchecked_emplace_at(
arrays,pos0,hash,std::forward<Args>(args)...);
- ++size_;
+ ++size_ctrl.size;
return res;
}
@ -1708,7 +1705,7 @@ public:
/* new_arrays_ lifetime taken care of by unchecked_rehash */
unchecked_rehash(new_arrays_);
- ++size_;
+ ++size_ctrl.size;
return it;
}
@ -1725,7 +1722,7 @@ public:
auto new_arrays_=new_arrays(n);
delete_arrays(arrays);
arrays=new_arrays_;
- ml=initial_max_load();
+ size_ctrl.ml=initial_max_load();
}
}
}
@ -1786,11 +1783,7 @@ public:
}
arrays_type arrays;
- unsigned char pad1[64];
- size_impl_type ml;
- unsigned char pad2[64];
- size_impl_type size_;
- unsigned char pad3[64];
+ size_ctrl_type size_ctrl;
private:
template<
@ -1809,7 +1802,7 @@ private:
hash_base{empty_init,std::move(h_)},
pred_base{empty_init,std::move(pred_)},
allocator_base{empty_init,al_},arrays(new_arrays(0)),
- ml{initial_max_load()},size_{0}
+ size_ctrl{initial_max_load(),0}
{}
arrays_type new_arrays(std::size_t n)
@ -1879,7 +1872,7 @@ private:
if(arrays.elements){
copy_elements_array_from(x);
copy_groups_array_from(x);
- size_=std::size_t(x.size_);
+ size_ctrl.size=std::size_t(x.size_ctrl.size);
}
}
@ -1964,23 +1957,15 @@ private:
}
}
- static inline void swap_size_impl(size_impl_type& x,size_impl_type& y)
- {
- /* std::swap can't be used on non-assignable atomics */
- std::size_t tmp=x;
- x=static_cast<std::size_t>(y);
- y=tmp;
- }
void recover_slot(unsigned char* pc)
{
/* If this slot potentially caused overflow, we decrease the maximum load so
* that average probe length won't increase unboundedly in repeated
* insert/erase cycles (drift).
*/
- ml-=group_type::maybe_caused_overflow(pc);
+ size_ctrl.ml-=group_type::maybe_caused_overflow(pc);
group_type::reset(pc);
- --size_;
+ --size_ctrl.size;
}
void recover_slot(group_type* pg,std::size_t pos)
@ -2046,7 +2031,7 @@ private:
}
delete_arrays(arrays);
arrays=new_arrays_;
- ml=initial_max_load();
+ size_ctrl.ml=initial_max_load();
}
template<typename Value>

View File

@ -46,6 +46,12 @@ struct plain_integral
Integral n;
};
+ struct plain_size_control
+ {
+ std::size_t ml;
+ std::size_t size;
+ };
template<typename,typename,typename,typename>
class table;
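plain_size_control is the non-concurrent counterpart: no atomics and no padding, just the two counters grouped so that table_core addresses a single size_ctrl member through .ml and .size no matter which control type it was instantiated with. A minimal sketch of that shared member contract (illustrative types and function, not the library's):

#include <atomic>
#include <cstddef>

struct plain_control  { std::size_t ml; std::size_t size; };
struct atomic_control { std::atomic<std::size_t> ml; std::atomic<std::size_t> size; };

/* generic table code only needs .size and .ml to read as std::size_t */
template<typename SizeControl>
bool below_max_load(const SizeControl& c)
{
  return c.size<c.ml;
}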
@ -221,7 +227,7 @@ private:
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using table_core_impl=
table_core<TypePolicy,group15<plain_integral>,table_arrays,
- std::size_t,Hash,Pred,Allocator>;
+ plain_size_control,Hash,Pred,Allocator>;
#include <boost/unordered/detail/foa/ignore_wshadow.hpp>
@ -475,7 +481,7 @@ private:
if(loc){
return {make_iterator(loc),false};
}
- if(BOOST_LIKELY(this->size_<this->ml)){
+ if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
return {
make_iterator(
this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...)),