removed BOOST_UNORDERED_EMBEDDED_GROUP_ACCESS support

joaquintides
2023-03-31 18:52:00 +02:00
parent 863984a7c8
commit b86dee9a18
2 changed files with 24 additions and 78 deletions

include/boost/unordered/detail/foa/concurrent_table.hpp

@@ -156,13 +156,13 @@ struct atomic_integral
   std::atomic<Integral> n;
 };
 
+/* Group-level concurrency protection. It provides a rw mutex plus an
+ * atomic insertion counter for optimistic insertion (see
+ * unprotected_norehash_emplace_or_visit).
+ */
+
 struct group_access
 {
-  struct dummy_group_access_type
-  {
-    boost::uint32_t storage[2]={0,0};
-  };
-
   using mutex_type=rw_spinlock;
   using shared_lock_guard=shared_lock<mutex_type>;
   using exclusive_lock_guard=lock_guard<mutex_type>;
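The comment added above names the scheme precisely: each group pairs a reader-writer spinlock with an atomic insertion counter that unprotected_norehash_emplace_or_visit uses for optimistic insertion. A minimal self-contained sketch of that counter protocol, with invented names rather than Boost's internals:

#include <atomic>
#include <cstdint>

struct insert_counter_sketch
{
  std::atomic<std::uint32_t> cnt{0};

  // Writer: bump after a successful insertion into the group, while
  // still holding the group's exclusive lock.
  void bump(){cnt.fetch_add(1,std::memory_order_release);}

  // Reader: sample before an unlocked probe and re-check afterwards.
  // A changed value means an insertion raced with the probe, so the
  // caller falls back to the locked slow path and retries.
  std::uint32_t sample()const{return cnt.load(std::memory_order_acquire);}
  bool unchanged(std::uint32_t s)const{return sample()==s;}
};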
@@ -174,31 +174,25 @@ struct group_access
 private:
   mutex_type          m;
-  insert_counter_type cnt;
+  insert_counter_type cnt=0;
 };
 
-template<typename Group>
-struct concurrent_group:Group,group_access
-{
-  struct dummy_group_type
-  {
-    typename Group::dummy_group_type      group_storage;
-    group_access::dummy_group_access_type access_storage;
-  };
-};
-
 template<std::size_t Size>
 group_access* dummy_group_accesses()
 {
-  /* TODO: describe
+  /* Default group_access array to provide to empty containers without
+   * incurring dynamic allocation. Mutexes won't actually ever be used,
+   * (no successful reduced hash match) and insertion counters won't ever
+   * be incremented (insertions won't succeed as capacity()==0).
    */
 
-  static group_access::dummy_group_access_type
-  storage[Size]={typename group_access::dummy_group_access_type(),};
-  return reinterpret_cast<group_access*>(storage);
+  static group_access accesses[Size];
+
+  return accesses;
 }
 
+/* subclasses table_arrays to add an additional group_access array */
+
 template<typename Value,typename Group,typename SizePolicy>
 struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
 {
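The rewritten dummy_group_accesses is simpler in kind, not just in wording: the old version punned a raw uint32_t buffer into group_access* via reinterpret_cast, while the new one declares an actual function-local static array, which works because group_access is now cheaply default-constructible (an unlocked spinlock and cnt=0 are all-zero states). A sketch of the shared-dummy idiom with stand-in types (std::mutex instead of rw_spinlock; all names invented):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <mutex>

struct group_access_sketch
{
  std::mutex                 m;      // the real code uses a rw spinlock
  std::atomic<std::uint32_t> cnt{0};
};

// One shared array per Size: every empty container points here, so
// default-constructing a container performs no dynamic allocation.
template<std::size_t Size>
group_access_sketch* dummy_group_accesses_sketch()
{
  static group_access_sketch accesses[Size];
  return accesses;
}

struct empty_table_sketch
{
  group_access_sketch* group_accesses=dummy_group_accesses_sketch<1>();
  std::size_t capacity()const{return 0;} // inserts rehash before touching cnt
};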
@@ -225,7 +219,7 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
         access_traits::allocate(aal,arrays.groups_size_mask+1));
       for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
-        new(arrays.group_accesses+i) group_access();
+        ::new (arrays.group_accesses+i) group_access();
       }
     }
     BOOST_CATCH(...){
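The change from new(...) to ::new (...) inside the loop is more than cosmetic: the qualified form always selects the global placement operator new, so a type that declares its own class-specific operator new can neither hijack nor accidentally break the construction. A compile-time illustration (not library code):

#include <cstddef>
#include <new>

struct widget
{
  // Any class-specific allocation function hides the global placement
  // form from unqualified placement-new lookup.
  static void* operator new(std::size_t n){return ::operator new(n);}
};

void construct_at_sketch(void* p)
{
  ::new (p) widget(); // OK: global placement operator new
  // new (p) widget(); // error: class-scope lookup finds no placement form
}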
@@ -263,16 +257,7 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
 template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
 using concurrent_table_core_impl=table_core<
-  TypePolicy,
-#if defined(BOOST_UNORDERED_EMBEDDED_GROUP_ACCESS)
-  concurrent_group<group15<atomic_integral>>,
-  table_arrays,
-#else
-  group15<atomic_integral>,
-  concurrent_table_arrays,
-#endif
+  TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
   std::atomic<std::size_t>,Hash,Pred,Allocator>;
 
 #include <boost/unordered/detail/foa/ignore_wshadow.hpp>
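With the macro gone, concurrent_table_core_impl is pinned to one data layout: plain group15<atomic_integral> metadata groups in one array, and the lock/counter state in a parallel array added by concurrent_table_arrays. The deleted branch instead embedded group_access into every group via concurrent_group. Schematic contrast of the two layouts (types abbreviated, not the real definitions):

struct group_sketch  {unsigned char metadata[16];};  // stand-in for group15
struct access_sketch {/* rw mutex + insertion counter */};

// Removed (embedded, array-of-structs): lock state interleaved with the
// metadata, one allocation, but each group's stride grows past the
// 16-byte metadata block that lookup scans.
struct embedded_group_sketch:group_sketch,access_sketch{};
// embedded_group_sketch groups[n];

// Kept (parallel, struct-of-arrays): the same index addresses both
// arrays, keeping the metadata array dense for scanning.
// group_sketch  groups[n];
// access_sketch accesses[n];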
@@ -701,16 +686,9 @@ private:
   using shared_lock_guard=shared_lock<mutex_type>;
   using exclusive_lock_guard=lock_guard<multimutex_type>;
   using exclusive_bilock_guard=scoped_bilock<multimutex_type>;
-#if defined(BOOST_UNORDERED_EMBEDDED_GROUP_ACCESS)
-  using group_shared_lock_guard=typename group_type::shared_lock_guard;
-  using group_exclusive_lock_guard=typename group_type::exclusive_lock_guard;
-  using group_insert_counter_type=typename group_type::insert_counter_type;
-#else
   using group_shared_lock_guard=typename group_access::shared_lock_guard;
   using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard;
   using group_insert_counter_type=typename group_access::insert_counter_type;
-#endif
 
   concurrent_table(const concurrent_table& x,exclusive_lock_guard):
     super{x}{}
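Among the aliases that survive, exclusive_bilock_guard=scoped_bilock<multimutex_type> serves operations that must lock two tables at once (the {x.mutexes,y.mutexes} construction in the next hunk), where acquiring the two mutex sets in arbitrary order could deadlock. The classic fix, sketched here with plain std::mutex rather than Boost's multimutex (names invented), is to impose a global acquisition order, e.g. by address:

#include <mutex>

struct scoped_bilock_sketch
{
  std::mutex* first;
  std::mutex* second;

  scoped_bilock_sketch(std::mutex& a,std::mutex& b)
  {
    first =&a<&b?&a:&b;              // address order is total and
    second=&a<&b?&b:&a;              // consistent across threads
    first->lock();
    if(first!=second)second->lock(); // same object: lock only once
  }

  ~scoped_bilock_sketch()
  {
    if(first!=second)second->unlock();
    first->unlock();
  }
};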
@@ -741,38 +719,6 @@ private:
     return {x.mutexes,y.mutexes};
   }
 
-#if defined(BOOST_UNORDERED_EMBEDDED_GROUP_ACCESS)
-  inline group_shared_lock_guard shared_access(std::size_t pos)const
-  {
-    return this->arrays.groups[pos].shared_access();
-  }
-
-  inline group_exclusive_lock_guard exclusive_access(std::size_t pos)const
-  {
-    return this->arrays.groups[pos].exclusive_access();
-  }
-
-  inline group_insert_counter_type& insert_counter(std::size_t pos)const
-  {
-    return this->arrays.groups[pos].insert_counter();
-  }
-#else
-  inline group_shared_lock_guard shared_access(std::size_t pos)const
-  {
-    return this->arrays.group_accesses[pos].shared_access();
-  }
-
-  inline group_exclusive_lock_guard exclusive_access(std::size_t pos)const
-  {
-    return this->arrays.group_accesses[pos].exclusive_access();
-  }
-
-  inline group_insert_counter_type& insert_counter(std::size_t pos)const
-  {
-    return this->arrays.group_accesses[pos].insert_counter();
-  }
-#endif
-
   /* Tag-dispatched shared/exclusive group access */
 
   using group_shared=std::false_type;
@@ -780,13 +726,18 @@ private:
   inline group_shared_lock_guard access(group_shared,std::size_t pos)const
   {
-    return shared_access(pos);
+    return this->arrays.group_accesses[pos].shared_access();
   }
 
   inline group_exclusive_lock_guard access(
     group_exclusive,std::size_t pos)const
   {
-    return exclusive_access(pos);
+    return this->arrays.group_accesses[pos].exclusive_access();
+  }
+
+  inline group_insert_counter_type& insert_counter(std::size_t pos)const
+  {
+    return this->arrays.group_accesses[pos].insert_counter();
   }
 
   /* Const casts value_type& according to the level of group access for
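These access overloads are the "tag-dispatched" machinery the comment above announces: group_shared and group_exclusive are aliases for std::false_type and std::true_type, so generic lookup code passes a tag object and the compiler statically selects the locking flavor. A self-contained illustration of the technique:

#include <cstddef>
#include <cstdio>
#include <type_traits>

using shared_tag   =std::false_type;
using exclusive_tag=std::true_type;

// One overload per locking flavor; the empty tag costs nothing at runtime.
void access(shared_tag,std::size_t pos)   {std::printf("shared %zu\n",pos);}
void access(exclusive_tag,std::size_t pos){std::printf("exclusive %zu\n",pos);}

// Generic code written once, instantiated for either flavor.
template<typename GroupAccessMode>
void probe(GroupAccessMode mode,std::size_t pos){access(mode,pos);}

int main()
{
  probe(shared_tag{},3);    // prints "shared 3"
  probe(exclusive_tag{},7); // prints "exclusive 7"
}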

include/boost/unordered/detail/foa/core.hpp

@@ -937,15 +937,10 @@ struct table_arrays
         reinterpret_cast<uintptr_t>(p))%sizeof(group_type);
       arrays.groups=reinterpret_cast<group_type*>(p);
 
-      for (std::size_t i=0;i<groups_size;++i){
-        new(arrays.groups+i) group_type();
-        arrays.groups[i].initialize();
-      }
-
       /* memset is faster/not slower than initializing groups individually.
        * This assumes all zeros is group_type's default layout.
        */
 
-      // std::memset(arrays.groups,0,sizeof(group_type)*groups_size);
+      std::memset(arrays.groups,0,sizeof(group_type)*groups_size);
       arrays.groups[groups_size-1].set_sentinel();
     }
     return arrays;
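Enabling the memset and deleting the placement-new loop leans on the precondition spelled out in the comment kept just above it: group_type's default state must be all-zero bytes. A sketch of the idiom together with the kind of compile-time guard such code can carry (group15 itself is not reproduced here):

#include <cstddef>
#include <cstring>
#include <type_traits>

struct group_sketch
{
  unsigned char metadata[16]; // value-initialization zeroes this
};

// memset may only stand in for constructing each element when the type
// is trivial; the all-zeros-equals-default property itself is a layout
// contract the type's author must uphold.
static_assert(std::is_trivially_default_constructible<group_sketch>::value,
  "memset cannot stand in for construction of a non-trivial type");

void init_groups(group_sketch* groups,std::size_t n)
{
  std::memset(groups,0,sizeof(group_sketch)*n); // one pass over all groups
  // the real code then marks the last group: groups[n-1].set_sentinel()
}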