forked from boostorg/unordered

made concurrent_table use table_core's automatic capacity growth formula

committed by Christian Mazakas
parent 41abb96d83
commit 2fae05ed31

@@ -621,7 +621,6 @@ private:
   using group_insert_counter_type=typename group_access::insert_counter_type;
 #endif
 
-
   concurrent_table(const concurrent_table& x,exclusive_lock_guard):
     super{x}{}
   concurrent_table(concurrent_table&& x,exclusive_lock_guard):

@@ -901,8 +900,7 @@ private:
   void rehash_if_full()
   {
     auto lck=exclusive_access();
-    // TODO: use same mechanism as unchecked_emplace_with_rehash
-    if(this->size_==this->ml)super::rehash(super::capacity()+1);
+    if(this->size_==this->ml)this->unchecked_rehash_for_growth();
   }
 
 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)

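For scale: the removed line asked the sizing policy for a single slot past the current capacity, while the new call requests ceil((size + size/61 + 1)/mlf) slots (see the new_arrays_for_growth hunk below). A standalone sketch of the two requests, assuming the 0.875 maximum load factor of Boost.Unordered's open-addressing containers and a made-up capacity:

    #include <cmath>
    #include <cstddef>
    #include <iostream>

    int main()
    {
      /* illustrative snapshot of a table at full load (size_==ml); 0.875 is
       * Boost.Unordered's fixed max load factor, the capacity is made up */
      std::size_t capacity=1024;
      float       mlf=0.875f;
      std::size_t size=std::size_t(capacity*mlf);           /* 896  */

      std::size_t old_request=capacity+1;                   /* 1025 */
      std::size_t new_request=std::size_t(                  /* ceil(911/0.875) */
        std::ceil(static_cast<float>(size+size/61+1)/mlf)); /* = 1042 */

      std::cout<<old_request<<" vs "<<new_request<<"\n";    /* 1025 vs 1042 */
    }

At a clean full load both requests typically round up to the same next capacity level; the padded request matters in the anti-drift corner case described in the comment further down, where erasures would otherwise map the request back to the current capacity.
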
@@ -1546,22 +1546,17 @@ public:
     return res;
   }
 
+  BOOST_NOINLINE void unchecked_rehash_for_growth()
+  {
+    auto new_arrays_=new_arrays_for_growth();
+    unchecked_rehash(new_arrays_);
+  }
+
   template<typename... Args>
   BOOST_NOINLINE locator
   unchecked_emplace_with_rehash(std::size_t hash,Args&&... args)
   {
-    /* Due to the anti-drift mechanism (see recover_slot), new_arrays_ may be
-     * of the same size as the old arrays; in the limit, erasing one element at
-     * full load and then inserting could bring us back to the same capacity
-     * after a costly rehash. To avoid this, we jump to the next capacity level
-     * when the number of erased elements is <= 10% of total elements at full
-     * load, which is implemented by requesting additional F*size elements,
-     * with F = P * 10% / (1 - P * 10%), where P is the probability of an
-     * element having caused overflow; P has been measured as ~0.162 under
-     * ideal conditions, yielding F ~ 0.0165 ~ 1/61.
-     */
-    auto new_arrays_=new_arrays(std::size_t(
-      std::ceil(static_cast<float>(size()+size()/61+1)/mlf)));
+    auto new_arrays_=new_arrays_for_growth();
     locator it;
     BOOST_TRY{
       /* strong exception guarantee -> try insertion before rehash */

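The truncated tail of this hunk points at how the strong exception guarantee is kept: the new element is constructed in the freshly allocated arrays before any existing element is migrated, so a throwing constructor leaves the original table untouched. A minimal sketch of that ordering, using a plain std::vector and a hypothetical grow_and_insert helper rather than the library's actual machinery (migration is assumed non-throwing here to keep the sketch small; the real code carries more recovery logic):

    #include <utility>
    #include <vector>

    template<typename T>
    void grow_and_insert(std::vector<T>& elems,const T& value)
    {
      std::vector<T> bigger;
      bigger.reserve(elems.size()*2+1);

      bigger.push_back(value);          /* may throw: elems still untouched  */

      for(T& x:elems)                   /* the "rehash" step: move everything */
        bigger.push_back(std::move(x)); /* over; assumed not to throw         */

      elems.swap(bigger);               /* commit; swap never throws          */
    }
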
@@ -1664,6 +1659,22 @@ private:
     return arrays_type::new_(al(),n);
   }
 
+  arrays_type new_arrays_for_growth()
+  {
+    /* Due to the anti-drift mechanism (see recover_slot), the new arrays may
+     * be of the same size as the old arrays; in the limit, erasing one
+     * element at full load and then inserting could bring us back to the same
+     * capacity after a costly rehash. To avoid this, we jump to the next
+     * capacity level when the number of erased elements is <= 10% of total
+     * elements at full load, which is implemented by requesting additional
+     * F*size elements, with F = P * 10% / (1 - P * 10%), where P is the
+     * probability of an element having caused overflow; P has been measured as
+     * ~0.162 under ideal conditions, yielding F ~ 0.0165 ~ 1/61.
+     */
+    return new_arrays(std::size_t(
+      std::ceil(static_cast<float>(size()+size()/61+1)/mlf)));
+  }
+
   void delete_arrays(arrays_type& arrays_)noexcept
   {
     arrays_type::delete_(al(),arrays_);

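The constant in the comment above checks out: F = P * 10% / (1 - P * 10%) = 0.0162/0.9838 ~ 0.0165, and 1/0.0165 ~ 61, which is where the size()/61 padding term in the request comes from. A quick numeric check:

    #include <iostream>

    int main()
    {
      /* reproduce the comment's derivation of F with the measured P ~ 0.162 */
      double P=0.162;
      double F=(P*0.10)/(1.0-P*0.10);
      std::cout<<"F   = "<<F<<"\n";      /* ~0.016467              */
      std::cout<<"1/F = "<<1.0/F<<"\n";  /* ~60.7, i.e. F ~ 1/61,  */
    }                                    /* hence the size()/61 term */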