From 2fae05ed316a81e9d6b795367106cfaa410b7419 Mon Sep 17 00:00:00 2001
From: joaquintides
Date: Sun, 19 Mar 2023 13:08:29 +0100
Subject: [PATCH] made concurrent_table use table_core's automatic capacity growth formula

---
 .../unordered/detail/foa/concurrent_table.hpp |  4 +--
 include/boost/unordered/detail/foa/core.hpp   | 35 ++++++++++++-------
 2 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/include/boost/unordered/detail/foa/concurrent_table.hpp b/include/boost/unordered/detail/foa/concurrent_table.hpp
index c010997f..c33f594d 100644
--- a/include/boost/unordered/detail/foa/concurrent_table.hpp
+++ b/include/boost/unordered/detail/foa/concurrent_table.hpp
@@ -621,7 +621,6 @@ private:
   using group_insert_counter_type=typename group_access::insert_counter_type;
 #endif
 
-
   concurrent_table(const concurrent_table& x,exclusive_lock_guard):
     super{x}{}
   concurrent_table(concurrent_table&& x,exclusive_lock_guard):
@@ -901,8 +900,7 @@ private:
   void rehash_if_full()
   {
     auto lck=exclusive_access();
-    // TODO: use same mechanism as unchecked_emplace_with_rehash
-    if(this->size_==this->ml)super::rehash(super::capacity()+1);
+    if(this->size_==this->ml)this->unchecked_rehash_for_growth();
   }
 
 #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
diff --git a/include/boost/unordered/detail/foa/core.hpp b/include/boost/unordered/detail/foa/core.hpp
index 6d7e8df4..feefe33c 100644
--- a/include/boost/unordered/detail/foa/core.hpp
+++ b/include/boost/unordered/detail/foa/core.hpp
@@ -1546,22 +1546,17 @@ public:
     return res;
   }
 
+  BOOST_NOINLINE void unchecked_rehash_for_growth()
+  {
+    auto new_arrays_=new_arrays_for_growth();
+    unchecked_rehash(new_arrays_);
+  }
+
   template<typename... Args>
   BOOST_NOINLINE locator
   unchecked_emplace_with_rehash(std::size_t hash,Args&&... args)
   {
-    /* Due to the anti-drift mechanism (see recover_slot), new_arrays_ may be
-     * of the same size as the old arrays; in the limit, erasing one element at
-     * full load and then inserting could bring us back to the same capacity
-     * after a costly rehash. To avoid this, we jump to the next capacity level
-     * when the number of erased elements is <= 10% of total elements at full
-     * load, which is implemented by requesting additional F*size elements,
-     * with F = P * 10% / (1 - P * 10%), where P is the probability of an
-     * element having caused overflow; P has been measured as ~0.162 under
-     * ideal conditions, yielding F ~ 0.0165 ~ 1/61.
-     */
-    auto new_arrays_=new_arrays(std::size_t(
-      std::ceil(static_cast<float>(size()+size()/61+1)/mlf)));
+    auto new_arrays_=new_arrays_for_growth();
     locator it;
     BOOST_TRY{
       /* strong exception guarantee -> try insertion before rehash */
@@ -1664,6 +1659,22 @@ private:
     return arrays_type::new_(al(),n);
   }
 
+  arrays_type new_arrays_for_growth()
+  {
+    /* Due to the anti-drift mechanism (see recover_slot), the new arrays may
+     * be of the same size as the old arrays; in the limit, erasing one
+     * element at full load and then inserting could bring us back to the same
+     * capacity after a costly rehash. To avoid this, we jump to the next
+     * capacity level when the number of erased elements is <= 10% of total
+     * elements at full load, which is implemented by requesting additional
+     * F*size elements, with F = P * 10% / (1 - P * 10%), where P is the
+     * probability of an element having caused overflow; P has been measured as
+     * ~0.162 under ideal conditions, yielding F ~ 0.0165 ~ 1/61.
+     */
+    return new_arrays(std::size_t(
+      std::ceil(static_cast<float>(size()+size()/61+1)/mlf)));
+  }
+
   void delete_arrays(arrays_type& arrays_)noexcept
   {
     arrays_type::delete_(al(),arrays_);
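
Note (illustrative, not part of the patch): the standalone sketch below evaluates the same growth request that new_arrays_for_growth() issues, to make the 1/61 headroom concrete. The helper name requested_elements, the sample size of one million elements and the 0.875 maximum load factor are assumptions made for this example; only the size+size/61+1 formula comes from the patch.

// Minimal sketch of the capacity growth request used in new_arrays_for_growth().
// requested_elements, the sample size and the 0.875 load factor are assumptions
// for illustration, not identifiers taken from Boost.Unordered.
#include <cmath>
#include <cstddef>
#include <cstdio>

std::size_t requested_elements(std::size_t size,float mlf)
{
  /* Ask for size+size/61+1 elements (~1.65% headroom) so that a rehash
   * triggered at full load lands on a strictly larger capacity even after
   * the anti-drift mechanism has marked up to ~10% of slots as erased. */
  return std::size_t(std::ceil(static_cast<float>(size+size/61+1)/mlf));
}

int main()
{
  /* For a table holding 1,000,000 elements: 1,000,000/61 = 16,393, so the
   * request is ceil(1,016,394/0.875) = 1,161,594 element slots. */
  std::printf("%zu\n",requested_elements(1000000,0.875f));
  return 0;
}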