Sync from upstream.

This commit is contained in:
Rene Rivera
2024-03-23 15:41:41 -05:00
3 changed files with 76 additions and 20 deletions

View File

@ -568,7 +568,6 @@ struct optimize_multikey_is_true
static const bool value = sizeof(test<T>(0)) > sizeof(detail::yes_type)*2u;
};
template<bool StoreHash>
struct insert_commit_data_impl
{
std::size_t hash;
@ -580,17 +579,6 @@ struct insert_commit_data_impl
{ hash = h; }
};
//! Specialization for containers that do not cache the hash value:
//! only the destination bucket index is carried from check to commit.
template<>
struct insert_commit_data_impl<false>
{
   std::size_t bucket_idx;

   //! Nothing is cached, so the incoming hash is simply discarded.
   inline void set_hash(std::size_t)
   {}

   //! No cached hash exists; report zero.
   inline std::size_t get_hash() const
   {  return 0u;  }
};
template<class Node, class SlistNodePtr>
inline typename pointer_traits<SlistNodePtr>::template rebind_pointer<Node>::type
dcast_bucket_ptr(const SlistNodePtr &p)
@ -2367,7 +2355,7 @@ class hashtable_impl
/// @endcond
public:
typedef insert_commit_data_impl<store_hash> insert_commit_data;
typedef insert_commit_data_impl insert_commit_data;
private:
void default_init_actions()
@ -2729,7 +2717,7 @@ class hashtable_impl
insert_commit_data commit_data;
std::pair<iterator, bool> ret = this->insert_unique_check(key_of_value()(value), commit_data);
if(ret.second){
ret.first = this->insert_unique_commit(value, commit_data);
ret.first = this->insert_unique_fast_commit(value, commit_data);
}
return ret;
}
@ -2856,7 +2844,43 @@ class hashtable_impl
//! erased between the "insert_check" and "insert_commit" calls.
//!
//! After a successful rehashing insert_commit_data remains valid.
iterator insert_unique_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
//! Commits an insertion previously validated by insert_unique_check.
//! commit_data must come from that check and the container must not have
//! changed in between; rehashing support depends on the stored hash.
iterator insert_unique_commit(reference value, const insert_commit_data& commit_data) BOOST_NOEXCEPT
{
//Recompute the destination bucket from the hash cached in commit_data.
size_type bucket_num = this->priv_hash_to_nbucket(commit_data.get_hash());
bucket_type& b = this->priv_bucket(bucket_num);
//Account for the new element before linking it.
this->priv_size_traits().increment();
//Obtain the hook node embedded in the value.
node_ptr const n = pointer_traits<node_ptr>::pointer_to(this->priv_value_to_node(value));
//Safe-mode hooks must not already be linked into a container.
BOOST_INTRUSIVE_SAFE_HOOK_DEFAULT_ASSERT(!safemode_or_autounlink || slist_node_algorithms::unique(n));
//Tag-dispatched on store_hash_t: caches the hash in the node when enabled
//(presumably a no-op otherwise — dispatch target not visible here).
node_functions_t::store_hash(n, commit_data.get_hash(), store_hash_t());
//Keep the first/last used bucket cache consistent with this insertion.
this->priv_insertion_update_cache(bucket_num);
//Tag-dispatched on optimize_multikey_t: make the node its own group.
group_functions_t::insert_in_group(n, n, optimize_multikey_t());
//Link at the front of the bucket's singly linked list.
slist_node_algorithms::link_after(b.get_node_ptr(), n);
return this->build_iterator(siterator(n), this->to_ptr(b));
}
//! <b>Requires</b>: value must be an lvalue of type value_type. commit_data
//! must have been obtained from a previous call to "insert_check".
//! No objects should have been inserted or erased from the unordered_set between
//! the "insert_check" that filled "commit_data" and the call to "insert_fast_commit".
//!
//! No rehashing shall be performed between `insert_check` and `insert_fast_commit`.
//!
//! <b>Effects</b>: Inserts the value in the unordered_set using the information obtained
//! from the "commit_data" that a previous "insert_check" filled.
//!
//! <b>Returns</b>: An iterator to the newly inserted object.
//!
//! <b>Complexity</b>: Constant time.
//!
//! <b>Throws</b>: Nothing.
//!
//! <b>Notes</b>: This function only makes sense if an "insert_check" has been
//! previously executed to fill "commit_data". No value should be inserted or
//! erased between the "insert_check" and "insert_fast_commit" calls.
//!
//! Since this commit operation does not support rehashing between the check
//! and the commit, it's faster than `insert_commit`.
iterator insert_unique_fast_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
{
this->priv_size_inc();
node_ptr const n = this->priv_value_to_node_ptr(value);

View File

@ -222,6 +222,10 @@ class unordered_set_impl
inline iterator insert_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
{
   //Delegate to the base hashtable's unique-insert commit step.
   return table_type::insert_unique_commit(value, commit_data);
}
//! @copydoc ::boost::intrusive::hashtable::insert_unique_fast_commit
inline iterator insert_fast_commit(reference value, const insert_commit_data &commit_data) BOOST_NOEXCEPT
{
   //Delegate to the base hashtable's rehash-free commit step.
   return table_type::insert_unique_fast_commit(value, commit_data);
}
#ifdef BOOST_INTRUSIVE_DOXYGEN_INVOKED
//! @copydoc ::boost::intrusive::hashtable::erase(const_iterator)

View File

@ -158,20 +158,48 @@ void test_unordered<ContainerDefiner>::test_insert(value_cont_type& values, deta
<>::type unordered_set_type;
typedef typename unordered_set_type::bucket_traits bucket_traits;
typedef typename unordered_set_type::key_of_value key_of_value;
typedef typename unordered_set_type::bucket_ptr bucket_ptr;
const std::size_t ExtraBuckets = unordered_set_type::bucket_overhead;
typename unordered_set_type::bucket_type buckets[BucketSize + ExtraBuckets];
unordered_set_type testset(bucket_traits(
pointer_traits<typename unordered_set_type::bucket_ptr>::
pointer_to(buckets[0]), sizeof(buckets)/sizeof(*buckets)));
const bucket_traits orig_bucket_traits( pointer_traits<bucket_ptr>::pointer_to(buckets[0])
, sizeof(buckets) / sizeof(*buckets));
unordered_set_type testset(orig_bucket_traits);
testset.insert(&values[0] + 2, &values[0] + 5);
typename unordered_set_type::insert_commit_data commit_data;
BOOST_TEST ((!testset.insert_check(key_of_value()(values[2]), commit_data).second));
BOOST_TEST (( testset.insert_check(key_of_value()(values[0]), commit_data).second));
//Test insert_fast_commit
{
BOOST_TEST(testset.find(key_of_value()(values[0])) == testset.end());
testset.insert_fast_commit(values[0], commit_data);
BOOST_TEST(testset.find(key_of_value()(values[0])) != testset.end());
testset.erase(key_of_value()(values[0]));
BOOST_TEST(testset.find(key_of_value()(values[0])) == testset.end());
}
//Test insert_commit
BOOST_IF_CONSTEXPR(!unordered_set_type::incremental)
{
BOOST_TEST((testset.insert_check(key_of_value()(values[0]), commit_data).second));
typename unordered_set_type::bucket_type buckets2[2U + ExtraBuckets];
//Two rehashes to be compatible with incremental hashing
testset.rehash(bucket_traits(
pointer_traits<bucket_ptr>::pointer_to(buckets2[0]), 2U + ExtraBuckets));
testset.insert_commit(values[0], commit_data);
BOOST_TEST(testset.find(key_of_value()(values[0])) != testset.end());
testset.erase(key_of_value()(values[0]));
BOOST_TEST(testset.find(key_of_value()(values[0])) == testset.end());
//Two rehashes to be compatible with incremental hashing
testset.clear();
testset.rehash(orig_bucket_traits);
testset.insert(&values[0] + 2, &values[0] + 5);
}
const unordered_set_type& const_testset = testset;
if(unordered_set_type::incremental)
BOOST_IF_CONSTEXPR(unordered_set_type::incremental)
{
{ int init_values [] = { 4, 5, 1 };
TEST_INTRUSIVE_SEQUENCE_MAYBEUNIQUE( init_values, const_testset ); }
@ -600,7 +628,7 @@ void test_unordered<ContainerDefiner>::test_rehash(value_cont_type& values, deta
typedef typename unordered_type::bucket_ptr bucket_ptr;
typename unordered_type::bucket_type buckets1[BucketSize + ExtraBuckets];
typename unordered_type::bucket_type buckets2 [2 + ExtraBuckets];
typename unordered_type::bucket_type buckets2 [BucketSize / 4 + ExtraBuckets];
typename unordered_type::bucket_type buckets3[BucketSize*2 + ExtraBuckets];
unordered_type testset1(&values[0], &values[0] + 6, bucket_traits(