diff --git a/include/boost/unordered/concurrent_flat_map.hpp b/include/boost/unordered/concurrent_flat_map.hpp
index dffad274..62d24b52 100644
--- a/include/boost/unordered/concurrent_flat_map.hpp
+++ b/include/boost/unordered/concurrent_flat_map.hpp
@@ -192,6 +192,11 @@ namespace boost {
     {
     }
 
+    concurrent_flat_map(concurrent_flat_map&& rhs, allocator_type a)
+        : table_(std::move(rhs.table_), a)
+    {
+    }
+
     /// Capacity
     ///
 
diff --git a/include/boost/unordered/detail/foa/core.hpp b/include/boost/unordered/detail/foa/core.hpp
index cc66393a..5e07151b 100644
--- a/include/boost/unordered/detail/foa/core.hpp
+++ b/include/boost/unordered/detail/foa/core.hpp
@@ -1274,12 +1274,27 @@ public:
   }
 
   table_core(table_core&& x,const Allocator& al_):
-    table_core{0,std::move(x.h()),std::move(x.pred()),al_}
+    hash_base{empty_init,std::move(x.h())},
+    pred_base{empty_init,std::move(x.pred())},
+    allocator_base{empty_init,al_},arrays(new_arrays(0)),
+    ml{initial_max_load()},size_{0}
   {
     if(al()==x.al()){
       std::swap(arrays,x.arrays);
-      std::swap(ml,x.ml);
-      std::swap(size_,x.size_);
+
+      // when SizeImpl is an atomic type, std::swap() can't be used
+      // as it's not MoveConstructible so we instead opt for this manual version
+      {
+        std::size_t tmp{size_};
+        size_=static_cast<std::size_t>(x.size_);
+        x.size_=tmp;
+      }
+
+      {
+        std::size_t tmp{ml};
+        ml=static_cast<std::size_t>(x.ml);
+        x.ml=tmp;
+      }
     }
     else{
       reserve(x.size());
diff --git a/test/cfoa/constructor_tests.cpp b/test/cfoa/constructor_tests.cpp
index 0fb5614b..6e41d951 100644
--- a/test/cfoa/constructor_tests.cpp
+++ b/test/cfoa/constructor_tests.cpp
@@ -44,9 +44,37 @@ template <class T> struct soccc_allocator
   bool operator!=(soccc_allocator const& rhs) const { return x_ != rhs.x_; }
 };
 
+template <class T> struct stateful_allocator
+{
+  int x_ = -1;
+
+  using value_type = T;
+
+  stateful_allocator() = default;
+  stateful_allocator(stateful_allocator const&) = default;
+  stateful_allocator(stateful_allocator&&) = default;
+
+  stateful_allocator(int const x) : x_{x} {}
+
+  template <class U>
+  stateful_allocator(stateful_allocator<U> const& rhs) : x_{rhs.x_}
+  {
+  }
+
+  T* allocate(std::size_t n)
+  {
+    return static_cast<T*>(::operator new(n * sizeof(T)));
+  }
+
+  void deallocate(T* p, std::size_t) { ::operator delete(p); }
+
+  bool operator==(stateful_allocator const& rhs) const { return x_ == rhs.x_; }
+  bool operator!=(stateful_allocator const& rhs) const { return x_ != rhs.x_; }
+};
+
 using hasher = stateful_hash;
 using key_equal = stateful_key_equal;
-using allocator_type = std::allocator<std::pair<raii const, raii> >;
+using allocator_type = stateful_allocator<std::pair<raii const, raii> >;
 
 using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
   key_equal, allocator_type>;
@@ -331,6 +359,7 @@
     auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
     auto reference_map =
       boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
+    raii::reset_counts();
 
     {
       map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
@@ -339,6 +368,8 @@
 
       std::atomic_uint num_transfers{0};
 
+      auto const old_mc = +raii::move_constructor;
+
       thread_runner(
         values, [&x, &reference_map, &num_transfers](
                   boost::span<std::pair<raii const, raii> > s) {
@@ -368,6 +399,105 @@
         });
 
       BOOST_TEST_EQ(num_transfers, 1u);
+      BOOST_TEST_EQ(raii::move_constructor, old_mc);
+    }
+
+    check_raii_counts();
+
+    // allocator-aware move constructor, unequal allocators
+    raii::reset_counts();
+
+    {
+      map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
+        allocator_type{1});
+
+      std::atomic_uint num_transfers{0};
+
+      auto const old_mc = +raii::move_constructor;
+      auto const old_size = x.size();
+
+      thread_runner(
+        values, [&x, &reference_map, &num_transfers, old_size](
+                  boost::span<std::pair<raii const, raii> > s) {
+          (void)s;
+
+          auto a = allocator_type{2};
+          BOOST_TEST(a != x.get_allocator());
+
+          map_type y(std::move(x), a);
+
+          if (!y.empty()) {
+            ++num_transfers;
+
+            test_matches_reference(y, reference_map);
+            BOOST_TEST_EQ(y.size(), old_size);
+            BOOST_TEST_EQ(y.hash_function(), hasher(1));
+            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
+          } else {
+            BOOST_TEST_EQ(y.size(), 0);
+            BOOST_TEST_EQ(y.hash_function(), hasher());
+            BOOST_TEST_EQ(y.key_eq(), key_equal());
+          }
+
+          BOOST_TEST_EQ(x.size(), 0);
+          BOOST_TEST_EQ(x.hash_function(), hasher());
+          BOOST_TEST_EQ(x.key_eq(), key_equal());
+
+          BOOST_TEST(y.get_allocator() != x.get_allocator());
+          BOOST_TEST(y.get_allocator() == a);
+        });
+
+      BOOST_TEST_EQ(num_transfers, 1u);
+      BOOST_TEST_EQ(raii::move_constructor, old_mc + (2 * old_size));
+    }
+
+    check_raii_counts();
+
+    // allocator-aware move constructor, equal allocators
+    raii::reset_counts();
+
+    {
+      map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
+        allocator_type{1});
+
+      std::atomic_uint num_transfers{0};
+
+      auto const old_mc = +raii::move_constructor;
+      auto const old_size = x.size();
+
+      thread_runner(
+        values, [&x, &reference_map, &num_transfers, old_size](
+                  boost::span<std::pair<raii const, raii> > s) {
+          (void)s;
+
+          auto a = allocator_type{1};
+          BOOST_TEST(a == x.get_allocator());
+
+          map_type y(std::move(x), a);
+
+          if (!y.empty()) {
+            ++num_transfers;
+
+            test_matches_reference(y, reference_map);
+            BOOST_TEST_EQ(y.size(), old_size);
+            BOOST_TEST_EQ(y.hash_function(), hasher(1));
+            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
+          } else {
+            BOOST_TEST_EQ(y.size(), 0);
+            BOOST_TEST_EQ(y.hash_function(), hasher());
+            BOOST_TEST_EQ(y.key_eq(), key_equal());
+          }
+
+          BOOST_TEST_EQ(x.size(), 0);
+          BOOST_TEST_EQ(x.hash_function(), hasher());
+          BOOST_TEST_EQ(x.key_eq(), key_equal());
+
+          BOOST_TEST(y.get_allocator() == x.get_allocator());
+          BOOST_TEST(y.get_allocator() == a);
+        });
+
+      BOOST_TEST_EQ(num_transfers, 1u);
+      BOOST_TEST_EQ(raii::move_constructor, old_mc);
+    }
 
     check_raii_counts();