From 212c6a1e4d251ee4d9a0be375951b55de9290b6a Mon Sep 17 00:00:00 2001
From: Christian Mazakas <christian.mazakas@gmail.com>
Date: Wed, 26 Apr 2023 12:53:49 -0700
Subject: [PATCH] Add prototype of move assignment

---
 .../boost/unordered/concurrent_flat_map.hpp   |   9 +
 include/boost/unordered/detail/foa/core.hpp   |   4 +-
 test/cfoa/assign_tests.cpp                    | 213 ++++++++++++++++++
 3 files changed, 224 insertions(+), 2 deletions(-)

diff --git a/include/boost/unordered/concurrent_flat_map.hpp b/include/boost/unordered/concurrent_flat_map.hpp
index 74d97b94..7f38d261 100644
--- a/include/boost/unordered/concurrent_flat_map.hpp
+++ b/include/boost/unordered/concurrent_flat_map.hpp
@@ -258,6 +258,15 @@ namespace boost {
         return *this;
       }
 
+      concurrent_flat_map& operator=(concurrent_flat_map&& rhs)
+        noexcept(std::allocator_traits<Allocator>::is_always_equal::value ||
+          std::allocator_traits<
+            Allocator>::propagate_on_container_move_assignment::value)
+      {
+        table_ = std::move(rhs.table_);
+        return *this;
+      }
+
       /// Capacity
       ///
 
diff --git a/include/boost/unordered/detail/foa/core.hpp b/include/boost/unordered/detail/foa/core.hpp
index 1552f41e..6769d179 100644
--- a/include/boost/unordered/detail/foa/core.hpp
+++ b/include/boost/unordered/detail/foa/core.hpp
@@ -1381,8 +1381,8 @@ public:
       reserve(0);
       move_assign_if(al(),x.al());
       swap(arrays,x.arrays);
-      swap(ml,x.ml);
-      swap(size_,x.size_);
+      swap_size_impl(ml,x.ml);
+      swap_size_impl(size_,x.size_);
     }
     else{
       /* noshrink: favor memory reuse over tightness */
diff --git a/test/cfoa/assign_tests.cpp b/test/cfoa/assign_tests.cpp
index d11bd4d4..d527eb46 100644
--- a/test/cfoa/assign_tests.cpp
+++ b/test/cfoa/assign_tests.cpp
@@ -119,6 +119,8 @@ namespace {
         BOOST_TEST_EQ(x.hash_function(), y.hash_function());
         BOOST_TEST_EQ(x.key_eq(), y.key_eq());
         BOOST_TEST(x.get_allocator() != y.get_allocator());
+
+        BOOST_TEST(y.empty());
       });
 
       BOOST_TEST_EQ(raii::destructor, num_threads * (2 * old_size));
@@ -269,6 +271,212 @@
     }
     check_raii_counts();
   }
+
+  template <class G> void move_assign(G gen, test::random_generator rg)
+  {
+    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
+    auto reference_map =
+      boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
+
+    // move assignment has more complex requirements than copying
+    // equal allocators:
+    // lhs empty, rhs non-empty
+    // lhs non-empty, rhs empty
+    // lhs non-empty, rhs non-empty
+    //
+    // unequal allocators:
+    // lhs non-empty, rhs non-empty
+    //
+    // pocma
+    // self move-assign
+
+    // lhs empty, rhs empty
+    {
+      raii::reset_counts();
+
+      map_type x(0, hasher(1), key_equal(2), allocator_type(3));
+
+      std::atomic<unsigned> num_transfers{0};
+
+      thread_runner(
+        values, [&x, &num_transfers](boost::span<map_value_type> s) {
+          (void)s;
+
+          map_type y(0, hasher(2), key_equal(1), allocator_type(3));
+
+          BOOST_TEST(x.empty());
+          BOOST_TEST(y.empty());
+          BOOST_TEST(x.get_allocator() == y.get_allocator());
+
+          y = std::move(x);
+          if (y.hash_function() == hasher(1)) {
+            ++num_transfers;
+            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
+          } else {
+            BOOST_TEST_EQ(y.hash_function(), hasher(2));
+            BOOST_TEST_EQ(y.key_eq(), key_equal(1));
+          }
+
+          BOOST_TEST_EQ(x.hash_function(), hasher(2));
+          BOOST_TEST_EQ(x.key_eq(), key_equal(1));
+          BOOST_TEST(x.get_allocator() == y.get_allocator());
+        });
+
+      BOOST_TEST_EQ(num_transfers, 1u);
+
+      BOOST_TEST_EQ(raii::destructor, 0u);
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+      BOOST_TEST_EQ(raii::copy_constructor, 0u);
+    }
+
+    // lhs non-empty, rhs empty
+    {
+      raii::reset_counts();
+
+      map_type x(0, hasher(1), key_equal(2), allocator_type(3));
+
+      std::atomic<unsigned> num_transfers{0};
+
+      thread_runner(
+        values, [&x, &values, &num_transfers](boost::span<map_value_type> s) {
+          (void)s;
+
+          map_type y(values.begin(), values.end(), values.size(), hasher(2),
+            key_equal(1), allocator_type(3));
+
+          BOOST_TEST(x.empty());
+          BOOST_TEST(!y.empty());
+          BOOST_TEST(x.get_allocator() == y.get_allocator());
+
+          y = std::move(x);
+          if (y.hash_function() == hasher(1)) {
+            ++num_transfers;
+            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
+          } else {
+            BOOST_TEST_EQ(y.hash_function(), hasher(2));
+            BOOST_TEST_EQ(y.key_eq(), key_equal(1));
+          }
+
+          BOOST_TEST_EQ(x.hash_function(), hasher(2));
+          BOOST_TEST_EQ(x.key_eq(), key_equal(1));
+          BOOST_TEST(x.get_allocator() == y.get_allocator());
+
+          BOOST_TEST(y.empty());
+        });
+
+      BOOST_TEST_EQ(num_transfers, 1u);
+
+      BOOST_TEST_EQ(raii::destructor, num_threads * 2 * reference_map.size());
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+      BOOST_TEST_EQ(
+        raii::copy_constructor, num_threads * 2 * reference_map.size());
+    }
+    check_raii_counts();
+
+    // lhs empty, rhs non-empty
+    {
+      raii::reset_counts();
+
+      map_type x(values.begin(), values.end(), values.size(), hasher(1),
+        key_equal(2), allocator_type(3));
+
+      auto const old_cc = +raii::copy_constructor;
+      auto const old_mc = +raii::move_constructor;
+      std::atomic<unsigned> num_transfers{0};
+
+      thread_runner(values,
+        [&x, &reference_map, &num_transfers](boost::span<map_value_type> s) {
+          (void)s;
+
+          map_type y(allocator_type(3));
+
+          BOOST_TEST(y.empty());
+          BOOST_TEST(x.get_allocator() == y.get_allocator());
+
+          y = std::move(x);
+          if (!y.empty()) {
+            ++num_transfers;
+            test_matches_reference(y, reference_map);
+
+            BOOST_TEST_EQ(y.hash_function(), hasher(1));
+            BOOST_TEST_EQ(y.key_eq(), key_equal(2));
+          } else {
+            BOOST_TEST_EQ(y.hash_function(), hasher());
+            BOOST_TEST_EQ(y.key_eq(), key_equal());
+          }
+
+          BOOST_TEST(x.empty());
+
+          BOOST_TEST_EQ(x.hash_function(), hasher());
+          BOOST_TEST_EQ(x.key_eq(), key_equal());
+          BOOST_TEST(x.get_allocator() == y.get_allocator());
+        });
+
+      BOOST_TEST_EQ(num_transfers, 1u);
+
+      BOOST_TEST_EQ(raii::destructor, 2 * reference_map.size());
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+      BOOST_TEST_EQ(raii::copy_constructor, old_cc);
+      BOOST_TEST_EQ(raii::move_constructor, old_mc);
+    }
+    check_raii_counts();
+
+    // lhs non-empty, rhs non-empty
+    {
+      raii::reset_counts();
+
+      map_type x(values.begin(), values.end(), values.size(), hasher(1),
+        key_equal(2), allocator_type(3));
+
+      auto const old_size = x.size();
+      auto const old_cc = +raii::copy_constructor;
+      auto const old_mc = +raii::move_constructor;
+
+      std::atomic<unsigned> num_transfers{0};
+
+      thread_runner(values, [&x, &values, &num_transfers, &reference_map](
+                              boost::span<map_value_type> s) {
+        (void)s;
+
+        map_type y(values.begin(), values.end(), values.size(), hasher(2),
+          key_equal(1), allocator_type(3));
+
+        BOOST_TEST(!y.empty());
+        BOOST_TEST(x.get_allocator() == y.get_allocator());
+
+        y = std::move(x);
+        if (y.hash_function() == hasher(1)) {
+          ++num_transfers;
+          test_matches_reference(y, reference_map);
+
+          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
+        } else {
+          BOOST_TEST_EQ(y.hash_function(), hasher(2));
+          BOOST_TEST_EQ(y.key_eq(), key_equal(1));
+        }
+
+        BOOST_TEST(x.empty());
+
+        BOOST_TEST_EQ(x.hash_function(), hasher(2));
+        BOOST_TEST_EQ(x.key_eq(), key_equal(1));
+        BOOST_TEST(x.get_allocator() == y.get_allocator());
+      });
+
+      BOOST_TEST_EQ(num_transfers, 1u);
+
+      BOOST_TEST_EQ(
+        raii::destructor, 2 * old_size + num_threads * 2 * old_size);
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_constructor, old_mc);
+      BOOST_TEST_EQ(raii::copy_constructor,
+        old_cc + (num_threads * 2 * reference_map.size()));
+    }
+    check_raii_counts();
+  }
 } // namespace
 
 // clang-format off
@@ -276,6 +484,11 @@ UNORDERED_TEST(
   copy_assign,
   ((value_type_generator))
   ((default_generator)(sequential)(limited_range)))
+
+UNORDERED_TEST(
+  move_assign,
+  ((value_type_generator))
+  ((default_generator)(sequential)(limited_range)))
 // clang-format on
 
 RUN_TESTS()