diff --git a/doc/unordered/changes.adoc b/doc/unordered/changes.adoc
index 757a7000..1d760650 100644
--- a/doc/unordered/changes.adoc
+++ b/doc/unordered/changes.adoc
@@ -10,6 +10,8 @@
 * Added `[c]visit_while` operations to `boost::concurrent_flat_map`, with serial
   and parallel variants.
+* Added efficient move construction of `boost::unordered_flat_map` from
+  `boost::concurrent_flat_map` and vice versa.
 
 == Release 1.83.0 - Major update
 
diff --git a/doc/unordered/concurrent.adoc b/doc/unordered/concurrent.adoc
index 7ed1be44..d418cec4 100644
--- a/doc/unordered/concurrent.adoc
+++ b/doc/unordered/concurrent.adoc
@@ -201,3 +201,29 @@
 and the user need not take any special precaution, but overall performance may be affected.
 Another blocking operation is _rehashing_, which happens explicitly via `rehash`/`reserve` or during
 insertion when the table's load hits `max_load()`. As with non-concurrent containers, reserving space in advance of bulk insertions will generally speed up the process.
+
+== Interoperability with non-concurrent containers
+
+As their internal data structures are essentially the same, `boost::unordered_flat_map` can
+be efficiently move-constructed from `boost::concurrent_flat_map` and vice versa.
+This interoperability comes in handy in multistage scenarios where parts of the data processing happen
+in parallel whereas other steps are non-concurrent (or non-modifying). In the following example,
+we want to construct a histogram from a huge input vector of words:
+the population phase can be done in parallel with `boost::concurrent_flat_map`, and the results are
+then transferred to the final container.
+
+[source,c++]
+----
+std::vector<std::string> words = ...;
+
+// Insert words in parallel
+boost::concurrent_flat_map<std::string, std::size_t> m0;
+std::for_each(
+  std::execution::par, words.begin(), words.end(),
+  [&](const auto& word) {
+    m0.try_emplace_or_visit(word, 1, [](auto& x) { ++x.second; });
+  });
+
+// Transfer to a regular unordered_flat_map
+boost::unordered_flat_map<std::string, std::size_t> m = std::move(m0);
+----
diff --git a/doc/unordered/concurrent_flat_map.adoc b/doc/unordered/concurrent_flat_map.adoc
index 8038f210..bca4f5bb 100644
--- a/doc/unordered/concurrent_flat_map.adoc
+++ b/doc/unordered/concurrent_flat_map.adoc
@@ -69,6 +69,7 @@ namespace boost {
     explicit xref:#concurrent_flat_map_allocator_constructor[concurrent_flat_map](const Allocator& a);
     xref:#concurrent_flat_map_copy_constructor_with_allocator[concurrent_flat_map](const concurrent_flat_map& other, const Allocator& a);
     xref:#concurrent_flat_map_move_constructor_with_allocator[concurrent_flat_map](concurrent_flat_map&& other, const Allocator& a);
+    xref:#concurrent_flat_map_move_constructor_from_unordered_flat_map[concurrent_flat_map](unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
     xref:#concurrent_flat_map_initializer_list_constructor[concurrent_flat_map](std::initializer_list<value_type> il,
       size_type n = _implementation-defined_,
       const hasher& hf = hasher(),
@@ -501,6 +502,21 @@
 Concurrency:;; Blocking on `other`.
 
 ---
 
+==== Move Constructor from unordered_flat_map
+
+```c++
+concurrent_flat_map(unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
+```
+
+Move construction from a xref:#unordered_flat_map[`unordered_flat_map`].
+The internal bucket array of `other` is transferred directly to the new container.
+The hash function, predicate and allocator are move-constructed from `other`.
+
+[horizontal]
+Complexity:;; O(`bucket_count()`)
+
+---
+
 ==== Initializer List Constructor
 [source,c++,subs="+quotes"]
 ----
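For quick orientation, a minimal usage sketch of the constructor documented in the entry above (variable names are illustrative; the post-condition that the source is left empty follows from the `empty_initialize` call introduced later in this patch):

```c++
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <cassert>

int main() {
  boost::unordered_flat_map<int, int> fm;
  fm.emplace(1, 10);

  // The bucket array is adopted wholesale; only the group-access array
  // (one entry per group) needs allocating, hence the documented
  // O(bucket_count()) complexity.
  boost::concurrent_flat_map<int, int> cm(std::move(fm));

  assert(cm.size() == 1);
  assert(fm.empty()); // fm is reset to a valid empty state
}
```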
diff --git a/doc/unordered/unordered_flat_map.adoc b/doc/unordered/unordered_flat_map.adoc
index a112e192..543db307 100644
--- a/doc/unordered/unordered_flat_map.adoc
+++ b/doc/unordered/unordered_flat_map.adoc
@@ -77,6 +77,7 @@ namespace boost {
     explicit xref:#unordered_flat_map_allocator_constructor[unordered_flat_map](const Allocator& a);
     xref:#unordered_flat_map_copy_constructor_with_allocator[unordered_flat_map](const unordered_flat_map& other, const Allocator& a);
     xref:#unordered_flat_map_move_constructor_with_allocator[unordered_flat_map](unordered_flat_map&& other, const Allocator& a);
+    xref:#unordered_flat_map_move_constructor_from_concurrent_flat_map[unordered_flat_map](concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
     xref:#unordered_flat_map_initializer_list_constructor[unordered_flat_map](std::initializer_list<value_type> il,
       size_type n = _implementation-defined_,
       const hasher& hf = hasher(),
@@ -472,6 +473,22 @@
 from `other`, and the allocator is copy-constructed from `a`.
 
 ---
 
+==== Move Constructor from concurrent_flat_map
+
+```c++
+unordered_flat_map(concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
+```
+
+Move construction from a xref:#concurrent_flat_map[`concurrent_flat_map`].
+The internal bucket array of `other` is transferred directly to the new container.
+The hash function, predicate and allocator are move-constructed from `other`.
+
+[horizontal]
+Complexity:;; Constant time.
+Concurrency:;; Blocking on `other`.
+
+---
+
 ==== Initializer List Constructor
 [source,c++,subs="+quotes"]
 ----
diff --git a/include/boost/unordered/concurrent_flat_map.hpp b/include/boost/unordered/concurrent_flat_map.hpp
index 9364d349..6e2eef98 100644
--- a/include/boost/unordered/concurrent_flat_map.hpp
+++ b/include/boost/unordered/concurrent_flat_map.hpp
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <boost/unordered/unordered_flat_map_fwd.hpp>
 #include
 #include
 
@@ -84,6 +85,9 @@
     template <class Key2, class T2, class Hash2, class Pred2,
       class Allocator2>
     friend class concurrent_flat_map;
+    template <class Key2, class T2, class Hash2, class Pred2,
+      class Allocator2>
+    friend class unordered_flat_map;
 
     using type_policy = detail::foa::flat_map_types<Key, T>;
 
@@ -223,6 +227,13 @@
     {
     }
 
+    concurrent_flat_map(
+      unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other)
+        : table_(std::move(other.table_))
+    {
+    }
+
     ~concurrent_flat_map() = default;
 
     concurrent_flat_map& operator=(concurrent_flat_map const& rhs)
diff --git a/include/boost/unordered/detail/foa/concurrent_table.hpp b/include/boost/unordered/detail/foa/concurrent_table.hpp
index 51615446..4a536de0 100644
--- a/include/boost/unordered/detail/foa/concurrent_table.hpp
+++ b/include/boost/unordered/detail/foa/concurrent_table.hpp
@@ -252,7 +252,21 @@ struct concurrent_table_arrays:table_arrays
   template<typename Allocator>
   static concurrent_table_arrays new_(Allocator& al,std::size_t n)
   {
-    concurrent_table_arrays arrays{super::new_(al,n),nullptr};
+    super x{super::new_(al,n)};
+    BOOST_TRY{
+      return new_group_access(al,x);
+    }
+    BOOST_CATCH(...){
+      super::delete_(al,x);
+      BOOST_RETHROW
+    }
+    BOOST_CATCH_END
+  }
+
+  template<typename Allocator>
+  static concurrent_table_arrays new_group_access(Allocator& al,const super& x)
+  {
+    concurrent_table_arrays arrays{x,nullptr};
     if(!arrays.elements){
       arrays.group_accesses=dummy_group_accesses();
     }
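The split of `new_` into a two-phase allocation is worth a note: the base arrays are allocated first, and if the group-access allocation in `new_group_access` then throws, `new_` releases the base arrays before rethrowing. A self-contained illustration of that commit/rollback shape (all names hypothetical):

```c++
#include <cstddef>
#include <new>

struct two_arrays {
  int* elements;
  int* group_accesses;
};

// Mirrors the structure of concurrent_table_arrays::new_ after this
// patch: phase 1 allocates, phase 2 may throw, in which case phase 1
// is rolled back and the exception propagates.
two_arrays make_two_arrays(std::size_t n) {
  int* a = new int[n];   // phase 1 (super::new_)
  try {
    int* b = new int[n]; // phase 2 (new_group_access)
    return {a, b};
  }
  catch (...) {
    delete[] a;          // rollback (super::delete_)
    throw;
  }
}
```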
@@ -261,26 +275,26 @@
         typename boost::allocator_rebind<Allocator,group_access>::type;
       using access_traits=boost::allocator_traits<access_alloc>;
 
-      BOOST_TRY{
-        auto aal=access_alloc(al);
-        arrays.group_accesses=boost::to_address(
-          access_traits::allocate(aal,arrays.groups_size_mask+1));
+      auto aal=access_alloc(al);
+      arrays.group_accesses=boost::to_address(
+        access_traits::allocate(aal,arrays.groups_size_mask+1));
 
-        for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
-          ::new (arrays.group_accesses+i) group_access();
-        }
-      }
-      BOOST_CATCH(...){
-        super::delete_(al,arrays);
-        BOOST_RETHROW
-      }
-      BOOST_CATCH_END
+      for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
+        ::new (arrays.group_accesses+i) group_access();
+      }
     }
     return arrays;
   }
 
   template<typename Allocator>
   static void delete_(Allocator& al,concurrent_table_arrays& arrays)noexcept
+  {
+    delete_group_access(al,arrays);
+    super::delete_(al,arrays);
+  }
+
+  template<typename Allocator>
+  static void delete_group_access(Allocator& al,concurrent_table_arrays& arrays)noexcept
   {
     if(arrays.elements){
@@ -294,7 +308,6 @@
       aal,pointer_traits::pointer_to(*arrays.group_accesses),
       arrays.groups_size_mask+1);
     }
-    super::delete_(al,arrays);
   }
 
   group_access *group_accesses;
 };
 
@@ -307,7 +320,7 @@ struct atomic_size_control
   atomic_size_control(std::size_t ml_,std::size_t size_):
     pad0_{},ml{ml_},pad1_{},size{size_}{}
 
-  atomic_size_control(atomic_size_control& x):
+  atomic_size_control(const atomic_size_control& x):
     pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}
 
   /* padding to avoid false sharing internally and with surrounding data */
@@ -359,7 +372,7 @@ inline void swap(atomic_size_control& x,atomic_size_control& y)
  * - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based
  *   on C++17 stdlib parallel algorithms.
  *
- * Consult boost::unordered_flat_map docs for the full API reference.
+ * Consult boost::concurrent_flat_map docs for the full API reference.
  * Heterogeneous lookup is supported by default, that is, without checking for
  * any ::is_transparent typedefs --this checking is done by the wrapping
  * containers.
@@ -391,6 +404,9 @@
  * over.
  */
 
+template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
+class table; /* concurrent/non-concurrent interop */
+
 template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
 using concurrent_table_core_impl=table_core<
   TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
@@ -412,10 +428,10 @@ class concurrent_table:
   using group_type=typename super::group_type;
   using super::N;
   using prober=typename super::prober;
-
-  template<
-    typename TypePolicy2,typename Hash2,typename Pred2,typename Allocator2>
-  friend class concurrent_table;
+  using arrays_type=typename super::arrays_type;
+  using size_ctrl_type=typename super::size_ctrl_type;
+  using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
+  friend compatible_nonconcurrent_table;
 
 public:
   using key_type=typename super::key_type;
@@ -450,6 +466,21 @@
     concurrent_table(x,al_,x.exclusive_access()){}
   concurrent_table(concurrent_table&& x,const Allocator& al_):
     concurrent_table(std::move(x),al_,x.exclusive_access()){}
+
+  concurrent_table(compatible_nonconcurrent_table&& x):
+    super{
+      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
+      arrays_type(arrays_type::new_group_access(
+        x.al(),
+        typename arrays_type::super{
+          x.arrays.groups_size_index,x.arrays.groups_size_mask,
+          reinterpret_cast<group_type*>(x.arrays.groups),
+          reinterpret_cast<value_type*>(x.arrays.elements)})),
+      size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
+  {
+    x.empty_initialize();
+  }
+
   ~concurrent_table()=default;
 
   concurrent_table& operator=(const concurrent_table& x)
@@ -875,6 +906,8 @@
   }
 
 private:
+  template<typename,typename,typename,typename> friend class concurrent_table;
+
   using mutex_type=rw_spinlock;
   using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine
   using shared_lock_guard=shared_lock<mutex_type>;
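Two small fixes ride along in this file: the API-reference comment now points at `boost::concurrent_flat_map` rather than `boost::unordered_flat_map`, and `atomic_size_control`'s copy constructor takes its argument by `const&`. The latter works because the copy must go through explicit `load()` calls anyway, `std::atomic` itself being non-copyable. A minimal illustration (hypothetical `size_ctrl` type):

```c++
#include <atomic>
#include <cstddef>

struct size_ctrl {
  std::atomic<std::size_t> ml;
  std::atomic<std::size_t> size;

  size_ctrl(std::size_t ml_, std::size_t size_) : ml{ml_}, size{size_} {}

  // std::atomic has no copy constructor, so the copy is spelled out with
  // explicit load()s (seq_cst by default); taking const& is fine since
  // load() is const-qualified.
  size_ctrl(const size_ctrl& x) : ml{x.ml.load()}, size{x.size.load()} {}
};

int main() {
  const size_ctrl a{10, 5};
  size_ctrl b{a};
  (void)b;
}
```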
diff --git a/include/boost/unordered/detail/foa/core.hpp b/include/boost/unordered/detail/foa/core.hpp
index 239d05d3..f0a2ef5c 100644
--- a/include/boost/unordered/detail/foa/core.hpp
+++ b/include/boost/unordered/detail/foa/core.hpp
@@ -1282,6 +1282,17 @@ public:
     size_ctrl{initial_max_load(),0}
   {}
 
+  /* bare transfer ctor for concurrent/non-concurrent interop */
+
+  table_core(
+    Hash&& h_,Pred&& pred_,Allocator&& al_,
+    const arrays_type& arrays_,const size_ctrl_type& size_ctrl_):
+    hash_base{empty_init,std::move(h_)},
+    pred_base{empty_init,std::move(pred_)},
+    allocator_base{empty_init,std::move(al_)},
+    arrays(arrays_),size_ctrl(size_ctrl_)
+  {}
+
   table_core(const table_core& x):
     table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{}
 
@@ -1290,14 +1301,11 @@
     std::is_nothrow_move_constructible<Hash>::value&&
     std::is_nothrow_move_constructible<Pred>::value&&
     std::is_nothrow_move_constructible<Allocator>::value):
-    hash_base{empty_init,std::move(x.h())},
-    pred_base{empty_init,std::move(x.pred())},
-    allocator_base{empty_init,std::move(x.al())},
-    arrays(x.arrays),size_ctrl(x.size_ctrl)
+    table_core{
+      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
+      x.arrays,x.size_ctrl}
   {
-    x.arrays=x.new_arrays(0);
-    x.size_ctrl.ml=x.initial_max_load();
-    x.size_ctrl.size=0;
+    x.empty_initialize();
   }
 
   table_core(const table_core& x,const Allocator& al_):
@@ -1336,6 +1344,13 @@
     delete_arrays(arrays);
   }
 
+  void empty_initialize()noexcept
+  {
+    arrays=new_arrays(0);
+    size_ctrl.ml=initial_max_load();
+    size_ctrl.size=0;
+  }
+
   table_core& operator=(const table_core& x)
   {
     BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
@@ -1804,7 +1819,8 @@
     pred_base{empty_init,std::move(pred_)},
     allocator_base{empty_init,al_},arrays(new_arrays(0)),
     size_ctrl{initial_max_load(),0}
-  {}
+  {
+  }
 
   arrays_type new_arrays(std::size_t n)
   {
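The new `table_core` pieces compose as follows: the bare transfer constructor adopts `arrays`/`size_ctrl` verbatim, the move constructor now delegates to it, and `empty_initialize` restores the moved-from table to a valid empty state. A miniature of the same shape (hypothetical `mini_table` type, not the library's API):

```c++
#include <cstddef>
#include <utility>

struct mini_table {
  std::size_t* data;
  std::size_t  size;

  // bare transfer ctor: adopt the source's state verbatim
  mini_table(std::size_t* d, std::size_t s) : data{d}, size{s} {}

  // move ctor: transfer, then reset the source
  mini_table(mini_table&& x) : mini_table{x.data, x.size} {
    x.empty_initialize();
  }

  void empty_initialize() noexcept {
    data = nullptr;
    size = 0;
  }
};

int main() {
  std::size_t buf[4] = {};
  mini_table t1{buf, 4};
  mini_table t2{std::move(t1)}; // t1 is now {nullptr, 0}, still usable
  (void)t2;
}
```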
diff --git a/include/boost/unordered/detail/foa/table.hpp b/include/boost/unordered/detail/foa/table.hpp
index 85145cb6..6cc11ed8 100644
--- a/include/boost/unordered/detail/foa/table.hpp
+++ b/include/boost/unordered/detail/foa/table.hpp
@@ -264,6 +264,9 @@ private:
  * checking is done by boost::unordered_(flat|node)_(map|set).
  */
 
+template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
+class concurrent_table; /* concurrent/non-concurrent interop */
+
 template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
 using table_core_impl=
   table_core<TypePolicy,group15<plain_integral>,table_arrays,
@@ -284,7 +287,12 @@ class table:table_core_impl
   using group_type=typename super::group_type;
   using super::N;
   using prober=typename super::prober;
+  using arrays_type=typename super::arrays_type;
+  using size_ctrl_type=typename super::size_ctrl_type;
   using locator=typename super::locator;
+  using compatible_concurrent_table=
+    concurrent_table<TypePolicy,Hash,Pred,Allocator>;
+  friend compatible_concurrent_table;
 
 public:
   using key_type=typename super::key_type;
@@ -323,6 +331,8 @@
   table(table&& x)=default;
   table(const table& x,const Allocator& al_):super{x,al_}{}
   table(table&& x,const Allocator& al_):super{std::move(x),al_}{}
+  table(compatible_concurrent_table&& x):
+    table(std::move(x),x.exclusive_access()){}
   ~table()=default;
 
   table& operator=(const table& x)=default;
@@ -496,6 +506,22 @@
   friend bool operator!=(const table& x,const table& y){return !(x==y);}
 
 private:
+  template<typename ExclusiveLockGuard>
+  table(compatible_concurrent_table&& x,ExclusiveLockGuard):
+    super{
+      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
+      arrays_type{
+        x.arrays.groups_size_index,x.arrays.groups_size_mask,
+        reinterpret_cast<group_type*>(x.arrays.groups),
+        reinterpret_cast<value_type*>(x.arrays.elements)},
+      size_ctrl_type{
+        x.size_ctrl.ml,x.size_ctrl.size}}
+  {
+    compatible_concurrent_table::arrays_type::delete_group_access(
+      this->al(),x.arrays);
+    x.empty_initialize();
+  }
+
   struct erase_on_exit
   {
     erase_on_exit(table& x_,const_iterator it_):x{x_},it{it_}{}
diff --git a/include/boost/unordered/unordered_flat_map.hpp b/include/boost/unordered/unordered_flat_map.hpp
index d74de55a..1f7984ec 100644
--- a/include/boost/unordered/unordered_flat_map.hpp
+++ b/include/boost/unordered/unordered_flat_map.hpp
@@ -10,6 +10,7 @@
 #pragma once
 #endif
 
+#include <boost/unordered/concurrent_flat_map_fwd.hpp>
 #include
 #include
 #include
@@ -36,6 +37,10 @@ namespace boost {
     template <class Key, class T, class Hash, class KeyEqual, class Allocator>
     class unordered_flat_map
     {
+      template <class Key2, class T2, class Hash2, class Pred2,
+        class Allocator2>
+      friend class concurrent_flat_map;
+
       using map_types = detail::foa::flat_map_types<Key, T>;
 
       using table_type = detail::foa::table<map_types, Hash, KeyEqual,
@@ ... @@
       {
       }
 
+      unordered_flat_map(
+        concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator>&& other)
+          : table_(std::move(other.table_))
+      {
+      }
+
       ~unordered_flat_map() = default;
 
       unordered_flat_map& operator=(unordered_flat_map const& other)
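Note the shape of the private constructor added to `detail/foa/table.hpp` above: the public constructor obtains `x.exclusive_access()` and passes the resulting guard by value to the private delegate, so the concurrent source stays exclusively locked for the entire transfer. A self-contained sketch of the idiom (hypothetical `source`/`sink` types, `std::mutex` standing in for the table's rw-lock):

```c++
#include <mutex>
#include <utility>

struct source {
  std::mutex m;
  int payload = 42;

  std::unique_lock<std::mutex> exclusive_access() {
    return std::unique_lock<std::mutex>{m};
  }
};

struct sink {
  int payload;

  explicit sink(source&& x) : sink{std::move(x), x.exclusive_access()} {}

private:
  // The unnamed guard parameter keeps x locked while the members are
  // transferred; it is released when the constructor call completes.
  sink(source&& x, std::unique_lock<std::mutex>) : payload{x.payload} {}
};

int main() {
  source s;
  sink d{std::move(s)};
  (void)d;
}
```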
diff --git a/test/cfoa/assign_tests.cpp b/test/cfoa/assign_tests.cpp
index 24675d87..f2dd2364 100644
--- a/test/cfoa/assign_tests.cpp
+++ b/test/cfoa/assign_tests.cpp
@@ -33,6 +33,9 @@
 using hasher = stateful_hash;
 using key_equal = stateful_key_equal;
 using allocator_type = stateful_allocator<std::pair<raii const, raii> >;
 
+using flat_map_type = boost::unordered::unordered_flat_map<raii, raii,
+  hasher, key_equal, allocator_type>;
+
 using map_type = boost::unordered::concurrent_flat_map<raii, raii,
   hasher, key_equal, allocator_type>;
 
@@ -843,6 +846,136 @@
     }
     check_raii_counts();
   }
+
+  template <class G> void flat_map_move_assign(G gen, test::random_generator rg)
+  {
+    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
+    auto reference_map =
+      boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
+
+    /*
+     * basically test that a temporary container is materialized and we
+     * move-assign from that
+     *
+     * we don't need to be super rigorous here because we already have tests
+     * for container assignment, we're just testing that a temporary is
+     * materialized
+     */
+
+    {
+      raii::reset_counts();
+
+      flat_map_type flat_map(values.begin(), values.end(), values.size(),
+        hasher(1), key_equal(2), allocator_type(3));
+
+      map_type map(0, hasher(2), key_equal(1), allocator_type(3));
+
+      BOOST_TEST(flat_map.get_allocator() == map.get_allocator());
+
+      map = std::move(flat_map);
+
+      BOOST_TEST(flat_map.empty());
+      BOOST_TEST_EQ(map.size(), reference_map.size());
+
+      test_fuzzy_matches_reference(map, reference_map, rg);
+
+      BOOST_TEST_EQ(map.hash_function(), hasher(1));
+      BOOST_TEST_EQ(map.key_eq(), key_equal(2));
+
+      BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
+      BOOST_TEST_EQ(raii::destructor, 2 * values.size());
+      BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size());
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+    }
+
+    check_raii_counts();
+
+    {
+      raii::reset_counts();
+
+      map_type map(values.begin(), values.end(), values.size(), hasher(1),
+        key_equal(2), allocator_type(3));
+
+      flat_map_type flat_map(0, hasher(2), key_equal(1), allocator_type(3));
+
+      BOOST_TEST(flat_map.get_allocator() == map.get_allocator());
+
+      flat_map = std::move(map);
+
+      BOOST_TEST(map.empty());
+      BOOST_TEST_EQ(flat_map.size(), reference_map.size());
+
+      BOOST_TEST_EQ(flat_map.hash_function(), hasher(1));
+      BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2));
+
+      BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
+      BOOST_TEST_EQ(raii::destructor, 2 * values.size());
+      BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size());
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+    }
+
+    check_raii_counts();
+
+    {
+      raii::reset_counts();
+
+      flat_map_type flat_map(values.begin(), values.end(), values.size(),
+        hasher(1), key_equal(2), allocator_type(3));
+
+      map_type map(0, hasher(2), key_equal(1), allocator_type(4));
+
+      BOOST_TEST(flat_map.get_allocator() != map.get_allocator());
+
+      map = std::move(flat_map);
+
+      BOOST_TEST(flat_map.empty());
+      BOOST_TEST_EQ(map.size(), reference_map.size());
+
+      test_fuzzy_matches_reference(map, reference_map, rg);
+
+      BOOST_TEST_EQ(map.hash_function(), hasher(1));
+      BOOST_TEST_EQ(map.key_eq(), key_equal(2));
+
+      BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
+      BOOST_TEST_EQ(
+        raii::destructor, 2 * values.size() + 2 * reference_map.size());
+      BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size());
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+    }
+
+    check_raii_counts();
+
+    {
+      raii::reset_counts();
+
+      map_type map(values.begin(), values.end(), values.size(), hasher(1),
+        key_equal(2), allocator_type(3));
+
+      flat_map_type flat_map(0, hasher(2), key_equal(1), allocator_type(4));
+
+      BOOST_TEST(flat_map.get_allocator() != map.get_allocator());
+
+      flat_map = std::move(map);
+
+      BOOST_TEST(map.empty());
+      BOOST_TEST_EQ(flat_map.size(), reference_map.size());
+
+      BOOST_TEST_EQ(flat_map.hash_function(), hasher(1));
+      BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2));
+
+      BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
+      BOOST_TEST_EQ(
+        raii::destructor, 2 * values.size() + 2 * reference_map.size());
+      BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size());
+      BOOST_TEST_EQ(raii::copy_assignment, 0u);
+      BOOST_TEST_EQ(raii::move_assignment, 0u);
+    }
+
+    check_raii_counts();
+  }
 } // namespace
 
 // clang-format off
@@ -860,6 +993,11 @@
 UNORDERED_TEST(
   insert_and_assign,
   ((init_type_generator))
   ((default_generator)(sequential)(limited_range)))
+
+UNORDERED_TEST(
+  flat_map_move_assign,
+  ((init_type_generator))
+  ((default_generator)(sequential)(limited_range)))
 // clang-format on
 
 RUN_TESTS()
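The four blocks above pin down the allocator-sensitive behavior of these move-assignments: with equal allocators the bucket array can be adopted wholesale, while with unequal allocators the storage cannot be reused and elements are moved individually. That is what the extra counts in the unequal-allocator blocks assert, roughly an additional `2 * reference_map.size()` move constructions and destructions relative to the equal-allocator blocks. A compact sketch of the user-visible part (using `std::allocator`, whose instances always compare equal):

```c++
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <utility>

int main() {
  boost::unordered_flat_map<int, int> fm{{{1, 2}, {3, 4}}};

  // A temporary concurrent_flat_map is materialized from fm, then
  // move-assigned into cm; with equal allocators the buffer moves as-is.
  boost::concurrent_flat_map<int, int> cm;
  cm = std::move(fm);

  return cm.size() == 2 ? 0 : 1;
}
```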
diff --git a/test/cfoa/constructor_tests.cpp b/test/cfoa/constructor_tests.cpp
index f6e0e069..595cc281 100644
--- a/test/cfoa/constructor_tests.cpp
+++ b/test/cfoa/constructor_tests.cpp
@@ -775,6 +775,109 @@ namespace {
     check_raii_counts();
   }
 
+  template <class G> void flat_map_constructor(G gen, test::random_generator rg)
+  {
+    auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
+    auto reference_map =
+      boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>(
+        values.begin(), values.end(), values.size());
+
+    raii::reset_counts();
+
+    {
+      boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
+        flat_map(values.begin(), values.end(), reference_map.size(), hasher(1),
+          key_equal(2), allocator_type(3));
+
+      auto const old_dc = +raii::default_constructor;
+      auto const old_mc = +raii::move_constructor;
+      auto const old_cc = +raii::copy_constructor;
+
+      BOOST_TEST_EQ(old_dc, 0u);
+      BOOST_TEST_GT(old_mc, 0u);
+      BOOST_TEST_GT(old_cc, 0u);
+
+      map_type x(std::move(flat_map));
+
+      test_fuzzy_matches_reference(x, reference_map, rg);
+
+      BOOST_TEST_EQ(+raii::default_constructor, old_dc);
+      BOOST_TEST_EQ(+raii::move_constructor, old_mc);
+      BOOST_TEST_EQ(+raii::copy_constructor, old_cc);
+
+      BOOST_TEST_EQ(x.hash_function(), hasher(1));
+      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
+      BOOST_TEST(x.get_allocator() == allocator_type(3));
+
+      BOOST_TEST(flat_map.empty());
+    }
+
+    check_raii_counts();
+
+    {
+      boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
+        flat_map(0, hasher(1), key_equal(2), allocator_type(3));
+
+      map_type x(std::move(flat_map));
+
+      BOOST_TEST(x.empty());
+
+      BOOST_TEST_EQ(x.hash_function(), hasher(1));
+      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
+      BOOST_TEST(x.get_allocator() == allocator_type(3));
+
+      BOOST_TEST(flat_map.empty());
+    }
+
+    check_raii_counts();
+
+    {
+      map_type flat_map(values.begin(), values.end(), reference_map.size(),
+        hasher(1), key_equal(2), allocator_type(3));
+
+      auto const old_dc = +raii::default_constructor;
+      auto const old_mc = +raii::move_constructor;
+      auto const old_cc = +raii::copy_constructor;
+
+      BOOST_TEST_EQ(old_dc, 0u);
+      BOOST_TEST_GT(old_mc, 0u);
+      BOOST_TEST_GT(old_cc, 0u);
+
+      boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
+        x(std::move(flat_map));
+
+      BOOST_TEST(x == reference_map);
+
+      BOOST_TEST_EQ(+raii::default_constructor, old_dc);
+      BOOST_TEST_EQ(+raii::move_constructor, old_mc);
+      BOOST_TEST_EQ(+raii::copy_constructor, old_cc);
+
+      BOOST_TEST_EQ(x.hash_function(), hasher(1));
+      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
+      BOOST_TEST(x.get_allocator() == allocator_type(3));
+
+      BOOST_TEST(flat_map.empty());
+    }
+
+    check_raii_counts();
+
+    {
+      map_type flat_map(0, hasher(1), key_equal(2), allocator_type(3));
+
+      boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
+        x(std::move(flat_map));
+
+      BOOST_TEST(x.empty());
+
+      BOOST_TEST_EQ(x.hash_function(), hasher(1));
+      BOOST_TEST_EQ(x.key_eq(), key_equal(2));
+      BOOST_TEST(x.get_allocator() == allocator_type(3));
+
+      BOOST_TEST(flat_map.empty());
+    }
+
+    check_raii_counts();
+  }
 } // namespace
 
 // clang-format off
@@ -818,6 +921,11 @@
   ((value_type_generator))
   ((default_generator)(sequential)(limited_range)))
 
+UNORDERED_TEST(
+  flat_map_constructor,
+  ((value_type_generator))
+  ((default_generator)(sequential)(limited_range)))
+
 // clang-format on
 
 RUN_TESTS()
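Taken together, the tests above exercise both directions of the new interop. A final round-trip sketch combining them (mirrors the example added to `concurrent.adoc`; complexities per the documentation entries in this patch):

```c++
#include <boost/unordered/concurrent_flat_map.hpp>
#include <boost/unordered/unordered_flat_map.hpp>
#include <cstddef>
#include <string>
#include <utility>

int main() {
  boost::concurrent_flat_map<std::string, std::size_t> cm;
  cm.try_emplace("word", std::size_t(1));

  // concurrent -> non-concurrent: constant time, blocking on cm
  boost::unordered_flat_map<std::string, std::size_t> fm = std::move(cm);

  // non-concurrent -> concurrent: O(bucket_count()), rebuilding only the
  // group-access array, not the elements
  boost::concurrent_flat_map<std::string, std::size_t> cm2 = std::move(fm);

  return cm2.size() == 1 ? 0 : 1;
}
```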