Merge pull request #198 from boostorg/feature/concurrent_unordered_interop

Feature/concurrent unordered interop
joaquintides
2023-08-12 09:48:08 +02:00
committed by GitHub
11 changed files with 432 additions and 28 deletions


@ -10,6 +10,8 @@
* Added `[c]visit_while` operations to `boost::concurrent_flat_map`,
with serial and parallel variants.
* Added efficient move construction of `boost::unordered_flat_map` from
`boost::concurrent_flat_map` and vice versa.
== Release 1.83.0 - Major update


@ -201,3 +201,29 @@ and the user need not take any special precaution, but overall performance may b
Another blocking operation is _rehashing_, which happens explicitly via `rehash`/`reserve`
or during insertion when the table's load hits `max_load()`. As with non-concurrent containers,
reserving space in advance of bulk insertions will generally speed up the process.
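As a minimal sketch of this advice (the element types and counts below are purely illustrative):

[source,c++]
----
boost::concurrent_flat_map<int, int> m;
m.reserve(1000000); // one rehash up front rather than several during bulk insertion

// bulk insertion (shown serially here; the same applies when inserting from multiple threads)
for(int i = 0; i < 1000000; ++i) m.emplace(i, i);
----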
== Interoperability with non-concurrent containers
As their internal data structures are basically the same, `boost::unordered_flat_map` can
be efficiently move-constructed from `boost::concurrent_flat_map` and vice versa.
This interoperability comes in handy in multistage scenarios where parts of the data processing happen
in parallel whereas other steps are non-concurrent (or non-modifying). In the following example,
we want to construct a histogram from a huge input vector of words:
the population phase can be done in parallel with `boost::concurrent_flat_map` and the results are
then transferred to the final container.
[source,c++]
----
std::vector<std::string> words = ...;
// Insert words in parallel
boost::concurrent_flat_map<std::string_view, std::size_t> m0;
std::for_each(
std::execution::par, words.begin(), words.end(),
[&](const auto& word) {
m0.try_emplace_or_visit(word, 1, [](auto& x) { ++x.second; });
});
// Transfer to a regular unordered_flat_map
boost::unordered_flat_map m = std::move(m0);
----


@ -69,6 +69,7 @@ namespace boost {
explicit xref:#concurrent_flat_map_allocator_constructor[concurrent_flat_map](const Allocator& a);
xref:#concurrent_flat_map_copy_constructor_with_allocator[concurrent_flat_map](const concurrent_flat_map& other, const Allocator& a);
xref:#concurrent_flat_map_move_constructor_with_allocator[concurrent_flat_map](concurrent_flat_map&& other, const Allocator& a);
xref:#concurrent_flat_map_move_constructor_from_unordered_flat_map[concurrent_flat_map](unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
xref:#concurrent_flat_map_initializer_list_constructor[concurrent_flat_map](std::initializer_list<value_type> il,
size_type n = _implementation-defined_,
const hasher& hf = hasher(),
@ -501,6 +502,21 @@ Concurrency:;; Blocking on `other`.
---
==== Move Constructor from unordered_flat_map
```c++
concurrent_flat_map(unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
```
Move construction from a xref:#unordered_flat_map[`unordered_flat_map`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are move-constructed from `other`.
[horizontal]
Complexity:;; O(`bucket_count()`)
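For illustration only, a minimal usage sketch (key and mapped types are arbitrary):
```c++
boost::unordered_flat_map<std::string, int> m;
// ... populate m serially ...
boost::concurrent_flat_map<std::string, int> cm(std::move(m)); // bucket array transferred, elements neither copied nor moved
```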
---
==== Initializer List Constructor
[source,c++,subs="+quotes"]
----


@ -77,6 +77,7 @@ namespace boost {
explicit xref:#unordered_flat_map_allocator_constructor[unordered_flat_map](const Allocator& a);
xref:#unordered_flat_map_copy_constructor_with_allocator[unordered_flat_map](const unordered_flat_map& other, const Allocator& a);
xref:#unordered_flat_map_move_constructor_with_allocator[unordered_flat_map](unordered_flat_map&& other, const Allocator& a);
xref:#unordered_flat_map_move_constructor_from_concurrent_flat_map[unordered_flat_map](concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
xref:#unordered_flat_map_initializer_list_constructor[unordered_flat_map](std::initializer_list<value_type> il,
size_type n = _implementation-defined_,
const hasher& hf = hasher(),
@ -472,6 +473,22 @@ from `other`, and the allocator is copy-constructed from `a`.
---
==== Move Constructor from concurrent_flat_map
```c++
unordered_flat_map(concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
```
Move construction from a xref:#concurrent_flat_map[`concurrent_flat_map`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are move-constructed from `other`.
[horizontal]
Complexity:;; Constant time.
Concurrency:;; Blocking on `other`.
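For illustration only, a minimal usage sketch (types are arbitrary); a common motivation is to iterate over the results, as `concurrent_flat_map` itself provides no iterators:
```c++
boost::concurrent_flat_map<std::string, int> cm;
// ... populate cm, possibly from several threads ...
boost::unordered_flat_map<std::string, int> m(std::move(cm)); // constant-time transfer of the bucket array
for(const auto& kv : m) { /* process kv.first, kv.second */ }
```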
---
==== Initializer List Constructor
[source,c++,subs="+quotes"]
----


@ -15,6 +15,7 @@
#include <boost/unordered/detail/foa/concurrent_table.hpp>
#include <boost/unordered/detail/foa/flat_map_types.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_flat_map_fwd.hpp>
#include <boost/container_hash/hash.hpp>
#include <boost/core/allocator_access.hpp>
@ -84,6 +85,9 @@ namespace boost {
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class concurrent_flat_map;
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class unordered_flat_map;
using type_policy = detail::foa::flat_map_types<Key, T>;
@ -223,6 +227,13 @@ namespace boost {
{
}
concurrent_flat_map(
unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other)
: table_(std::move(other.table_))
{
}
~concurrent_flat_map() = default;
concurrent_flat_map& operator=(concurrent_flat_map const& rhs)


@ -252,7 +252,21 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
template<typename Allocator>
static concurrent_table_arrays new_(Allocator& al,std::size_t n)
{
concurrent_table_arrays arrays{super::new_(al,n),nullptr};
super x{super::new_(al,n)};
BOOST_TRY{
return new_group_access(al,x);
}
BOOST_CATCH(...){
super::delete_(al,x);
BOOST_RETHROW
}
BOOST_CATCH_END
}
template<typename Allocator>
static concurrent_table_arrays new_group_access(Allocator& al,const super& x)
{
concurrent_table_arrays arrays{x,nullptr};
if(!arrays.elements){
arrays.group_accesses=dummy_group_accesses<SizePolicy::min_size()>();
}
@ -261,26 +275,26 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
typename boost::allocator_rebind<Allocator,group_access>::type;
using access_traits=boost::allocator_traits<access_alloc>;
BOOST_TRY{
auto aal=access_alloc(al);
arrays.group_accesses=boost::to_address(
access_traits::allocate(aal,arrays.groups_size_mask+1));
auto aal=access_alloc(al);
arrays.group_accesses=boost::to_address(
access_traits::allocate(aal,arrays.groups_size_mask+1));
for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
::new (arrays.group_accesses+i) group_access();
}
for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
::new (arrays.group_accesses+i) group_access();
}
BOOST_CATCH(...){
super::delete_(al,arrays);
BOOST_RETHROW
}
BOOST_CATCH_END
}
return arrays;
}
template<typename Allocator>
static void delete_(Allocator& al,concurrent_table_arrays& arrays)noexcept
{
delete_group_access(al,arrays);
super::delete_(al,arrays);
}
template<typename Allocator>
static void delete_group_access(Allocator& al,concurrent_table_arrays& arrays)noexcept
{
if(arrays.elements){
using access_alloc=
@ -294,7 +308,6 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
aal,pointer_traits::pointer_to(*arrays.group_accesses),
arrays.groups_size_mask+1);
}
super::delete_(al,arrays);
}
group_access *group_accesses;
@ -307,7 +320,7 @@ struct atomic_size_control
atomic_size_control(std::size_t ml_,std::size_t size_):
pad0_{},ml{ml_},pad1_{},size{size_}{}
atomic_size_control(atomic_size_control& x):
atomic_size_control(const atomic_size_control& x):
pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}
/* padding to avoid false sharing internally and with surrounding data */
@ -359,7 +372,7 @@ inline void swap(atomic_size_control& x,atomic_size_control& y)
* - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based
* on C++17 stdlib parallel algorithms.
*
* Consult boost::unordered_flat_map docs for the full API reference.
* Consult boost::concurrent_flat_map docs for the full API reference.
* Heterogeneous lookup is supported by default, that is, without checking for
* any ::is_transparent typedefs --this checking is done by the wrapping
* containers.
@ -391,6 +404,9 @@ inline void swap(atomic_size_control& x,atomic_size_control& y)
* over.
*/
template<typename,typename,typename,typename>
class table; /* concurrent/non-concurrent interop */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using concurrent_table_core_impl=table_core<
TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
@ -412,10 +428,10 @@ class concurrent_table:
using group_type=typename super::group_type;
using super::N;
using prober=typename super::prober;
template<
typename TypePolicy2,typename Hash2,typename Pred2,typename Allocator2>
friend class concurrent_table;
using arrays_type=typename super::arrays_type;
using size_ctrl_type=typename super::size_ctrl_type;
using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
friend compatible_nonconcurrent_table;
public:
using key_type=typename super::key_type;
@ -450,6 +466,21 @@ public:
concurrent_table(x,al_,x.exclusive_access()){}
concurrent_table(concurrent_table&& x,const Allocator& al_):
concurrent_table(std::move(x),al_,x.exclusive_access()){}
concurrent_table(compatible_nonconcurrent_table&& x):
super{
std::move(x.h()),std::move(x.pred()),std::move(x.al()),
arrays_type(arrays_type::new_group_access(
x.al(),
typename arrays_type::super{
x.arrays.groups_size_index,x.arrays.groups_size_mask,
reinterpret_cast<group_type*>(x.arrays.groups),
reinterpret_cast<value_type*>(x.arrays.elements)})),
size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
{
x.empty_initialize();
}
~concurrent_table()=default;
concurrent_table& operator=(const concurrent_table& x)
@ -875,6 +906,8 @@ public:
}
private:
template<typename,typename,typename,typename> friend class concurrent_table;
using mutex_type=rw_spinlock;
using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine
using shared_lock_guard=shared_lock<mutex_type>;


@ -1282,6 +1282,17 @@ public:
size_ctrl{initial_max_load(),0}
{}
/* bare transfer ctor for concurrent/non-concurrent interop */
table_core(
Hash&& h_,Pred&& pred_,Allocator&& al_,
const arrays_type& arrays_,const size_ctrl_type& size_ctrl_):
hash_base{empty_init,std::move(h_)},
pred_base{empty_init,std::move(pred_)},
allocator_base{empty_init,std::move(al_)},
arrays(arrays_),size_ctrl(size_ctrl_)
{}
table_core(const table_core& x):
table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{}
@ -1290,14 +1301,11 @@ public:
std::is_nothrow_move_constructible<Hash>::value&&
std::is_nothrow_move_constructible<Pred>::value&&
std::is_nothrow_move_constructible<Allocator>::value):
hash_base{empty_init,std::move(x.h())},
pred_base{empty_init,std::move(x.pred())},
allocator_base{empty_init,std::move(x.al())},
arrays(x.arrays),size_ctrl(x.size_ctrl)
table_core{
std::move(x.h()),std::move(x.pred()),std::move(x.al()),
x.arrays,x.size_ctrl}
{
x.arrays=x.new_arrays(0);
x.size_ctrl.ml=x.initial_max_load();
x.size_ctrl.size=0;
x.empty_initialize();
}
table_core(const table_core& x,const Allocator& al_):
@ -1336,6 +1344,13 @@ public:
delete_arrays(arrays);
}
void empty_initialize()noexcept
{
arrays=new_arrays(0);
size_ctrl.ml=initial_max_load();
size_ctrl.size=0;
}
table_core& operator=(const table_core& x)
{
BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
@ -1804,7 +1819,8 @@ private:
pred_base{empty_init,std::move(pred_)},
allocator_base{empty_init,al_},arrays(new_arrays(0)),
size_ctrl{initial_max_load(),0}
{}
{
}
arrays_type new_arrays(std::size_t n)
{


@ -264,6 +264,9 @@ private:
* checking is done by boost::unordered_(flat|node)_(map|set).
*/
template<typename,typename,typename,typename>
class concurrent_table; /* concurrent/non-concurrent interop */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using table_core_impl=
table_core<TypePolicy,group15<plain_integral>,table_arrays,
@ -284,7 +287,12 @@ class table:table_core_impl<TypePolicy,Hash,Pred,Allocator>
using group_type=typename super::group_type;
using super::N;
using prober=typename super::prober;
using arrays_type=typename super::arrays_type;
using size_ctrl_type=typename super::size_ctrl_type;
using locator=typename super::locator;
using compatible_concurrent_table=
concurrent_table<TypePolicy,Hash,Pred,Allocator>;
friend compatible_concurrent_table;
public:
using key_type=typename super::key_type;
@ -323,6 +331,8 @@ public:
table(table&& x)=default;
table(const table& x,const Allocator& al_):super{x,al_}{}
table(table&& x,const Allocator& al_):super{std::move(x),al_}{}
table(compatible_concurrent_table&& x):
table(std::move(x),x.exclusive_access()){}
~table()=default;
table& operator=(const table& x)=default;
@ -496,6 +506,22 @@ public:
friend bool operator!=(const table& x,const table& y){return !(x==y);}
private:
template<typename ExclusiveLockGuard>
table(compatible_concurrent_table&& x,ExclusiveLockGuard):
super{
std::move(x.h()),std::move(x.pred()),std::move(x.al()),
arrays_type{
x.arrays.groups_size_index,x.arrays.groups_size_mask,
reinterpret_cast<group_type*>(x.arrays.groups),
reinterpret_cast<value_type*>(x.arrays.elements)},
size_ctrl_type{
x.size_ctrl.ml,x.size_ctrl.size}}
{
compatible_concurrent_table::arrays_type::delete_group_access(
this->al(),x.arrays);
x.empty_initialize();
}
struct erase_on_exit
{
erase_on_exit(table& x_,const_iterator it_):x{x_},it{it_}{}


@ -10,6 +10,7 @@
#pragma once
#endif
#include <boost/unordered/concurrent_flat_map_fwd.hpp>
#include <boost/unordered/detail/foa/flat_map_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/type_traits.hpp>
@ -36,6 +37,10 @@ namespace boost {
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
class unordered_flat_map
{
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class concurrent_flat_map;
using map_types = detail::foa::flat_map_types<Key, T>;
using table_type = detail::foa::table<map_types, Hash, KeyEqual,
@ -173,6 +178,12 @@ namespace boost {
{
}
unordered_flat_map(
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator>&& other)
: table_(std::move(other.table_))
{
}
~unordered_flat_map() = default;
unordered_flat_map& operator=(unordered_flat_map const& other)


@ -33,6 +33,9 @@ using hasher = stateful_hash;
using key_equal = stateful_key_equal;
using allocator_type = stateful_allocator<std::pair<raii const, raii> >;
using flat_map_type = boost::unordered::unordered_flat_map<raii, raii, hasher,
key_equal, allocator_type>;
using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
key_equal, allocator_type>;
@ -843,6 +846,136 @@ namespace {
}
check_raii_counts();
}
template <class G> void flat_map_move_assign(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
/*
* basically test that a temporary container is materialized and we
* move-assign from that
*
* we don't need to be super rigorous here because we already have tests for
* container assignment, we're just testing that a temporary is materialized
*/
{
raii::reset_counts();
flat_map_type flat_map(values.begin(), values.end(), values.size(),
hasher(1), key_equal(2), allocator_type(3));
map_type map(0, hasher(2), key_equal(1), allocator_type(3));
BOOST_TEST(flat_map.get_allocator() == map.get_allocator());
map = std::move(flat_map);
BOOST_TEST(flat_map.empty());
BOOST_TEST_EQ(map.size(), reference_map.size());
test_fuzzy_matches_reference(map, reference_map, rg);
BOOST_TEST_EQ(map.hash_function(), hasher(1));
BOOST_TEST_EQ(map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(raii::destructor, 2 * values.size());
BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
{
raii::reset_counts();
map_type map(values.begin(), values.end(), values.size(), hasher(1),
key_equal(2), allocator_type(3));
flat_map_type flat_map(0, hasher(2), key_equal(1), allocator_type(3));
BOOST_TEST(flat_map.get_allocator() == map.get_allocator());
flat_map = std::move(map);
BOOST_TEST(map.empty());
BOOST_TEST_EQ(flat_map.size(), reference_map.size());
BOOST_TEST_EQ(flat_map.hash_function(), hasher(1));
BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(raii::destructor, 2 * values.size());
BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
{
raii::reset_counts();
flat_map_type flat_map(values.begin(), values.end(), values.size(),
hasher(1), key_equal(2), allocator_type(3));
map_type map(0, hasher(2), key_equal(1), allocator_type(4));
BOOST_TEST(flat_map.get_allocator() != map.get_allocator());
map = std::move(flat_map);
BOOST_TEST(flat_map.empty());
BOOST_TEST_EQ(map.size(), reference_map.size());
test_fuzzy_matches_reference(map, reference_map, rg);
BOOST_TEST_EQ(map.hash_function(), hasher(1));
BOOST_TEST_EQ(map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(
raii::destructor, 2 * values.size() + 2 * reference_map.size());
BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
{
raii::reset_counts();
map_type map(values.begin(), values.end(), values.size(), hasher(1),
key_equal(2), allocator_type(3));
flat_map_type flat_map(0, hasher(2), key_equal(1), allocator_type(4));
BOOST_TEST(flat_map.get_allocator() != map.get_allocator());
flat_map = std::move(map);
BOOST_TEST(map.empty());
BOOST_TEST_EQ(flat_map.size(), reference_map.size());
BOOST_TEST_EQ(flat_map.hash_function(), hasher(1));
BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(
raii::destructor, 2 * values.size() + 2 * reference_map.size());
BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
}
} // namespace
// clang-format off
@ -860,6 +993,11 @@ UNORDERED_TEST(
insert_and_assign,
((init_type_generator))
((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
flat_map_move_assign,
((init_type_generator))
((default_generator)(sequential)(limited_range)))
// clang-format on
RUN_TESTS()


@ -775,6 +775,109 @@ namespace {
check_raii_counts();
}
template <class G> void flat_map_constructor(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>(
values.begin(), values.end(), values.size());
raii::reset_counts();
{
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
flat_map(values.begin(), values.end(), reference_map.size(), hasher(1),
key_equal(2), allocator_type(3));
auto const old_dc = +raii::default_constructor;
auto const old_mc = +raii::move_constructor;
auto const old_cc = +raii::copy_constructor;
BOOST_TEST_EQ(old_dc, 0u);
BOOST_TEST_GT(old_mc, 0u);
BOOST_TEST_GT(old_cc, 0u);
map_type x(std::move(flat_map));
test_fuzzy_matches_reference(x, reference_map, rg);
BOOST_TEST_EQ(+raii::default_constructor, old_dc);
BOOST_TEST_EQ(+raii::move_constructor, old_mc);
BOOST_TEST_EQ(+raii::copy_constructor, old_cc);
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
{
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
flat_map(0, hasher(1), key_equal(2), allocator_type(3));
map_type x(std::move(flat_map));
BOOST_TEST(x.empty());
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
{
map_type flat_map(values.begin(), values.end(), reference_map.size(),
hasher(1), key_equal(2), allocator_type(3));
auto const old_dc = +raii::default_constructor;
auto const old_mc = +raii::move_constructor;
auto const old_cc = +raii::copy_constructor;
BOOST_TEST_EQ(old_dc, 0u);
BOOST_TEST_GT(old_mc, 0u);
BOOST_TEST_GT(old_cc, 0u);
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
x(std::move(flat_map));
BOOST_TEST(x == reference_map);
BOOST_TEST_EQ(+raii::default_constructor, old_dc);
BOOST_TEST_EQ(+raii::move_constructor, old_mc);
BOOST_TEST_EQ(+raii::copy_constructor, old_cc);
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
{
map_type flat_map(0, hasher(1), key_equal(2), allocator_type(3));
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
x(std::move(flat_map));
BOOST_TEST(x.empty());
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
}
} // namespace
// clang-format off
@ -818,6 +921,11 @@ UNORDERED_TEST(
((value_type_generator))
((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
flat_map_constructor,
((value_type_generator))
((default_generator)(sequential)(limited_range)))
// clang-format on
RUN_TESTS()