Merge branch 'develop' into feature/detect_reentrancy

This commit is contained in:
joaquintides
2023-08-12 09:54:22 +02:00
committed by GitHub
14 changed files with 897 additions and 45 deletions

View File

@ -8,6 +8,10 @@
== Release 1.84.0
* Added `[c]visit_while` operations to `boost::concurrent_flat_map`,
with serial and parallel variants.
* Added efficient move construction of `boost::unordered_flat_map` from
`boost::concurrent_flat_map` and vice versa.
* Added debug mode mechanisms for detecting illegal reentrancies into
a `boost::concurrent_flat_map` from user code.

View File

@ -154,7 +154,28 @@ m.visit_all(std::execution::par, [](auto& x) { // run in parallel
});
----
Traversal can be interrupted midway:
[source,c++]
----
// finds the key to a given (unique) value
int key = 0;
int value = ...;
bool found = !m.visit_while([&](const auto& x) {
if(x.second == value) {
key = x.first;
return false; // finish
}
else {
return true; // keep on visiting
}
});
if(found) { ... }
----
There is one last whole-table visitation operation, `erase_if`:
[source,c++]
----
@ -163,8 +184,8 @@ m.erase_if([](auto& x) {
});
----
`visit_while` and `erase_if` can also be parallelized. Note that, in order to increase efficiency,
whole-table visitation operations do not block the table during execution: this implies that elements
may be inserted, modified or erased by other threads during visitation. It is
advisable not to assume too much about the exact global state of a `boost::concurrent_flat_map`
at any point in your program.
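For instance, a minimal sketch of a parallel `erase_if` call (assuming the same map `m` as in the previous examples, that C++17 parallel algorithms are available, and an illustrative erasure predicate):
[source,c++]
----
// erase, in parallel, every element whose mapped value is negative
m.erase_if(std::execution::par, [](auto& x) {
  return x.second < 0;
});
----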
@ -180,3 +201,29 @@ and the user need not take any special precaution, but overall performance may b
Another blocking operation is _rehashing_, which happens explicitly via `rehash`/`reserve`
or during insertion when the table's load hits `max_load()`. As with non-concurrent containers,
reserving space in advance of bulk insertions will generally speed up the process.
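As an illustrative sketch (the element count `n` and the `compute_value` helper are hypothetical):
[source,c++]
----
boost::concurrent_flat_map<int, int> m;
m.reserve(n); // one rehash up front instead of several during insertion
for(int i = 0; i < n; ++i) {
  m.emplace(i, compute_value(i)); // hypothetical value generator
}
----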
== Interoperability with non-concurrent containers
As their internal data structures are basically the same, a `boost::unordered_flat_map` can
be efficiently move-constructed from a `boost::concurrent_flat_map` and vice versa.
This interoperability comes in handy in multistage scenarios where parts of the data processing happen
in parallel whereas other steps are non-concurrent (or non-modifying). In the following example,
we want to construct a histogram from a huge input vector of words:
the population phase can be done in parallel with `boost::concurrent_flat_map`, and the results are
then transferred to the final container.
[source,c++]
----
std::vector<std::string> words = ...;
// Insert words in parallel
boost::concurrent_flat_map<std::string_view, std::size_t> m0;
std::for_each(
std::execution::par, words.begin(), words.end(),
[&](const auto& word) {
m0.try_emplace_or_visit(word, 1, [](auto& x) { ++x.second; });
});
// Transfer to a regular unordered_flat_map
boost::unordered_flat_map m=std::move(m0);
----
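The transfer also works in the opposite direction; as a sketch (spelling out the key and mapped types deduced above), the histogram could be moved back into a concurrent container for a further parallel stage:
[source,c++]
----
// move back to a concurrent container for another parallel phase
boost::concurrent_flat_map<std::string_view, std::size_t> m1(std::move(m));
----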

View File

@ -69,6 +69,7 @@ namespace boost {
explicit xref:#concurrent_flat_map_allocator_constructor[concurrent_flat_map](const Allocator& a);
xref:#concurrent_flat_map_copy_constructor_with_allocator[concurrent_flat_map](const concurrent_flat_map& other, const Allocator& a);
xref:#concurrent_flat_map_move_constructor_with_allocator[concurrent_flat_map](concurrent_flat_map&& other, const Allocator& a);
xref:#concurrent_flat_map_move_constructor_from_unordered_flat_map[concurrent_flat_map](unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
xref:#concurrent_flat_map_initializer_list_constructor[concurrent_flat_map](std::initializer_list<value_type> il,
size_type n = _implementation-defined_
const hasher& hf = hasher(),
@ -114,6 +115,16 @@ namespace boost {
template<class ExecutionPolicy, class F>
void xref:#concurrent_flat_map_parallel_cvisit_all[cvisit_all](ExecutionPolicy&& policy, F f) const;
template<class F> bool xref:#concurrent_flat_map_cvisit_while[visit_while](F f);
template<class F> bool xref:#concurrent_flat_map_cvisit_while[visit_while](F f) const;
template<class F> bool xref:#concurrent_flat_map_cvisit_while[cvisit_while](F f) const;
template<class ExecutionPolicy, class F>
bool xref:#concurrent_flat_map_parallel_cvisit_while[visit_while](ExecutionPolicy&& policy, F f);
template<class ExecutionPolicy, class F>
bool xref:#concurrent_flat_map_parallel_cvisit_while[visit_while](ExecutionPolicy&& policy, F f) const;
template<class ExecutionPolicy, class F>
bool xref:#concurrent_flat_map_parallel_cvisit_while[cvisit_while](ExecutionPolicy&& policy, F f) const;
// capacity
++[[nodiscard]]++ bool xref:#concurrent_flat_map_empty[empty]() const noexcept;
size_type xref:#concurrent_flat_map_size[size]() const noexcept;
@ -503,6 +514,21 @@ Concurrency:;; Blocking on `other`.
---
==== Move Constructor from unordered_flat_map
```c++
concurrent_flat_map(unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other);
```
Move construction from a xref:#unordered_flat_map[`unordered_flat_map`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are move-constructed from `other`.
[horizontal]
Complexity:;; O(`bucket_count()`)
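For illustration, a minimal usage sketch (not part of the reference synopsis):
```c++
boost::unordered_flat_map<int, int> src{{1, 10}, {2, 20}};
boost::concurrent_flat_map<int, int> dst(std::move(src)); // takes over src's bucket array
// src is left in a valid, empty state
```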
---
==== Initializer List Constructor
[source,c++,subs="+quotes"]
----
@ -732,6 +758,50 @@ Unsequenced execution policies are not allowed.
---
==== [c]visit_while
```c++
template<class F> bool visit_while(F f);
template<class F> bool visit_while(F f) const;
template<class F> bool cvisit_while(F f) const;
```
Successively invokes `f` with references to each of the elements in the table until `f` returns `false`
or all the elements are visited.
Such references to the elements are const iff `*this` is const.
[horizontal]
Returns:;; `false` iff `f` ever returns `false`.
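For illustration, a short usage sketch:
```c++
boost::concurrent_flat_map<int, int> m{{1, 1}, {2, 4}, {3, 9}};
// stop visiting as soon as a mapped value of 4 is seen
bool completed = m.cvisit_while([](const auto& x) {
  return x.second != 4; // returning false interrupts the traversal
});
// completed == false because the visitor returned false for one element
```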
---
==== Parallel [c]visit_while
```c++
template<class ExecutionPolicy, class F> bool visit_while(ExecutionPolicy&& policy, F f);
template<class ExecutionPolicy, class F> bool visit_while(ExecutionPolicy&& policy, F f) const;
template<class ExecutionPolicy, class F> bool cvisit_while(ExecutionPolicy&& policy, F f) const;
```
Invokes `f` with references to each of the elements in the table until `f` returns `false`
or all the elements are visited.
Such references to the elements are const iff `*this` is const.
Execution is parallelized according to the semantics of the execution policy specified.
[horizontal]
Returns:;; `false` iff `f` ever returns `false`.
Throws:;; Depending on the exception handling mechanism of the execution policy used, may call `std::terminate` if an exception is thrown within `f`.
Notes:;; Only available in compilers supporting C++17 parallel algorithms. +
+
These overloads only participate in overload resolution if `std::is_execution_policy_v<std::remove_cvref_t<ExecutionPolicy>>` is `true`. +
+
Unsequenced execution policies are not allowed. +
+
Parallelization implies that execution does not necessarily finish as soon as `f` returns `false`, and as a result
`f` may be invoked with further elements after it has first returned `false`.
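For illustration, a sketch assuming C++17 parallel algorithms are available and `m` is a populated `boost::concurrent_flat_map<int, int>`:
```c++
// check in parallel that no mapped value is negative; because of parallelization,
// some extra elements may still be visited after the first false return
bool none_negative = m.cvisit_while(std::execution::par, [](const auto& x) {
  return x.second >= 0;
});
```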
---
=== Size and Capacity
==== empty

View File

@ -77,6 +77,7 @@ namespace boost {
explicit xref:#unordered_flat_map_allocator_constructor[unordered_flat_map](const Allocator& a);
xref:#unordered_flat_map_copy_constructor_with_allocator[unordered_flat_map](const unordered_flat_map& other, const Allocator& a);
xref:#unordered_flat_map_move_constructor_with_allocator[unordered_flat_map](unordered_flat_map&& other, const Allocator& a);
xref:#unordered_flat_map_move_constructor_from_concurrent_flat_map[unordered_flat_map](concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
xref:#unordered_flat_map_initializer_list_constructor[unordered_flat_map](std::initializer_list<value_type> il,
size_type n = _implementation-defined_
const hasher& hf = hasher(),
@ -472,6 +473,22 @@ from `other`, and the allocator is copy-constructed from `a`.
---
==== Move Constructor from concurrent_flat_map
```c++
unordered_flat_map(concurrent_flat_map<Key, T, Hash, Pred, Allocator>&& other);
```
Move construction from a xref:#concurrent_flat_map[`concurrent_flat_map`].
The internal bucket array of `other` is transferred directly to the new container.
The hash function, predicate and allocator are move-constructed from `other`.
[horizontal]
Complexity:;; Constant time.
Concurrency:;; Blocking on `other`.
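For illustration, a minimal usage sketch:
```c++
boost::concurrent_flat_map<int, int> src;
// ... populate src, potentially from several threads ...
boost::unordered_flat_map<int, int> dst(std::move(src)); // constant time, blocks on src
```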
---
==== Initializer List Constructor
[source,c++,subs="+quotes"]
----

View File

@ -15,6 +15,7 @@
#include <boost/unordered/detail/foa/concurrent_table.hpp>
#include <boost/unordered/detail/foa/flat_map_types.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_flat_map_fwd.hpp>
#include <boost/container_hash/hash.hpp>
#include <boost/core/allocator_access.hpp>
@ -84,6 +85,9 @@ namespace boost {
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class concurrent_flat_map;
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class unordered_flat_map;
using type_policy = detail::foa::flat_map_types<Key, T>;
@ -223,6 +227,13 @@ namespace boost {
{
}
concurrent_flat_map(
unordered_flat_map<Key, T, Hash, Pred, Allocator>&& other)
: table_(std::move(other.table_))
{
}
~concurrent_flat_map() = default;
concurrent_flat_map& operator=(concurrent_flat_map const& rhs)
@ -355,6 +366,56 @@ namespace boost {
}
#endif
template <class F> bool visit_while(F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
return table_.visit_while(f);
}
template <class F> bool visit_while(F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit_while(f);
}
template <class F> bool cvisit_while(F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.cvisit_while(f);
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
bool>::type
visit_while(ExecPolicy&& p, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
return table_.visit_while(p, f);
}
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
bool>::type
visit_while(ExecPolicy&& p, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
return table_.visit_while(p, f);
}
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
bool>::type
cvisit_while(ExecPolicy&& p, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
return table_.cvisit_while(p, f);
}
#endif
/// Modifiers
///

View File

@ -253,7 +253,21 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
template<typename Allocator>
static concurrent_table_arrays new_(Allocator& al,std::size_t n)
{
super x{super::new_(al,n)};
BOOST_TRY{
return new_group_access(al,x);
}
BOOST_CATCH(...){
super::delete_(al,x);
BOOST_RETHROW
}
BOOST_CATCH_END
}
template<typename Allocator>
static concurrent_table_arrays new_group_access(Allocator& al,const super& x)
{
concurrent_table_arrays arrays{x,nullptr};
if(!arrays.elements){
arrays.group_accesses=dummy_group_accesses<SizePolicy::min_size()>();
}
@ -262,26 +276,26 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
typename boost::allocator_rebind<Allocator,group_access>::type;
using access_traits=boost::allocator_traits<access_alloc>;
auto aal=access_alloc(al);
arrays.group_accesses=boost::to_address(
access_traits::allocate(aal,arrays.groups_size_mask+1));
for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
::new (arrays.group_accesses+i) group_access();
}
}
return arrays;
}
template<typename Allocator>
static void delete_(Allocator& al,concurrent_table_arrays& arrays)noexcept
{
delete_group_access(al,arrays);
super::delete_(al,arrays);
}
template<typename Allocator>
static void delete_group_access(Allocator& al,concurrent_table_arrays& arrays)noexcept
{
if(arrays.elements){
using access_alloc=
@ -295,7 +309,6 @@ struct concurrent_table_arrays:table_arrays<Value,Group,SizePolicy>
aal,pointer_traits::pointer_to(*arrays.group_accesses),
arrays.groups_size_mask+1);
}
}
group_access *group_accesses;
@ -308,7 +321,7 @@ struct atomic_size_control
atomic_size_control(std::size_t ml_,std::size_t size_):
pad0_{},ml{ml_},pad1_{},size{size_}{}
atomic_size_control(const atomic_size_control& x):
pad0_{},ml{x.ml.load()},pad1_{},size{x.size.load()}{}
/* padding to avoid false sharing internally and with surrounding data */
@ -360,7 +373,7 @@ inline void swap(atomic_size_control& x,atomic_size_control& y)
* - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based
* on C++17 stdlib parallel algorithms.
*
* Consult boost::concurrent_flat_map docs for the full API reference.
* Heterogeneous lookup is supported by default, that is, without checking for
* any ::is_transparent typedefs --this checking is done by the wrapping
* containers.
@ -392,6 +405,9 @@ inline void swap(atomic_size_control& x,atomic_size_control& y)
* over.
*/
template<typename,typename,typename,typename>
class table; /* concurrent/non-concurrent interop */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using concurrent_table_core_impl=table_core<
TypePolicy,group15<atomic_integral>,concurrent_table_arrays,
@ -413,10 +429,10 @@ class concurrent_table:
using group_type=typename super::group_type;
using super::N;
using prober=typename super::prober;
using arrays_type=typename super::arrays_type;
using size_ctrl_type=typename super::size_ctrl_type;
using compatible_nonconcurrent_table=table<TypePolicy,Hash,Pred,Allocator>;
friend compatible_nonconcurrent_table;
public:
using key_type=typename super::key_type;
@ -451,6 +467,21 @@ public:
concurrent_table(x,al_,x.exclusive_access()){}
concurrent_table(concurrent_table&& x,const Allocator& al_):
concurrent_table(std::move(x),al_,x.exclusive_access()){}
concurrent_table(compatible_nonconcurrent_table&& x):
super{
std::move(x.h()),std::move(x.pred()),std::move(x.al()),
arrays_type(arrays_type::new_group_access(
x.al(),
typename arrays_type::super{
x.arrays.groups_size_index,x.arrays.groups_size_mask,
reinterpret_cast<group_type*>(x.arrays.groups),
reinterpret_cast<value_type*>(x.arrays.elements)})),
size_ctrl_type{x.size_ctrl.ml,x.size_ctrl.size}}
{
x.empty_initialize();
}
~concurrent_table()=default;
concurrent_table& operator=(const concurrent_table& x)
@ -540,6 +571,46 @@ public:
}
#endif
template<typename F> bool visit_while(F&& f)
{
return visit_while_impl(group_exclusive{},std::forward<F>(f));
}
template<typename F> bool visit_while(F&& f)const
{
return visit_while_impl(group_shared{},std::forward<F>(f));
}
template<typename F> bool cvisit_while(F&& f)const
{
return visit_while(std::forward<F>(f));
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
template<typename ExecutionPolicy,typename F>
bool visit_while(ExecutionPolicy&& policy,F&& f)
{
return visit_while_impl(
group_exclusive{},
std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
}
template<typename ExecutionPolicy,typename F>
bool visit_while(ExecutionPolicy&& policy,F&& f)const
{
return visit_while_impl(
group_shared{},
std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
}
template<typename ExecutionPolicy,typename F>
bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
{
return visit_while(
std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
}
#endif
bool empty()const noexcept{return size()==0;}
std::size_t size()const noexcept
@ -836,6 +907,8 @@ public:
}
private:
template<typename,typename,typename,typename> friend class concurrent_table;
using mutex_type=rw_spinlock;
using multimutex_type=multimutex<mutex_type,128>; // TODO: adapt 128 to the machine
using shared_lock_guard=reentrancy_checked<shared_lock<mutex_type>>;
@ -971,6 +1044,29 @@ private:
}
#endif
template<typename GroupAccessMode,typename F>
bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
{
auto lck=shared_access();
return for_all_elements_while(access_mode,[&](element_type* p){
return f(cast_for(access_mode,type_policy::value_from(*p)));
});
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
bool visit_while_impl(
GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
{
auto lck=shared_access();
return for_all_elements_while(
access_mode,std::forward<ExecutionPolicy>(policy),
[&](element_type* p){
return f(cast_for(access_mode,type_policy::value_from(*p)));
});
}
#endif
template<typename GroupAccessMode,typename Key,typename F>
BOOST_FORCEINLINE std::size_t unprotected_visit(
GroupAccessMode access_mode,
@ -1254,19 +1350,38 @@ private:
template<typename GroupAccessMode,typename F>
auto for_all_elements(GroupAccessMode access_mode,F f)const
->decltype(f(nullptr,0,nullptr),void())
{
for_all_elements_while(
access_mode,[&](group_type* pg,unsigned int n,element_type* p)
{f(pg,n,p);return true;});
}
template<typename GroupAccessMode,typename F>
auto for_all_elements_while(GroupAccessMode access_mode,F f)const
->decltype(f(nullptr),bool())
{
return for_all_elements_while(
access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
}
template<typename GroupAccessMode,typename F>
auto for_all_elements_while(GroupAccessMode access_mode,F f)const
->decltype(f(nullptr,0,nullptr),bool())
{
auto p=this->arrays.elements;
if(p){
for(auto pg=this->arrays.groups,last=pg+this->arrays.groups_size_mask+1;
pg!=last;++pg,p+=N){
auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups));
auto mask=this->match_really_occupied(pg,last);
while(mask){
auto n=unchecked_countr_zero(mask);
if(!f(pg,n,p+n))return false;
mask&=mask-1;
}
}
}
return true;
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
@ -1290,10 +1405,10 @@ private:
last=first+this->arrays.groups_size_mask+1;
std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
[&,this](group_type& g){
auto pos=static_cast<std::size_t>(&g-first);
auto p=this->arrays.elements+pos*N;
auto lck=access(access_mode,pos);
auto mask=this->match_really_occupied(&g,last);
while(mask){
auto n=unchecked_countr_zero(mask);
f(&g,n,p+n);
@ -1302,6 +1417,29 @@ private:
}
);
}
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
bool for_all_elements_while(
GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
{
if(!this->arrays.elements)return true;
auto first=this->arrays.groups,
last=first+this->arrays.groups_size_mask+1;
return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
[&,this](group_type& g){
auto pos=static_cast<std::size_t>(&g-first);
auto p=this->arrays.elements+pos*N;
auto lck=access(access_mode,pos);
auto mask=this->match_really_occupied(&g,last);
while(mask){
auto n=unchecked_countr_zero(mask);
if(!f(p+n))return false;
mask&=mask-1;
}
return true;
}
);
}
#endif
static std::atomic<std::size_t> thread_counter;

View File

@ -1282,6 +1282,17 @@ public:
size_ctrl{initial_max_load(),0}
{}
/* bare transfer ctor for concurrent/non-concurrent interop */
table_core(
Hash&& h_,Pred&& pred_,Allocator&& al_,
const arrays_type& arrays_,const size_ctrl_type& size_ctrl_):
hash_base{empty_init,std::move(h_)},
pred_base{empty_init,std::move(pred_)},
allocator_base{empty_init,std::move(al_)},
arrays(arrays_),size_ctrl(size_ctrl_)
{}
table_core(const table_core& x):
table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{}
@ -1290,14 +1301,11 @@ public:
std::is_nothrow_move_constructible<Hash>::value&&
std::is_nothrow_move_constructible<Pred>::value&&
std::is_nothrow_move_constructible<Allocator>::value):
table_core{
std::move(x.h()),std::move(x.pred()),std::move(x.al()),
x.arrays,x.size_ctrl}
{
x.empty_initialize();
}
table_core(const table_core& x,const Allocator& al_):
@ -1336,6 +1344,13 @@ public:
delete_arrays(arrays);
}
void empty_initialize()noexcept
{
arrays=new_arrays(0);
size_ctrl.ml=initial_max_load();
size_ctrl.size=0;
}
table_core& operator=(const table_core& x)
{
BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred)
@ -1804,7 +1819,8 @@ private:
pred_base{empty_init,std::move(pred_)},
allocator_base{empty_init,al_},arrays(new_arrays(0)),
size_ctrl{initial_max_load(),0}
{
}
arrays_type new_arrays(std::size_t n)
{

View File

@ -6,6 +6,7 @@
#define BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP
#include <boost/core/allocator_access.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/core/pointer_traits.hpp>
namespace boost {

View File

@ -6,6 +6,7 @@
#define BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP
#include <boost/core/allocator_access.hpp>
#include <boost/core/no_exceptions_support.hpp>
#include <boost/core/pointer_traits.hpp>
namespace boost {

View File

@ -264,6 +264,9 @@ private:
* checking is done by boost::unordered_(flat|node)_(map|set).
*/
template<typename,typename,typename,typename>
class concurrent_table; /* concurrent/non-concurrent interop */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using table_core_impl=
table_core<TypePolicy,group15<plain_integral>,table_arrays,
@ -284,7 +287,12 @@ class table:table_core_impl<TypePolicy,Hash,Pred,Allocator>
using group_type=typename super::group_type;
using super::N;
using prober=typename super::prober;
using arrays_type=typename super::arrays_type;
using size_ctrl_type=typename super::size_ctrl_type;
using locator=typename super::locator;
using compatible_concurrent_table=
concurrent_table<TypePolicy,Hash,Pred,Allocator>;
friend compatible_concurrent_table;
public:
using key_type=typename super::key_type;
@ -323,6 +331,8 @@ public:
table(table&& x)=default;
table(const table& x,const Allocator& al_):super{x,al_}{}
table(table&& x,const Allocator& al_):super{std::move(x),al_}{}
table(compatible_concurrent_table&& x):
table(std::move(x),x.exclusive_access()){}
~table()=default;
table& operator=(const table& x)=default;
@ -496,6 +506,22 @@ public:
friend bool operator!=(const table& x,const table& y){return !(x==y);}
private:
template<typename ExclusiveLockGuard>
table(compatible_concurrent_table&& x,ExclusiveLockGuard):
super{
std::move(x.h()),std::move(x.pred()),std::move(x.al()),
arrays_type{
x.arrays.groups_size_index,x.arrays.groups_size_mask,
reinterpret_cast<group_type*>(x.arrays.groups),
reinterpret_cast<value_type*>(x.arrays.elements)},
size_ctrl_type{
x.size_ctrl.ml,x.size_ctrl.size}}
{
compatible_concurrent_table::arrays_type::delete_group_access(
this->al(),x.arrays);
x.empty_initialize();
}
struct erase_on_exit
{
erase_on_exit(table& x_,const_iterator it_):x{x_},it{it_}{}

View File

@ -10,6 +10,7 @@
#pragma once
#endif
#include <boost/unordered/concurrent_flat_map_fwd.hpp>
#include <boost/unordered/detail/foa/flat_map_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/type_traits.hpp>
@ -36,6 +37,10 @@ namespace boost {
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
class unordered_flat_map
{
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class concurrent_flat_map;
using map_types = detail::foa::flat_map_types<Key, T>;
using table_type = detail::foa::table<map_types, Hash, KeyEqual,
@ -173,6 +178,12 @@ namespace boost {
{
}
unordered_flat_map(
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator>&& other)
: table_(std::move(other.table_))
{
}
~unordered_flat_map() = default;
unordered_flat_map& operator=(unordered_flat_map const& other)

View File

@ -33,6 +33,9 @@ using hasher = stateful_hash;
using key_equal = stateful_key_equal;
using allocator_type = stateful_allocator<std::pair<raii const, raii> >;
using flat_map_type = boost::unordered::unordered_flat_map<raii, raii, hasher,
key_equal, allocator_type>;
using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
key_equal, allocator_type>;
@ -843,6 +846,136 @@ namespace {
}
check_raii_counts();
}
template <class G> void flat_map_move_assign(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
/*
* basically test that a temporary container is materialized and we
* move-assign from that
*
* we don't need to be super rigorous here because we already have tests for
* container assignment, we're just testing that a temporary is materialized
*/
{
raii::reset_counts();
flat_map_type flat_map(values.begin(), values.end(), values.size(),
hasher(1), key_equal(2), allocator_type(3));
map_type map(0, hasher(2), key_equal(1), allocator_type(3));
BOOST_TEST(flat_map.get_allocator() == map.get_allocator());
map = std::move(flat_map);
BOOST_TEST(flat_map.empty());
BOOST_TEST_EQ(map.size(), reference_map.size());
test_fuzzy_matches_reference(map, reference_map, rg);
BOOST_TEST_EQ(map.hash_function(), hasher(1));
BOOST_TEST_EQ(map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(raii::destructor, 2 * values.size());
BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
{
raii::reset_counts();
map_type map(values.begin(), values.end(), values.size(), hasher(1),
key_equal(2), allocator_type(3));
flat_map_type flat_map(0, hasher(2), key_equal(1), allocator_type(3));
BOOST_TEST(flat_map.get_allocator() == map.get_allocator());
flat_map = std::move(map);
BOOST_TEST(map.empty());
BOOST_TEST_EQ(flat_map.size(), reference_map.size());
BOOST_TEST_EQ(flat_map.hash_function(), hasher(1));
BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(raii::destructor, 2 * values.size());
BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
{
raii::reset_counts();
flat_map_type flat_map(values.begin(), values.end(), values.size(),
hasher(1), key_equal(2), allocator_type(3));
map_type map(0, hasher(2), key_equal(1), allocator_type(4));
BOOST_TEST(flat_map.get_allocator() != map.get_allocator());
map = std::move(flat_map);
BOOST_TEST(flat_map.empty());
BOOST_TEST_EQ(map.size(), reference_map.size());
test_fuzzy_matches_reference(map, reference_map, rg);
BOOST_TEST_EQ(map.hash_function(), hasher(1));
BOOST_TEST_EQ(map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(
raii::destructor, 2 * values.size() + 2 * reference_map.size());
BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
{
raii::reset_counts();
map_type map(values.begin(), values.end(), values.size(), hasher(1),
key_equal(2), allocator_type(3));
flat_map_type flat_map(0, hasher(2), key_equal(1), allocator_type(4));
BOOST_TEST(flat_map.get_allocator() != map.get_allocator());
flat_map = std::move(map);
BOOST_TEST(map.empty());
BOOST_TEST_EQ(flat_map.size(), reference_map.size());
BOOST_TEST_EQ(flat_map.hash_function(), hasher(1));
BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2));
BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size());
BOOST_TEST_EQ(
raii::destructor, 2 * values.size() + 2 * reference_map.size());
BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
}
check_raii_counts();
}
} // namespace
// clang-format off
@ -860,6 +993,11 @@ UNORDERED_TEST(
insert_and_assign,
((init_type_generator))
((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
flat_map_move_assign,
((init_type_generator))
((default_generator)(sequential)(limited_range)))
// clang-format on
RUN_TESTS()

View File

@ -775,6 +775,109 @@ namespace {
check_raii_counts();
}
template <class G> void flat_map_constructor(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>(
values.begin(), values.end(), values.size());
raii::reset_counts();
{
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
flat_map(values.begin(), values.end(), reference_map.size(), hasher(1),
key_equal(2), allocator_type(3));
auto const old_dc = +raii::default_constructor;
auto const old_mc = +raii::move_constructor;
auto const old_cc = +raii::copy_constructor;
BOOST_TEST_EQ(old_dc, 0u);
BOOST_TEST_GT(old_mc, 0u);
BOOST_TEST_GT(old_cc, 0u);
map_type x(std::move(flat_map));
test_fuzzy_matches_reference(x, reference_map, rg);
BOOST_TEST_EQ(+raii::default_constructor, old_dc);
BOOST_TEST_EQ(+raii::move_constructor, old_mc);
BOOST_TEST_EQ(+raii::copy_constructor, old_cc);
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
{
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
flat_map(0, hasher(1), key_equal(2), allocator_type(3));
map_type x(std::move(flat_map));
BOOST_TEST(x.empty());
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
{
map_type flat_map(values.begin(), values.end(), reference_map.size(),
hasher(1), key_equal(2), allocator_type(3));
auto const old_dc = +raii::default_constructor;
auto const old_mc = +raii::move_constructor;
auto const old_cc = +raii::copy_constructor;
BOOST_TEST_EQ(old_dc, 0u);
BOOST_TEST_GT(old_mc, 0u);
BOOST_TEST_GT(old_cc, 0u);
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
x(std::move(flat_map));
BOOST_TEST(x == reference_map);
BOOST_TEST_EQ(+raii::default_constructor, old_dc);
BOOST_TEST_EQ(+raii::move_constructor, old_mc);
BOOST_TEST_EQ(+raii::copy_constructor, old_cc);
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
{
map_type flat_map(0, hasher(1), key_equal(2), allocator_type(3));
boost::unordered_flat_map<raii, raii, hasher, key_equal, allocator_type>
x(std::move(flat_map));
BOOST_TEST(x.empty());
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
BOOST_TEST(flat_map.empty());
}
check_raii_counts();
}
} // namespace
// clang-format off
@ -818,6 +921,11 @@ UNORDERED_TEST(
((value_type_generator))
((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
flat_map_constructor,
((value_type_generator))
((default_generator)(sequential)(limited_range)))
// clang-format on
RUN_TESTS()

View File

@ -349,6 +349,106 @@ namespace {
} visit_all;
struct visit_while_type
{
template <class T, class X, class M>
void operator()(std::vector<T>& values, X& x, M const& reference_map)
{
using value_type = typename X::value_type;
auto mut_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
auto const_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
auto mut_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
auto const_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
{
thread_runner(values, [&x, &mut_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(x.visit_while(mut_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST(y.visit_while(const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(x.cvisit_while(const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
{
thread_runner(values, [&x, &mut_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(x.visit_while(mut_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST_NOT(y.visit_while(const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(x.cvisit_while(const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
}
} visit_while;
struct exec_policy_visit_all_type
{
template <class T, class X, class M>
@ -407,6 +507,120 @@ namespace {
}
} exec_policy_visit_all;
struct exec_policy_visit_while_type
{
template <class T, class X, class M>
void operator()(std::vector<T>& values, X& x, M const& reference_map)
{
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
using value_type = typename X::value_type;
auto mut_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
auto const_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
auto mut_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
auto const_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
{
thread_runner(values, [&x, &mut_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(
x.visit_while(std::execution::par, mut_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST(y.visit_while(
std::execution::par, const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(x.cvisit_while(
std::execution::par, const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
{
thread_runner(values, [&x, &mut_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(
x.visit_while(std::execution::par, mut_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST_NOT(y.visit_while(
std::execution::par, const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(x.cvisit_while(
std::execution::par, const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
#else
(void)values;
(void)x;
(void)reference_map;
#endif
}
} exec_policy_visit_while;
template <class X, class G, class F>
void visit(X*, G gen, F visitor, test::random_generator rg)
{
@ -570,7 +784,7 @@ UNORDERED_TEST(
visit,
((map))
((value_type_generator)(init_type_generator))
((lvalue_visitor)(visit_all)(visit_while)(exec_policy_visit_all)(exec_policy_visit_while))
((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(