Merge pull request #199 from boostorg/feature/visit_until

Feature/visit until
This commit is contained in:
Christian Mazakas
2023-08-11 12:26:13 -07:00
committed by GitHub
6 changed files with 466 additions and 17 deletions

View File

@@ -6,6 +6,11 @@
:github-pr-url: https://github.com/boostorg/unordered/pull :github-pr-url: https://github.com/boostorg/unordered/pull
:cpp: C++ :cpp: C++
== Release 1.84.0
* Added `[c]visit_while` operations to `boost::concurrent_flat_map`,
with serial and parallel variants.
== Release 1.83.0 - Major update == Release 1.83.0 - Major update
* Added `boost::concurrent_flat_map`, a fast, thread-safe hashmap based on open addressing. * Added `boost::concurrent_flat_map`, a fast, thread-safe hashmap based on open addressing.

View File

@@ -154,7 +154,28 @@ m.visit_all(std::execution::par, [](auto& x) { // run in parallel
}); });
---- ----
There is another whole-table visitation operation, `erase_if`: Traversal can be interrupted midway:
[source,c++]
----
// finds the key to a given (unique) value
int key = 0;
int value = ...;
bool found = !m.visit_while([&](const auto& x) {
if(x.second == value) {
key = x.first;
return false; // finish
}
else {
return true; // keep on visiting
}
});
if(found) { ... }
----
There is one last whole-table visitation operation, `erase_if`:
[source,c++] [source,c++]
---- ----
@@ -163,8 +184,8 @@ m.erase_if([](auto& x) {
}); });
---- ----
`erase_if` can also be parallelized. Note that, in order to increase efficiency, `visit_while` and `erase_if` can also be parallelized. Note that, in order to increase efficiency,
these operations do not block the table during execution: this implies that elements whole-table visitation operations do not block the table during execution: this implies that elements
may be inserted, modified or erased by other threads during visitation. It is may be inserted, modified or erased by other threads during visitation. It is
advisable not to assume too much about the exact global state of a `boost::concurrent_flat_map` advisable not to assume too much about the exact global state of a `boost::concurrent_flat_map`
at any point in your program. at any point in your program.

View File

@@ -114,6 +114,16 @@ namespace boost {
template<class ExecutionPolicy, class F> template<class ExecutionPolicy, class F>
void xref:#concurrent_flat_map_parallel_cvisit_all[cvisit_all](ExecutionPolicy&& policy, F f) const; void xref:#concurrent_flat_map_parallel_cvisit_all[cvisit_all](ExecutionPolicy&& policy, F f) const;
template<class F> bool xref:#concurrent_flat_map_cvisit_while[visit_while](F f);
template<class F> bool xref:#concurrent_flat_map_cvisit_while[visit_while](F f) const;
template<class F> bool xref:#concurrent_flat_map_cvisit_while[cvisit_while](F f) const;
template<class ExecutionPolicy, class F>
bool xref:#concurrent_flat_map_parallel_cvisit_while[visit_while](ExecutionPolicy&& policy, F f);
template<class ExecutionPolicy, class F>
bool xref:#concurrent_flat_map_parallel_cvisit_while[visit_while](ExecutionPolicy&& policy, F f) const;
template<class ExecutionPolicy, class F>
bool xref:#concurrent_flat_map_parallel_cvisit_while[cvisit_while](ExecutionPolicy&& policy, F f) const;
// capacity // capacity
++[[nodiscard]]++ bool xref:#concurrent_flat_map_empty[empty]() const noexcept; ++[[nodiscard]]++ bool xref:#concurrent_flat_map_empty[empty]() const noexcept;
size_type xref:#concurrent_flat_map_size[size]() const noexcept; size_type xref:#concurrent_flat_map_size[size]() const noexcept;
@@ -720,6 +730,50 @@ Unsequenced execution policies are not allowed.
--- ---
==== [c]visit_while
```c++
template<class F> bool visit_while(F f);
template<class F> bool visit_while(F f) const;
template<class F> bool cvisit_while(F f) const;
```
Successively invokes `f` with references to each of the elements in the table until `f` returns `false`
or all the elements are visited.
Such references to the elements are const iff `*this` is const.
[horizontal]
Returns:;; `false` iff `f` ever returns `false`.
---
==== Parallel [c]visit_while
```c++
template<class ExecutionPolicy, class F> bool visit_while(ExecutionPolicy&& policy, F f);
template<class ExecutionPolicy, class F> bool visit_while(ExecutionPolicy&& policy, F f) const;
template<class ExecutionPolicy, class F> bool cvisit_while(ExecutionPolicy&& policy, F f) const;
```
Invokes `f` with references to each of the elements in the table until `f` returns `false`
or all the elements are visited.
Such references to the elements are const iff `*this` is const.
Execution is parallelized according to the semantics of the execution policy specified.
[horizontal]
Returns:;; `false` iff `f` ever returns `false`.
Throws:;; Depending on the exception handling mechanism of the execution policy used, may call `std::terminate` if an exception is thrown within `f`.
Notes:;; Only available in compilers supporting C++17 parallel algorithms. +
+
These overloads only participate in overload resolution if `std::is_execution_policy_v<std::remove_cvref_t<ExecutionPolicy>>` is `true`. +
+
Unsequenced execution policies are not allowed. +
+
Parallelization implies that execution does not necessarily finish as soon as `f` returns `false`, and as a result
`f` may be invoked with further elements for which the return value is also `false`.
---
=== Size and Capacity === Size and Capacity
==== empty ==== empty

View File

@@ -355,6 +355,56 @@ namespace boost {
} }
#endif #endif
// Interruptible traversal: invokes f with a reference to each element
// until f returns false or all elements have been visited.
// Returns false iff f ever returned false.
template <class F> bool visit_while(F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
return table_.visit_while(f);
}
// Const overload: f receives const references to the elements.
template <class F> bool visit_while(F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit_while(f);
}
// Always-const variant, usable on non-const maps to force const access.
template <class F> bool cvisit_while(F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.cvisit_while(f);
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel visit_while overloads: traversal is parallelized according to
// the supplied execution policy, so f may still be invoked on further
// elements after one invocation has returned false (see docs).
// enable_if removes these overloads from overload resolution unless
// ExecPolicy is an actual execution policy type.
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
bool>::type
visit_while(ExecPolicy&& p, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
return table_.visit_while(p, f);
}
// Const parallel overload: f receives const references.
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
bool>::type
visit_while(ExecPolicy&& p, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
return table_.visit_while(p, f);
}
// Always-const parallel variant.
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
bool>::type
cvisit_while(ExecPolicy&& p, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
return table_.cvisit_while(p, f);
}
#endif
/// Modifiers /// Modifiers
/// ///

View File

@@ -539,6 +539,46 @@ public:
} }
#endif #endif
// Interruptible traversal at the table level. Non-const access selects
// group_exclusive locking so f may mutate elements; const access selects
// group_shared locking.
template<typename F> bool visit_while(F&& f)
{
return visit_while_impl(group_exclusive{},std::forward<F>(f));
}
template<typename F> bool visit_while(F&& f)const
{
return visit_while_impl(group_shared{},std::forward<F>(f));
}
// cvisit_while forwards to the const overload above.
template<typename F> bool cvisit_while(F&& f)const
{
return visit_while(std::forward<F>(f));
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel counterparts: same access-mode selection, with execution
// parallelized according to the supplied policy.
template<typename ExecutionPolicy,typename F>
bool visit_while(ExecutionPolicy&& policy,F&& f)
{
return visit_while_impl(
group_exclusive{},
std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
}
template<typename ExecutionPolicy,typename F>
bool visit_while(ExecutionPolicy&& policy,F&& f)const
{
return visit_while_impl(
group_shared{},
std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
}
template<typename ExecutionPolicy,typename F>
bool cvisit_while(ExecutionPolicy&& policy,F&& f)const
{
return visit_while(
std::forward<ExecutionPolicy>(policy),std::forward<F>(f));
}
#endif
bool empty()const noexcept{return size()==0;} bool empty()const noexcept{return size()==0;}
std::size_t size()const noexcept std::size_t size()const noexcept
@@ -970,6 +1010,29 @@ private:
} }
#endif #endif
// Core implementation: takes shared_access() on the whole table (so no
// rehash can run concurrently), then walks elements until f returns
// false. cast_for applies constness according to the access mode.
template<typename GroupAccessMode,typename F>
bool visit_while_impl(GroupAccessMode access_mode,F&& f)const
{
auto lck=shared_access();
return for_all_elements_while(access_mode,[&](element_type* p){
return f(cast_for(access_mode,type_policy::value_from(*p)));
});
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel variant: delegates to the policy-taking
// for_all_elements_while overload.
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
bool visit_while_impl(
GroupAccessMode access_mode,ExecutionPolicy&& policy,F&& f)const
{
auto lck=shared_access();
return for_all_elements_while(
access_mode,std::forward<ExecutionPolicy>(policy),
[&](element_type* p){
return f(cast_for(access_mode,type_policy::value_from(*p)));
});
}
#endif
template<typename GroupAccessMode,typename Key,typename F> template<typename GroupAccessMode,typename Key,typename F>
BOOST_FORCEINLINE std::size_t unprotected_visit( BOOST_FORCEINLINE std::size_t unprotected_visit(
GroupAccessMode access_mode, GroupAccessMode access_mode,
@@ -1253,20 +1316,39 @@ private:
template<typename GroupAccessMode,typename F> template<typename GroupAccessMode,typename F>
auto for_all_elements(GroupAccessMode access_mode,F f)const auto for_all_elements(GroupAccessMode access_mode,F f)const
->decltype(f(nullptr,0,nullptr),void()) ->decltype(f(nullptr,0,nullptr),void())
{
for_all_elements_while(
access_mode,[&](group_type* pg,unsigned int n,element_type* p)
{f(pg,n,p);return true;});
}
// Adapter: lifts an element-only callback f(element_type*) to the
// three-argument (group, slot, element) overload; traversal stops when
// f returns false, and the aggregate result is propagated back.
template<typename GroupAccessMode,typename F>
auto for_all_elements_while(GroupAccessMode access_mode,F f)const
->decltype(f(nullptr),bool())
{
return for_all_elements_while(
access_mode,[&](group_type*,unsigned int,element_type* p){return f(p);});
}
template<typename GroupAccessMode,typename F>
auto for_all_elements_while(GroupAccessMode access_mode,F f)const
->decltype(f(nullptr,0,nullptr),bool())
{ {
auto p=this->arrays.elements; auto p=this->arrays.elements;
if(!p)return; if(p){
for(auto pg=this->arrays.groups,last=pg+this->arrays.groups_size_mask+1; for(auto pg=this->arrays.groups,last=pg+this->arrays.groups_size_mask+1;
pg!=last;++pg,p+=N){ pg!=last;++pg,p+=N){
auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups)); auto lck=access(access_mode,(std::size_t)(pg-this->arrays.groups));
auto mask=this->match_really_occupied(pg,last); auto mask=this->match_really_occupied(pg,last);
while(mask){ while(mask){
auto n=unchecked_countr_zero(mask); auto n=unchecked_countr_zero(mask);
f(pg,n,p+n); if(!f(pg,n,p+n))return false;
mask&=mask-1; mask&=mask-1;
} }
} }
} }
return true;
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
template<typename GroupAccessMode,typename ExecutionPolicy,typename F> template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
@@ -1289,7 +1371,7 @@ private:
last=first+this->arrays.groups_size_mask+1; last=first+this->arrays.groups_size_mask+1;
std::for_each(std::forward<ExecutionPolicy>(policy),first,last, std::for_each(std::forward<ExecutionPolicy>(policy),first,last,
[&,this](group_type& g){ [&,this](group_type& g){
std::size_t pos=static_cast<std::size_t>(&g-first); auto pos=static_cast<std::size_t>(&g-first);
auto p=this->arrays.elements+pos*N; auto p=this->arrays.elements+pos*N;
auto lck=access(access_mode,pos); auto lck=access(access_mode,pos);
auto mask=this->match_really_occupied(&g,last); auto mask=this->match_really_occupied(&g,last);
@@ -1301,6 +1383,29 @@ private:
} }
); );
} }
// Parallel early-exit traversal over the group array via std::all_of:
// each group is locked, its occupied slots enumerated, and f applied to
// each element; a false from f fails that group's predicate. With a
// parallel policy, other groups may still be processed after a false
// result, so f can be invoked on elements "past" the logical stop point.
template<typename GroupAccessMode,typename ExecutionPolicy,typename F>
bool for_all_elements_while(
GroupAccessMode access_mode,ExecutionPolicy&& policy,F f)const
{
if(!this->arrays.elements)return true; /* empty table: trivially true */
auto first=this->arrays.groups,
last=first+this->arrays.groups_size_mask+1;
return std::all_of(std::forward<ExecutionPolicy>(policy),first,last,
[&,this](group_type& g){
auto pos=static_cast<std::size_t>(&g-first); /* group index */
auto p=this->arrays.elements+pos*N;          /* first element of group */
auto lck=access(access_mode,pos);
auto mask=this->match_really_occupied(&g,last);
while(mask){
auto n=unchecked_countr_zero(mask); /* next occupied slot */
if(!f(p+n))return false;
mask&=mask-1; /* clear lowest set bit */
}
return true;
}
);
}
#endif #endif
static std::atomic<std::size_t> thread_counter; static std::atomic<std::size_t> thread_counter;

View File

@@ -349,6 +349,106 @@ namespace {
} visit_all; } visit_all;
// Test functor for the serial [c]visit_while overloads. Each "truthy"
// visitor always returns true (full traversal expected, so visit_while
// returns true and visits size() elements); each "falsey" visitor
// returns false for most elements (early exit expected, so visit_while
// returns false after visiting at least one but fewer than size()
// elements). The factories take the shared atomic visit counter by
// reference and return the actual visitation lambda.
struct visit_while_type
{
template <class T, class X, class M>
void operator()(std::vector<T>& values, X& x, M const& reference_map)
{
using value_type = typename X::value_type;
// Mutable-reference visitor that never stops traversal.
auto mut_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
// Const-reference visitor that never stops traversal.
auto const_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
// Mutable-reference visitor that stops on most elements
// (returns true only when x_ % 100 == 0).
auto mut_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
// Const-reference visitor that stops on most elements.
auto const_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
// Full traversal, mutable overload.
{
thread_runner(values, [&x, &mut_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(x.visit_while(mut_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
// Full traversal, const overload (via const reference y).
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST(y.visit_while(const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
// Full traversal, cvisit_while on a non-const map.
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(x.cvisit_while(const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
// Early exit, mutable overload: fewer than size() visits, at least one.
{
thread_runner(values, [&x, &mut_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(x.visit_while(mut_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
// Early exit, const overload.
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST_NOT(y.visit_while(const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
// Early exit, cvisit_while.
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(x.cvisit_while(const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
}
} visit_while;
struct exec_policy_visit_all_type struct exec_policy_visit_all_type
{ {
template <class T, class X, class M> template <class T, class X, class M>
@@ -407,6 +507,120 @@ namespace {
} }
} exec_policy_visit_all; } exec_policy_visit_all;
// Test functor for the parallel [c]visit_while overloads, mirroring
// visit_while_type but passing std::execution::par. Compiled to a no-op
// when the toolchain lacks C++17 parallel algorithms. Note that with a
// parallel policy, early exit still guarantees num_visits < size() only
// because traversal eventually stops; extra visits past the first false
// are permitted by the documented semantics.
struct exec_policy_visit_while_type
{
template <class T, class X, class M>
void operator()(std::vector<T>& values, X& x, M const& reference_map)
{
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
using value_type = typename X::value_type;
// Mutable-reference visitor that never stops traversal.
auto mut_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
// Const-reference visitor that never stops traversal.
auto const_truthy_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return true;
};
};
// Mutable-reference visitor that stops on most elements.
auto mut_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
// Const-reference visitor that stops on most elements.
auto const_falsey_visitor = [&reference_map](
std::atomic<uint64_t>& num_visits) {
return [&reference_map, &num_visits](value_type const& kv) {
BOOST_TEST(reference_map.contains(kv.first));
BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second);
++num_visits;
return (kv.second.x_ % 100) == 0;
};
};
// Full traversal, mutable parallel overload.
{
thread_runner(values, [&x, &mut_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(
x.visit_while(std::execution::par, mut_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
// Full traversal, const parallel overload.
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST(y.visit_while(
std::execution::par, const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
// Full traversal, parallel cvisit_while.
{
thread_runner(values, [&x, &const_truthy_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST(x.cvisit_while(
std::execution::par, const_truthy_visitor(num_visits)));
BOOST_TEST_EQ(x.size(), num_visits);
});
}
// Early exit, mutable parallel overload.
{
thread_runner(values, [&x, &mut_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(
x.visit_while(std::execution::par, mut_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
// Early exit, const parallel overload.
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
auto const& y = x;
BOOST_TEST_NOT(y.visit_while(
std::execution::par, const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
// Early exit, parallel cvisit_while.
{
thread_runner(values, [&x, &const_falsey_visitor](boost::span<T>) {
std::atomic<std::uint64_t> num_visits{0};
BOOST_TEST_NOT(x.cvisit_while(
std::execution::par, const_falsey_visitor(num_visits)));
BOOST_TEST_LT(num_visits, x.size());
BOOST_TEST_GT(num_visits, 0u);
});
}
#else
// Silence unused-parameter warnings when parallel algorithms are absent.
(void)values;
(void)x;
(void)reference_map;
#endif
}
} exec_policy_visit_while;
template <class X, class G, class F> template <class X, class G, class F>
void visit(X*, G gen, F visitor, test::random_generator rg) void visit(X*, G gen, F visitor, test::random_generator rg)
{ {
@@ -570,7 +784,7 @@ UNORDERED_TEST(
visit, visit,
((map)) ((map))
((value_type_generator)(init_type_generator)) ((value_type_generator)(init_type_generator))
((lvalue_visitor)(visit_all)(exec_policy_visit_all)) ((lvalue_visitor)(visit_all)(visit_while)(exec_policy_visit_all)(exec_policy_visit_while))
((default_generator)(sequential)(limited_range))) ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST( UNORDERED_TEST(