From 0673c5653cd7b5f669631daa51823b694896a12b Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sun, 10 Sep 2023 18:34:12 +0200 Subject: [PATCH 01/14] added boost::concurrent_flat_set --- .../boost/unordered/concurrent_flat_map.hpp | 67 +- .../unordered/concurrent_flat_map_fwd.hpp | 2 +- .../boost/unordered/concurrent_flat_set.hpp | 697 ++++++++++++++++++ .../unordered/concurrent_flat_set_fwd.hpp | 55 ++ .../detail/concurrent_static_asserts.hpp | 75 ++ .../boost/unordered/unordered_flat_set.hpp | 10 + 6 files changed, 842 insertions(+), 64 deletions(-) create mode 100644 include/boost/unordered/concurrent_flat_set.hpp create mode 100644 include/boost/unordered/concurrent_flat_set_fwd.hpp create mode 100644 include/boost/unordered/detail/concurrent_static_asserts.hpp diff --git a/include/boost/unordered/concurrent_flat_map.hpp b/include/boost/unordered/concurrent_flat_map.hpp index c95a3015..a06ec31f 100644 --- a/include/boost/unordered/concurrent_flat_map.hpp +++ b/include/boost/unordered/concurrent_flat_map.hpp @@ -1,4 +1,4 @@ -/* Fast open-addressing concurrent hash table. +/* Fast open-addressing concurrent hashmap. * * Copyright 2023 Christian Mazakas. * Distributed under the Boost Software License, Version 1.0. @@ -12,6 +12,7 @@ #define BOOST_UNORDERED_CONCURRENT_FLAT_MAP_HPP #include +#include #include #include #include @@ -20,65 +21,12 @@ #include #include #include -#include -#include #include -#include #include -#include - -#define BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F) \ - static_assert(boost::unordered::detail::is_invocable::value, \ - "The provided Callable must be invocable with value_type&"); - -#define BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) \ - static_assert( \ - boost::unordered::detail::is_invocable::value, \ - "The provided Callable must be invocable with value_type const&"); - -#if BOOST_CXX_VERSION >= 202002L - -#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P) \ - static_assert(!std::is_base_of::value, \ - "ExecPolicy must be sequenced."); \ - static_assert( \ - !std::is_base_of::value, \ - "ExecPolicy must be sequenced."); - -#else - -#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P) \ - static_assert(!std::is_base_of::value, \ - "ExecPolicy must be sequenced."); -#endif - -#define BOOST_UNORDERED_COMMA , - -#define BOOST_UNORDERED_LAST_ARG(Arg, Args) \ - mp11::mp_back > - -#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args) \ - BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(BOOST_UNORDERED_LAST_ARG(Arg, Args)) - -#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args) \ - BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE( \ - BOOST_UNORDERED_LAST_ARG(Arg, Args)) namespace boost { namespace unordered { - namespace detail { - template - struct is_invocable - : std::is_constructible, - std::reference_wrapper::type> > - { - }; - - } // namespace detail - template class concurrent_flat_map { @@ -479,6 +427,7 @@ namespace boost { BOOST_FORCEINLINE auto insert_or_visit(Ty&& value, F f) -> decltype(table_.insert_or_visit(std::forward(value), f)) { + BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F) return table_.insert_or_visit(std::forward(value), f); } @@ -533,7 +482,7 @@ namespace boost { void insert_or_cvisit(std::initializer_list ilist, F f) { BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) - this->insert_or_visit(ilist.begin(), ilist.end(), f); + this->insert_or_cvisit(ilist.begin(), ilist.end(), f); } template BOOST_FORCEINLINE bool emplace(Args&&... 
args) @@ -882,12 +831,4 @@ namespace boost { using unordered::concurrent_flat_map; } // namespace boost -#undef BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE -#undef BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE -#undef BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY -#undef BOOST_UNORDERED_COMMA -#undef BOOST_UNORDERED_LAST_ARG -#undef BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE -#undef BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE - #endif // BOOST_UNORDERED_CONCURRENT_FLAT_MAP_HPP diff --git a/include/boost/unordered/concurrent_flat_map_fwd.hpp b/include/boost/unordered/concurrent_flat_map_fwd.hpp index 3a39c7a7..8a1dca3b 100644 --- a/include/boost/unordered/concurrent_flat_map_fwd.hpp +++ b/include/boost/unordered/concurrent_flat_map_fwd.hpp @@ -1,4 +1,4 @@ -/* Fast open-addressing concurrent hash table. +/* Fast open-addressing concurrent hashmap. * * Copyright 2023 Christian Mazakas. * Distributed under the Boost Software License, Version 1.0. diff --git a/include/boost/unordered/concurrent_flat_set.hpp b/include/boost/unordered/concurrent_flat_set.hpp new file mode 100644 index 00000000..96ddd98b --- /dev/null +++ b/include/boost/unordered/concurrent_flat_set.hpp @@ -0,0 +1,697 @@ +/* Fast open-addressing concurrent hashset. + * + * Copyright 2023 Christian Mazakas. + * Copyright 2023 Joaquin M Lopez Munoz. + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * See https://www.boost.org/libs/unordered for library home page. + */ + +#ifndef BOOST_UNORDERED_CONCURRENT_FLAT_SET_HPP +#define BOOST_UNORDERED_CONCURRENT_FLAT_SET_HPP + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +namespace boost { + namespace unordered { + template + class concurrent_flat_set + { + private: + template + friend class concurrent_flat_set; + template + friend class unordered_flat_set; + + using type_policy = detail::foa::flat_set_types; + + detail::foa::concurrent_table table_; + + template + bool friend operator==(concurrent_flat_set const& lhs, + concurrent_flat_set const& rhs); + + template + friend typename concurrent_flat_set::size_type erase_if( + concurrent_flat_set& set, Predicate pred); + + template + friend void serialize( + Archive& ar, concurrent_flat_set& c, + unsigned int version); + + public: + using key_type = Key; + using value_type = typename type_policy::value_type; + using init_type = typename type_policy::init_type; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using hasher = typename boost::type_identity::type; + using key_equal = typename boost::type_identity::type; + using allocator_type = typename boost::type_identity::type; + using reference = value_type&; + using const_reference = value_type const&; + using pointer = typename boost::allocator_pointer::type; + using const_pointer = + typename boost::allocator_const_pointer::type; + + concurrent_flat_set() + : concurrent_flat_set(detail::foa::default_bucket_count) + { + } + + explicit concurrent_flat_set(size_type n, const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()) + : table_(n, hf, eql, a) + { + } + + template + concurrent_flat_set(InputIterator f, InputIterator l, + size_type n = detail::foa::default_bucket_count, + const hasher& hf = hasher(), const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()) + : table_(n, hf, eql, a) + { + 
this->insert(f, l); + } + + concurrent_flat_set(concurrent_flat_set const& rhs) + : table_(rhs.table_, + boost::allocator_select_on_container_copy_construction( + rhs.get_allocator())) + { + } + + concurrent_flat_set(concurrent_flat_set&& rhs) + : table_(std::move(rhs.table_)) + { + } + + template + concurrent_flat_set( + InputIterator f, InputIterator l, allocator_type const& a) + : concurrent_flat_set(f, l, 0, hasher(), key_equal(), a) + { + } + + explicit concurrent_flat_set(allocator_type const& a) + : table_(detail::foa::default_bucket_count, hasher(), key_equal(), a) + { + } + + concurrent_flat_set( + concurrent_flat_set const& rhs, allocator_type const& a) + : table_(rhs.table_, a) + { + } + + concurrent_flat_set(concurrent_flat_set&& rhs, allocator_type const& a) + : table_(std::move(rhs.table_), a) + { + } + + concurrent_flat_set(std::initializer_list il, + size_type n = detail::foa::default_bucket_count, + const hasher& hf = hasher(), const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()) + : concurrent_flat_set(n, hf, eql, a) + { + this->insert(il.begin(), il.end()); + } + + concurrent_flat_set(size_type n, const allocator_type& a) + : concurrent_flat_set(n, hasher(), key_equal(), a) + { + } + + concurrent_flat_set( + size_type n, const hasher& hf, const allocator_type& a) + : concurrent_flat_set(n, hf, key_equal(), a) + { + } + + template + concurrent_flat_set( + InputIterator f, InputIterator l, size_type n, const allocator_type& a) + : concurrent_flat_set(f, l, n, hasher(), key_equal(), a) + { + } + + template + concurrent_flat_set(InputIterator f, InputIterator l, size_type n, + const hasher& hf, const allocator_type& a) + : concurrent_flat_set(f, l, n, hf, key_equal(), a) + { + } + + concurrent_flat_set( + std::initializer_list il, const allocator_type& a) + : concurrent_flat_set( + il, detail::foa::default_bucket_count, hasher(), key_equal(), a) + { + } + + concurrent_flat_set(std::initializer_list il, size_type n, + const allocator_type& a) + : concurrent_flat_set(il, n, hasher(), key_equal(), a) + { + } + + concurrent_flat_set(std::initializer_list il, size_type n, + const hasher& hf, const allocator_type& a) + : concurrent_flat_set(il, n, hf, key_equal(), a) + { + } + + + concurrent_flat_set( + unordered_flat_set&& other) + : table_(std::move(other.table_)) + { + } + + ~concurrent_flat_set() = default; + + concurrent_flat_set& operator=(concurrent_flat_set const& rhs) + { + table_ = rhs.table_; + return *this; + } + + concurrent_flat_set& operator=(concurrent_flat_set&& rhs) + noexcept(boost::allocator_is_always_equal::type::value || + boost::allocator_propagate_on_container_move_assignment< + Allocator>::type::value) + { + table_ = std::move(rhs.table_); + return *this; + } + + concurrent_flat_set& operator=(std::initializer_list ilist) + { + table_ = ilist; + return *this; + } + + /// Capacity + /// + + size_type size() const noexcept { return table_.size(); } + size_type max_size() const noexcept { return table_.max_size(); } + + BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept + { + return size() == 0; + } + + template + BOOST_FORCEINLINE size_type visit(key_type const& k, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.visit(k, f); + } + + template + BOOST_FORCEINLINE size_type cvisit(key_type const& k, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.visit(k, f); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, 
size_type>::type + visit(K&& k, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.visit(std::forward(k), f); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, size_type>::type + cvisit(K&& k, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.visit(std::forward(k), f); + } + + template size_type visit_all(F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.visit_all(f); + } + + template size_type cvisit_all(F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.cvisit_all(f); + } + +#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) + template + typename std::enable_if::value, + void>::type + visit_all(ExecPolicy&& p, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) + table_.visit_all(p, f); + } + + template + typename std::enable_if::value, + void>::type + cvisit_all(ExecPolicy&& p, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) + table_.cvisit_all(p, f); + } +#endif + + template bool visit_while(F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.visit_while(f); + } + + template bool cvisit_while(F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.cvisit_while(f); + } + +#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) + template + typename std::enable_if::value, + bool>::type + visit_while(ExecPolicy&& p, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) + return table_.visit_while(p, f); + } + + template + typename std::enable_if::value, + bool>::type + cvisit_while(ExecPolicy&& p, F f) const + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) + return table_.cvisit_while(p, f); + } +#endif + + /// Modifiers + /// + + BOOST_FORCEINLINE bool insert(value_type const& obj) + { + return table_.insert(obj); + } + + BOOST_FORCEINLINE bool insert(value_type&& obj) + { + return table_.insert(std::move(obj)); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, + bool >::type + insert(K&& k) + { + return table_.try_emplace(std::forward(k)); + } + + template + void insert(InputIterator begin, InputIterator end) + { + for (auto pos = begin; pos != end; ++pos) { + table_.emplace(*pos); + } + } + + void insert(std::initializer_list ilist) + { + this->insert(ilist.begin(), ilist.end()); + } + + template + BOOST_FORCEINLINE bool insert_or_visit(value_type const& obj, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.insert_or_cvisit(obj, f); + } + + template + BOOST_FORCEINLINE bool insert_or_visit(value_type&& obj, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.insert_or_cvisit(std::move(obj), f); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, + bool >::type + insert_or_visit(K&& k, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.try_emplace_or_cvisit(std::forward(k), f); + } + + template + void insert_or_visit(InputIterator first, InputIterator last, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + for (; first != last; ++first) { + table_.emplace_or_cvisit(*first, f); + } + } + + template + void 
insert_or_visit(std::initializer_list ilist, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + this->insert_or_cvisit(ilist.begin(), ilist.end(), f); + } + + template + BOOST_FORCEINLINE bool insert_or_cvisit(value_type const& obj, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.insert_or_cvisit(obj, f); + } + + template + BOOST_FORCEINLINE bool insert_or_cvisit(value_type&& obj, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.insert_or_cvisit(std::move(obj), f); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, + bool >::type + insert_or_cvisit(K&& k, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + return table_.try_emplace_or_cvisit(std::forward(k), f); + } + + template + void insert_or_cvisit(InputIterator first, InputIterator last, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + for (; first != last; ++first) { + table_.emplace_or_cvisit(*first, f); + } + } + + template + void insert_or_cvisit(std::initializer_list ilist, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) + this->insert_or_cvisit(ilist.begin(), ilist.end(), f); + } + + template BOOST_FORCEINLINE bool emplace(Args&&... args) + { + return table_.emplace(std::forward(args)...); + } + + template + BOOST_FORCEINLINE bool emplace_or_visit(Arg&& arg, Args&&... args) + { + BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...) + return table_.emplace_or_cvisit( + std::forward(arg), std::forward(args)...); + } + + template + BOOST_FORCEINLINE bool emplace_or_cvisit(Arg&& arg, Args&&... args) + { + BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...) + return table_.emplace_or_cvisit( + std::forward(arg), std::forward(args)...); + } + + BOOST_FORCEINLINE size_type erase(key_type const& k) + { + return table_.erase(k); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, size_type>::type + erase(K&& k) + { + return table_.erase(std::forward(k)); + } + + template + BOOST_FORCEINLINE size_type erase_if(key_type const& k, F f) + { + return table_.erase_if(k, f); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value && + !detail::is_execution_policy::value, + size_type>::type + erase_if(K&& k, F f) + { + return table_.erase_if(std::forward(k), f); + } + +#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) + template + typename std::enable_if::value, + void>::type + erase_if(ExecPolicy&& p, F f) + { + BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy) + table_.erase_if(p, f); + } +#endif + + template size_type erase_if(F f) { return table_.erase_if(f); } + + void swap(concurrent_flat_set& other) noexcept( + boost::allocator_is_always_equal::type::value || + boost::allocator_propagate_on_container_swap::type::value) + { + return table_.swap(other.table_); + } + + void clear() noexcept { table_.clear(); } + + template + size_type merge(concurrent_flat_set& x) + { + BOOST_ASSERT(get_allocator() == x.get_allocator()); + return table_.merge(x.table_); + } + + template + size_type merge(concurrent_flat_set&& x) + { + return merge(x); + } + + BOOST_FORCEINLINE size_type count(key_type const& k) const + { + return table_.count(k); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, size_type>::type + count(K const& k) + { + return table_.count(k); + } + + BOOST_FORCEINLINE bool contains(key_type const& k) const + { + return 
table_.contains(k); + } + + template + BOOST_FORCEINLINE typename std::enable_if< + detail::are_transparent::value, bool>::type + contains(K const& k) const + { + return table_.contains(k); + } + + /// Hash Policy + /// + size_type bucket_count() const noexcept { return table_.capacity(); } + + float load_factor() const noexcept { return table_.load_factor(); } + float max_load_factor() const noexcept + { + return table_.max_load_factor(); + } + void max_load_factor(float) {} + size_type max_load() const noexcept { return table_.max_load(); } + + void rehash(size_type n) { table_.rehash(n); } + void reserve(size_type n) { table_.reserve(n); } + + /// Observers + /// + allocator_type get_allocator() const noexcept + { + return table_.get_allocator(); + } + + hasher hash_function() const { return table_.hash_function(); } + key_equal key_eq() const { return table_.key_eq(); } + }; + + template + bool operator==( + concurrent_flat_set const& lhs, + concurrent_flat_set const& rhs) + { + return lhs.table_ == rhs.table_; + } + + template + bool operator!=( + concurrent_flat_set const& lhs, + concurrent_flat_set const& rhs) + { + return !(lhs == rhs); + } + + template + void swap(concurrent_flat_set& x, + concurrent_flat_set& y) + noexcept(noexcept(x.swap(y))) + { + x.swap(y); + } + + template + typename concurrent_flat_set::size_type erase_if( + concurrent_flat_set& c, Predicate pred) + { + return c.table_.erase_if(pred); + } + + template + void serialize( + Archive& ar, concurrent_flat_set& c, unsigned int) + { + ar & core::make_nvp("table",c.table_); + } + +#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES + + template ::value_type>, + class Pred = + std::equal_to::value_type>, + class Allocator = std::allocator< + typename std::iterator_traits::value_type>, + class = boost::enable_if_t >, + class = boost::enable_if_t >, + class = boost::enable_if_t >, + class = boost::enable_if_t > > + concurrent_flat_set(InputIterator, InputIterator, + std::size_t = boost::unordered::detail::foa::default_bucket_count, + Hash = Hash(), Pred = Pred(), Allocator = Allocator()) + -> concurrent_flat_set< + typename std::iterator_traits::value_type, Hash, Pred, + Allocator>; + + template , + class Pred = std::equal_to, class Allocator = std::allocator, + class = boost::enable_if_t >, + class = boost::enable_if_t >, + class = boost::enable_if_t > > + concurrent_flat_set(std::initializer_list, + std::size_t = boost::unordered::detail::foa::default_bucket_count, + Hash = Hash(), Pred = Pred(), Allocator = Allocator()) + -> concurrent_flat_set< T, Hash, Pred, Allocator>; + + template >, + class = boost::enable_if_t > > + concurrent_flat_set(InputIterator, InputIterator, std::size_t, Allocator) + -> concurrent_flat_set< + typename std::iterator_traits::value_type, + boost::hash::value_type>, + std::equal_to::value_type>, + Allocator>; + + template >, + class = boost::enable_if_t > > + concurrent_flat_set(InputIterator, InputIterator, Allocator) + -> concurrent_flat_set< + typename std::iterator_traits::value_type, + boost::hash::value_type>, + std::equal_to::value_type>, + Allocator>; + + template >, + class = boost::enable_if_t >, + class = boost::enable_if_t > > + concurrent_flat_set( + InputIterator, InputIterator, std::size_t, Hash, Allocator) + -> concurrent_flat_set< + typename std::iterator_traits::value_type, Hash, + std::equal_to::value_type>, + Allocator>; + + template > > + concurrent_flat_set(std::initializer_list, std::size_t, Allocator) + -> concurrent_flat_set,std::equal_to, Allocator>; + + template > > + 
concurrent_flat_set(std::initializer_list, Allocator) + -> concurrent_flat_set, std::equal_to, Allocator>; + + template >, + class = boost::enable_if_t > > + concurrent_flat_set(std::initializer_list, std::size_t,Hash, Allocator) + -> concurrent_flat_set, Allocator>; + +#endif + + } // namespace unordered + + using unordered::concurrent_flat_set; +} // namespace boost + +#endif // BOOST_UNORDERED_CONCURRENT_FLAT_SET_HPP diff --git a/include/boost/unordered/concurrent_flat_set_fwd.hpp b/include/boost/unordered/concurrent_flat_set_fwd.hpp new file mode 100644 index 00000000..d225a60b --- /dev/null +++ b/include/boost/unordered/concurrent_flat_set_fwd.hpp @@ -0,0 +1,55 @@ +/* Fast open-addressing concurrent hashset. + * + * Copyright 2023 Christian Mazakas. + * Copyright 2023 Joaquin M Lopez Munoz. + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * See https://www.boost.org/libs/unordered for library home page. + */ + +#ifndef BOOST_UNORDERED_CONCURRENT_FLAT_SET_FWD_HPP +#define BOOST_UNORDERED_CONCURRENT_FLAT_SET_FWD_HPP + +#include + +#include +#include + +namespace boost { + namespace unordered { + + template , + class Pred = std::equal_to, + class Allocator = std::allocator > + class concurrent_flat_set; + + template + bool operator==( + concurrent_flat_set const& lhs, + concurrent_flat_set const& rhs); + + template + bool operator!=( + concurrent_flat_set const& lhs, + concurrent_flat_set const& rhs); + + template + void swap(concurrent_flat_set& x, + concurrent_flat_set& y) + noexcept(noexcept(x.swap(y))); + + template + typename concurrent_flat_set::size_type erase_if( + concurrent_flat_set& c, Predicate pred); + + } // namespace unordered + + using boost::unordered::concurrent_flat_set; + using boost::unordered::swap; + using boost::unordered::operator==; + using boost::unordered::operator!=; +} // namespace boost + +#endif // BOOST_UNORDERED_CONCURRENT_FLAT_SET_FWD_HPP diff --git a/include/boost/unordered/detail/concurrent_static_asserts.hpp b/include/boost/unordered/detail/concurrent_static_asserts.hpp new file mode 100644 index 00000000..93717534 --- /dev/null +++ b/include/boost/unordered/detail/concurrent_static_asserts.hpp @@ -0,0 +1,75 @@ +/* Copyright 2023 Christian Mazakas. + * Copyright 2023 Joaquin M Lopez Munoz. + * Distributed under the Boost Software License, Version 1.0. + * (See accompanying file LICENSE_1_0.txt or copy at + * http://www.boost.org/LICENSE_1_0.txt) + * + * See https://www.boost.org/libs/unordered for library home page. 
+ */ + +#ifndef BOOST_UNORDERED_DETAIL_CONCURRENT_STATIC_ASSERTS_HPP +#define BOOST_UNORDERED_DETAIL_CONCURRENT_STATIC_ASSERTS_HPP + +#include +#include + +#include +#include + +#define BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F) \ + static_assert(boost::unordered::detail::is_invocable::value, \ + "The provided Callable must be invocable with value_type&"); + +#define BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) \ + static_assert( \ + boost::unordered::detail::is_invocable::value, \ + "The provided Callable must be invocable with value_type const&"); + +#if BOOST_CXX_VERSION >= 202002L + +#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P) \ + static_assert(!std::is_base_of::value, \ + "ExecPolicy must be sequenced."); \ + static_assert( \ + !std::is_base_of::value, \ + "ExecPolicy must be sequenced."); + +#else + +#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P) \ + static_assert(!std::is_base_of::value, \ + "ExecPolicy must be sequenced."); +#endif + +#define BOOST_UNORDERED_DETAIL_COMMA , + +#define BOOST_UNORDERED_DETAIL_LAST_ARG(Arg, Args) \ + mp11::mp_back > + +#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args) \ + BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE( \ + BOOST_UNORDERED_DETAIL_LAST_ARG(Arg, Args)) + +#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args) \ + BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE( \ + BOOST_UNORDERED_DETAIL_LAST_ARG(Arg, Args)) + +namespace boost { + namespace unordered { + namespace detail { + template + struct is_invocable + : std::is_constructible, + std::reference_wrapper::type> > + { + }; + + } // namespace detail + + } // namespace unordered + +} // namespace boost + +#endif // BOOST_UNORDERED_DETAIL_CONCURRENT_STATIC_ASSERTS_HPP diff --git a/include/boost/unordered/unordered_flat_set.hpp b/include/boost/unordered/unordered_flat_set.hpp index 2676cafc..70286cbb 100644 --- a/include/boost/unordered/unordered_flat_set.hpp +++ b/include/boost/unordered/unordered_flat_set.hpp @@ -10,6 +10,7 @@ #pragma once #endif +#include #include #include #include @@ -35,6 +36,9 @@ namespace boost { template class unordered_flat_set { + template + friend class concurrent_flat_set; + using set_types = detail::foa::flat_set_types; using table_type = detail::foa::table&& other) + : table_(std::move(other.table_)) + { + } + ~unordered_flat_set() = default; unordered_flat_set& operator=(unordered_flat_set const& other) From 49f0929466176a18df278ec987ae2bfe76727483 Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sun, 10 Sep 2023 18:35:26 +0200 Subject: [PATCH 02/14] documented boost::concurrent_flat_set --- doc/unordered/changes.adoc | 13 +- doc/unordered/compliance.adoc | 26 +- doc/unordered/concurrent.adoc | 40 +- doc/unordered/concurrent_flat_set.adoc | 1369 ++++++++++++++++++++++++ doc/unordered/intro.adoc | 9 +- doc/unordered/rationale.adoc | 4 +- doc/unordered/ref.adoc | 1 + doc/unordered/structures.adoc | 8 +- doc/unordered/unordered_flat_set.adoc | 18 +- 9 files changed, 1441 insertions(+), 47 deletions(-) create mode 100644 doc/unordered/concurrent_flat_set.adoc diff --git a/doc/unordered/changes.adoc b/doc/unordered/changes.adoc index 76a5f7fe..f8074c2c 100644 --- a/doc/unordered/changes.adoc +++ b/doc/unordered/changes.adoc @@ -6,14 +6,15 @@ :github-pr-url: https://github.com/boostorg/unordered/pull :cpp: C++ -== Release 1.84.0 +== Release 1.84.0 - Major update -* Added `[c]visit_while` operations to `boost::concurrent_map`, +* Added `boost::concurrent_flat_set`. 
+* Added `[c]visit_while` operations to concurrent containers, with serial and parallel variants. -* Added efficient move construction of `boost::unordered_flat_map` from -`boost::concurrent_flat_map` and vice versa. -* Added debug mode mechanisms for detecting illegal reentrancies into -a `boost::concurrent_flat_map` from user code. +* Added efficient move construction of `boost::unordered_flat_(map|set)` from +`boost::concurrent_flat_(map|set)` and vice versa. +* Added debug-mode mechanisms for detecting illegal reentrancies into +a concurrent container from user code. * Added Boost.Serialization support to all containers and their (non-local) iterator types. * Added support for fancy pointers to open-addressing and concurrent containers. This enables scenarios like the use of Boost.Interprocess allocators to construct containers in shared memory. diff --git a/doc/unordered/compliance.adoc b/doc/unordered/compliance.adoc index eb91ac4f..897c7dfb 100644 --- a/doc/unordered/compliance.adoc +++ b/doc/unordered/compliance.adoc @@ -148,14 +148,14 @@ The main differences with C++ unordered associative containers are: == Concurrent Containers -There is currently no specification in the C++ standard for this or any other concurrent -data structure. `boost::concurrent_flat_map` takes the same template parameters as `std::unordered_map` -and all the maps provided by Boost.Unordered, and its API is modelled after that of -`boost::unordered_flat_map` with the crucial difference that iterators are not provided +There is currently no specification in the C++ standard for this or any other type of concurrent +data structure. The APIs of `boost::concurrent_flat_set` and `boost::concurrent_flat_map` +are modelled after `std::unordered_flat_set` and `std::unordered_flat_map`, respectively, +with the crucial difference that iterators are not provided due to their inherent problems in concurrent scenarios (high contention, prone to deadlocking): -so, `boost::concurrent_flat_map` is technically not a +so, Boost.Unordered concurrent containers are technically not models of https://en.cppreference.com/w/cpp/named_req/Container[Container^], although -it meets all the requirements of https://en.cppreference.com/w/cpp/named_req/AllocatorAwareContainer[AllocatorAware^] +they meet all the requirements of https://en.cppreference.com/w/cpp/named_req/AllocatorAwareContainer[AllocatorAware^] containers except those implying iterators. In a non-concurrent unordered container, iterators serve two main purposes: @@ -163,7 +163,7 @@ In a non-concurrent unordered container, iterators serve two main purposes: * Access to an element previously located via lookup. * Container traversal. -In place of iterators, `boost::concurrent_flat_map` uses _internal visitation_ +In place of iterators, `boost::concurrent_flat_set` and `boost::concurrent_flat_map` use _internal visitation_ facilities as a thread-safe substitute. Classical operations returning an iterator to an element already existing in the container, like for instance: @@ -191,15 +191,15 @@ template size_t visit_all(F f); ---- of which there are parallelized versions in C++17 compilers with parallel -algorithm support. In general, the interface of `boost::concurrent_flat_map` -is derived from that of `boost::unordered_flat_map` by a fairly straightforward -process of replacing iterators with visitation where applicable. If -`iterator` and `const_iterator` provide mutable and const access to elements, +algorithm support. 
In general, the interface of concurrent containers +is derived from that of their non-concurrent counterparts by a fairly straightforward +process of replacing iterators with visitation where applicable. If for +regular maps `iterator` and `const_iterator` provide mutable and const access to elements, respectively, here visitation is granted mutable or const access depending on the constness of the member function used (there are also `*cvisit` overloads for -explicit const visitation). +explicit const visitation); in the case of `boost::concurrent_flat_set`, visitation is always const. -The one notable operation not provided is `operator[]`/`at`, which can be +One notable operation not provided by `boost::concurrent_flat_map` is `operator[]`/`at`, which can be replaced, if in a more convoluted manner, by xref:#concurrent_flat_map_try_emplace_or_cvisit[`try_emplace_or_visit`]. diff --git a/doc/unordered/concurrent.adoc b/doc/unordered/concurrent.adoc index d418cec4..a0b3d998 100644 --- a/doc/unordered/concurrent.adoc +++ b/doc/unordered/concurrent.adoc @@ -3,8 +3,8 @@ :idprefix: concurrent_ -Boost.Unordered currently provides just one concurrent container named `boost::concurrent_flat_map`. -`boost::concurrent_flat_map` is a hash table that allows concurrent write/read access from +Boost.Unordered provides `boost::concurrent_flat_set` and `boost::concurrent_flat_map`, +hash tables that allow concurrent write/read access from different threads without having to implement any synchronization mechanism on the user's side. [source,c++] ---- @@ -36,16 +36,16 @@ In the example above, threads access `m` without synchronization, just as we'd d single-threaded scenario. In an ideal setting, if a given workload is distributed among _N_ threads, execution is _N_ times faster than with one thread —this limit is never attained in practice due to synchronization overheads and _contention_ (one thread -waiting for another to leave a locked portion of the map), but `boost::concurrent_flat_map` -is designed to perform with very little overhead and typically achieves _linear scaling_ +waiting for another to leave a locked portion of the map), but Boost.Unordered concurrent containers +are designed to perform with very little overhead and typically achieve _linear scaling_ (that is, performance is proportional to the number of threads up to the number of logical cores in the CPU). == Visitation-based API -The first thing a new user of `boost::concurrent_flat_map` will notice is that this -class _does not provide iterators_ (which makes it technically -not a https://en.cppreference.com/w/cpp/named_req/Container[Container^] +The first thing a new user of `boost::concurrent_flat_set` or `boost::concurrent_flat_map` +will notice is that these classes _do not provide iterators_ (which makes them technically +not https://en.cppreference.com/w/cpp/named_req/Container[Containers^] in the C++ standard sense). The reason for this is that iterators are inherently thread-unsafe. Consider this hypothetical code: [source,c++] ---- @@ -73,7 +73,7 @@ m.visit(k, [](const auto& x) { // x is the element with key k (if it exists) ---- The visitation function passed by the user (in this case, a lambda function) -is executed internally by `boost::concurrent_flat_map` in +is executed internally by Boost.Unordered in a thread-safe manner, so it can access the element without worrying about other threads interfering in the process.
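To make the visitation model concrete, here is a minimal, self-contained sketch using `boost::concurrent_flat_set` (the element values and variable names are only illustrative, not taken from the library's own examples):

[source,c++]
----
#include <boost/unordered/concurrent_flat_set.hpp>
#include <iostream>
#include <string>

int main()
{
  boost::concurrent_flat_set<std::string> words;
  words.insert("alpha");
  words.insert("beta");

  // The lambda runs internally under the container's synchronization;
  // for concurrent_flat_set the element is always passed as const.
  std::size_t n = words.visit("alpha", [](const std::string& x) {
    std::cout << "found " << x << '\n';
  });
  std::cout << n << " element(s) visited\n";
}
----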
@@ -112,7 +112,7 @@ if (found) { } ---- -Visitation is prominent in the API provided by `boost::concurrent_flat_map`, and +Visitation is prominent in the API provided by `boost::concurrent_flat_set` and `boost::concurrent_flat_map`, and many classical operations have visitation-enabled variations: [source,c++] ---- @@ -129,13 +129,17 @@ the element: as a general rule, operations on a `boost::concurrent_flat_map` `m` will grant visitation functions const/non-const access to the element depending on whether `m` is const/non-const. Const access can always be explicitly requested by using `cvisit` overloads (for instance, `insert_or_cvisit`) and may result -in higher parallelization. Consult the xref:#concurrent_flat_map[reference] -for a complete list of available operations. +in higher parallelization. For `boost::concurrent_flat_set`, on the other hand, +visitation is always const access. +Consult the references of +xref:#concurrent_flat_set[`boost::concurrent_flat_set`] and +xref:#concurrent_flat_map[`boost::concurrent_flat_map`] +for the complete list of visitation-enabled operations. == Whole-Table Visitation -In the absence of iterators, `boost::concurrent_flat_map` provides `visit_all` -as an alternative way to process all the elements in the map: +In the absence of iterators, `visit_all` is provided +as an alternative way to process all the elements in the container: [source,c++] ---- @@ -187,12 +191,12 @@ m.erase_if([](auto& x) { `visit_while` and `erase_if` can also be parallelized. Note that, in order to increase efficiency, whole-table visitation operations do not block the table during execution: this implies that elements may be inserted, modified or erased by other threads during visitation. It is -advisable not to assume too much about the exact global state of a `boost::concurrent_flat_map` +advisable not to assume too much about the exact global state of a concurrent container at any point in your program. == Blocking Operations -``boost::concurrent_flat_map``s can be copied, assigned, cleared and merged just like any +``boost::concurrent_flat_set``s and ``boost::concurrent_flat_map``s can be copied, assigned, cleared and merged just like any Boost.Unordered container. Unlike most other operations, these are _blocking_, that is, all other threads are prevented from accessing the tables involved while a copy, assignment, clear or merge operation is in progress. Blocking is taken care of automatically by the library @@ -204,8 +208,10 @@ reserving space in advance of bulk insertions will generally speed up the proces == Interoperability with non-concurrent containers -As their internal data structure is basically the same, `boost::unordered_flat_map` can -be efficiently move-constructed from `boost::concurrent_flat_map` and vice versa. +As open-addressing and concurrent containers are based on the same internal data structure, +`boost::unordered_flat_set` and `boost::unordered_flat_map` can +be efficiently move-constructed from `boost::concurrent_flat_set` and `boost::concurrent_flat_map`, +respectively, and vice versa. This interoperability comes in handy in multistage scenarios where parts of the data processing happen in parallel whereas other steps are non-concurrent (or non-modifying).
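As a minimal sketch of what this handover looks like (the contents are placeholders; a real program would populate the concurrent container from several threads), the two move constructors can be used back to back:

[source,c++]
----
#include <boost/unordered/concurrent_flat_set.hpp>
#include <boost/unordered/unordered_flat_set.hpp>
#include <string>

int main()
{
  boost::concurrent_flat_set<std::string> cs;
  cs.insert("alpha"); // populated concurrently in a real program

  // the internal bucket array is transferred directly, no per-element copying
  boost::unordered_flat_set<std::string> fs(std::move(cs));

  // and back again when concurrent access is needed once more
  boost::concurrent_flat_set<std::string> cs2(std::move(fs));
}
----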
In the following example, we want to construct a histogram from a huge input vector of words: diff --git a/doc/unordered/concurrent_flat_set.adoc b/doc/unordered/concurrent_flat_set.adoc new file mode 100644 index 00000000..c7c67aaa --- /dev/null +++ b/doc/unordered/concurrent_flat_set.adoc @@ -0,0 +1,1369 @@ +[#concurrent_flat_set] +== Class Template concurrent_flat_set + +:idprefix: concurrent_flat_set_ + +`boost::concurrent_flat_set` — A hash table that stores unique values and +allows for concurrent element insertion, erasure, lookup and access +without external synchronization mechanisms. + +Even though it acts as a container, `boost::concurrent_flat_set` +does not model the standard C++ https://en.cppreference.com/w/cpp/named_req/Container[Container^] concept. +In particular, iterators and associated operations (`begin`, `end`, etc.) are not provided. +Element access is done through user-provided _visitation functions_ that are passed +to `concurrent_flat_set` operations where they are executed internally in a controlled fashion. +Such visitation-based API allows for low-contention concurrent usage scenarios. + +The internal data structure of `boost::concurrent_flat_set` is similar to that of +`boost::unordered_flat_set`. As a result of its using open-addressing techniques, +`value_type` must be move-constructible and pointer stability is not kept under rehashing. + +=== Synopsis + +[listing,subs="+macros,+quotes"] +----- +// #include + +namespace boost { + template, + class Pred = std::equal_to, + class Allocator = std::allocator> + class concurrent_flat_set { + public: + // types + using key_type = Key; + using value_type = Key; + using init_type = Key; + using hasher = Hash; + using key_equal = Pred; + using allocator_type = Allocator; + using pointer = typename std::allocator_traits::pointer; + using const_pointer = typename std::allocator_traits::const_pointer; + using reference = value_type&; + using const_reference = const value_type&; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + + // construct/copy/destroy + xref:#concurrent_flat_set_default_constructor[concurrent_flat_set](); + explicit xref:#concurrent_flat_set_bucket_count_constructor[concurrent_flat_set](size_type n, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()); + template + xref:#concurrent_flat_set_iterator_range_constructor[concurrent_flat_set](InputIterator f, InputIterator l, + size_type n = _implementation-defined_, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()); + xref:#concurrent_flat_set_copy_constructor[concurrent_flat_set](const concurrent_flat_set& other); + xref:#concurrent_flat_set_move_constructor[concurrent_flat_set](concurrent_flat_set&& other); + template + xref:#concurrent_flat_set_iterator_range_constructor_with_allocator[concurrent_flat_set](InputIterator f, InputIterator l,const allocator_type& a); + explicit xref:#concurrent_flat_set_allocator_constructor[concurrent_flat_set](const Allocator& a); + xref:#concurrent_flat_set_copy_constructor_with_allocator[concurrent_flat_set](const concurrent_flat_set& other, const Allocator& a); + xref:#concurrent_flat_set_move_constructor_with_allocator[concurrent_flat_set](concurrent_flat_set&& other, const Allocator& a); + xref:#concurrent_flat_set_move_constructor_from_unordered_flat_set[concurrent_flat_set](unordered_flat_set&& other); + 
xref:#concurrent_flat_set_initializer_list_constructor[concurrent_flat_set](std::initializer_list il, + size_type n = _implementation-defined_ + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()); + xref:#concurrent_flat_set_bucket_count_constructor_with_allocator[concurrent_flat_set](size_type n, const allocator_type& a); + xref:#concurrent_flat_set_bucket_count_constructor_with_hasher_and_allocator[concurrent_flat_set](size_type n, const hasher& hf, const allocator_type& a); + template + xref:#concurrent_flat_set_iterator_range_constructor_with_bucket_count_and_allocator[concurrent_flat_set](InputIterator f, InputIterator l, size_type n, + const allocator_type& a); + template + xref:#concurrent_flat_set_iterator_range_constructor_with_bucket_count_and_hasher[concurrent_flat_set](InputIterator f, InputIterator l, size_type n, const hasher& hf, + const allocator_type& a); + xref:#concurrent_flat_set_initializer_list_constructor_with_allocator[concurrent_flat_set](std::initializer_list il, const allocator_type& a); + xref:#concurrent_flat_set_initializer_list_constructor_with_bucket_count_and_allocator[concurrent_flat_set](std::initializer_list il, size_type n, + const allocator_type& a); + xref:#concurrent_flat_set_initializer_list_constructor_with_bucket_count_and_hasher_and_allocator[concurrent_flat_set](std::initializer_list il, size_type n, const hasher& hf, + const allocator_type& a); + xref:#concurrent_flat_set_destructor[~concurrent_flat_set](); + concurrent_flat_set& xref:#concurrent_flat_set_copy_assignment[operator++=++](const concurrent_flat_set& other); + concurrent_flat_set& xref:#concurrent_flat_set_move_assignment[operator++=++](concurrent_flat_set&& other) + noexcept(boost::allocator_traits::is_always_equal::value || + boost::allocator_traits::propagate_on_container_move_assignment::value); + concurrent_flat_set& xref:#concurrent_flat_set_initializer_list_assignment[operator++=++](std::initializer_list); + allocator_type xref:#concurrent_flat_set_get_allocator[get_allocator]() const noexcept; + + + // visitation + template size_t xref:#concurrent_flat_set_cvisit[visit](const key_type& k, F f) const; + template size_t xref:#concurrent_flat_set_cvisit[cvisit](const key_type& k, F f) const; + template size_t xref:#concurrent_flat_set_cvisit[visit](const K& k, F f) const; + template size_t xref:#concurrent_flat_set_cvisit[cvisit](const K& k, F f) const; + + template size_t xref:#concurrent_flat_set_cvisit_all[visit_all](F f) const; + template size_t xref:#concurrent_flat_set_cvisit_all[cvisit_all](F f) const; + template + void xref:#concurrent_flat_set_parallel_cvisit_all[visit_all](ExecutionPolicy&& policy, F f) const; + template + void xref:#concurrent_flat_set_parallel_cvisit_all[cvisit_all](ExecutionPolicy&& policy, F f) const; + + template bool xref:#concurrent_flat_set_cvisit_while[visit_while](F f) const; + template bool xref:#concurrent_flat_set_cvisit_while[cvisit_while](F f) const; + template + bool xref:#concurrent_flat_set_parallel_cvisit_while[visit_while](ExecutionPolicy&& policy, F f) const; + template + bool xref:#concurrent_flat_set_parallel_cvisit_while[cvisit_while](ExecutionPolicy&& policy, F f) const; + + // capacity + ++[[nodiscard]]++ bool xref:#concurrent_flat_set_empty[empty]() const noexcept; + size_type xref:#concurrent_flat_set_size[size]() const noexcept; + size_type xref:#concurrent_flat_set_max_size[max_size]() const noexcept; + + // modifiers + template bool 
xref:#concurrent_flat_set_emplace[emplace](Args&&... args); + bool xref:#concurrent_flat_set_copy_insert[insert](const value_type& obj); + bool xref:#concurrent_flat_set_move_insert[insert](value_type&& obj); + template bool xref:#concurrent_flat_set_transparent_insert[insert](K&& k); + template size_type xref:#concurrent_flat_set_insert_iterator_range[insert](InputIterator first, InputIterator last); + size_type xref:#concurrent_flat_set_insert_initializer_list[insert](std::initializer_list il); + + template bool xref:#concurrent_flat_set_emplace_or_cvisit[emplace_or_visit](Args&&... args, F&& f); + template bool xref:#concurrent_flat_set_emplace_or_cvisit[emplace_or_cvisit](Args&&... args, F&& f); + template bool xref:#concurrent_flat_set_copy_insert_or_cvisit[insert_or_visit](const value_type& obj, F f); + template bool xref:#concurrent_flat_set_copy_insert_or_cvisit[insert_or_cvisit](const value_type& obj, F f); + template bool xref:#concurrent_flat_set_move_insert_or_cvisit[insert_or_visit](value_type&& obj, F f); + template bool xref:#concurrent_flat_set_move_insert_or_cvisit[insert_or_cvisit](value_type&& obj, F f); + template bool xref:#concurrent_flat_set_transparent_insert_or_cvisit[insert_or_visit](K&& k, F f); + template bool xref:#concurrent_flat_set_transparent_insert_or_cvisit[insert_or_cvisit](K&& k, F f); + template + size_type xref:#concurrent_flat_set_insert_iterator_range_or_visit[insert_or_visit](InputIterator first, InputIterator last, F f); + template + size_type xref:#concurrent_flat_set_insert_iterator_range_or_visit[insert_or_cvisit](InputIterator first, InputIterator last, F f); + template size_type xref:#concurrent_flat_set_insert_initializer_list_or_visit[insert_or_visit](std::initializer_list il, F f); + template size_type xref:#concurrent_flat_set_insert_initializer_list_or_visit[insert_or_cvisit](std::initializer_list il, F f); + + size_type xref:#concurrent_flat_set_erase[erase](const key_type& k); + template size_type xref:#concurrent_flat_set_erase[erase](const K& k); + + template size_type xref:#concurrent_flat_set_erase_if_by_key[erase_if](const key_type& k, F f); + template size_type xref:#concurrent_flat_set_erase_if_by_key[erase_if](const K& k, F f); + template size_type xref:#concurrent_flat_set_erase_if[erase_if](F f); + template void xref:#concurrent_flat_set_parallel_erase_if[erase_if](ExecutionPolicy&& policy, F f); + + void xref:#concurrent_flat_set_swap[swap](concurrent_flat_set& other) + noexcept(boost::allocator_traits::is_always_equal::value || + boost::allocator_traits::propagate_on_container_swap::value); + void xref:#concurrent_flat_set_clear[clear]() noexcept; + + template + size_type xref:#concurrent_flat_set_merge[merge](concurrent_flat_set& source); + template + size_type xref:#concurrent_flat_set_merge[merge](concurrent_flat_set&& source); + + // observers + hasher xref:#concurrent_flat_set_hash_function[hash_function]() const; + key_equal xref:#concurrent_flat_set_key_eq[key_eq]() const; + + // set operations + size_type xref:#concurrent_flat_set_count[count](const key_type& k) const; + template + size_type xref:#concurrent_flat_set_count[count](const K& k) const; + bool xref:#concurrent_flat_set_contains[contains](const key_type& k) const; + template + bool xref:#concurrent_flat_set_contains[contains](const K& k) const; + + // bucket interface + size_type xref:#concurrent_flat_set_bucket_count[bucket_count]() const noexcept; + + // hash policy + float xref:#concurrent_flat_set_load_factor[load_factor]() const noexcept; + float 
xref:#concurrent_flat_set_max_load_factor[max_load_factor]() const noexcept; + void xref:#concurrent_flat_set_set_max_load_factor[max_load_factor](float z); + size_type xref:#concurrent_flat_set_max_load[max_load]() const noexcept; + void xref:#concurrent_flat_set_rehash[rehash](size_type n); + void xref:#concurrent_flat_set_reserve[reserve](size_type n); + }; + + // Deduction Guides + template>, + class Pred = std::equal_to>, + class Allocator = std::allocator>> + concurrent_flat_set(InputIterator, InputIterator, typename xref:#concurrent_flat_set_deduction_guides[__see below__]::size_type = xref:#concurrent_flat_set_deduction_guides[__see below__], + Hash = Hash(), Pred = Pred(), Allocator = Allocator()) + -> concurrent_flat_set, Hash, Pred, Allocator>; + + template, class Pred = std::equal_to, + class Allocator = std::allocator> + concurrent_flat_set(std::initializer_list, typename xref:#concurrent_flat_set_deduction_guides[__see below__]::size_type = xref:#concurrent_flat_set_deduction_guides[__see below__], + Hash = Hash(), Pred = Pred(), Allocator = Allocator()) + -> concurrent_flat_set; + + template + concurrent_flat_set(InputIterator, InputIterator, typename xref:#concurrent_flat_set_deduction_guides[__see below__]::size_type, Allocator) + -> concurrent_flat_set, + boost::hash>, + std::equal_to>, Allocator>; + + template + concurrent_flat_set(InputIterator, InputIterator, Allocator) + -> concurrent_flat_set, + boost::hash>, + std::equal_to>, Allocator>; + + template + concurrent_flat_set(InputIterator, InputIterator, typename xref:#concurrent_flat_set_deduction_guides[__see below__]::size_type, Hash, + Allocator) + -> concurrent_flat_set, Hash, + std::equal_to>, Allocator>; + + template + concurrent_flat_set(std::initializer_list, typename xref:#concurrent_flat_set_deduction_guides[__see below__]::size_type, Allocator) + -> concurrent_flat_set, std::equal_to, Allocator>; + + template + concurrent_flat_set(std::initializer_list, Allocator) + -> concurrent_flat_set, std::equal_to, Allocator>; + + template + concurrent_flat_set(std::initializer_list, typename xref:#concurrent_flat_set_deduction_guides[__see below__]::size_type, Hash, Allocator) + -> concurrent_flat_set, Allocator>; + + // Equality Comparisons + template + bool xref:#concurrent_flat_set_operator[operator==](const concurrent_flat_set& x, + const concurrent_flat_set& y); + + template + bool xref:#concurrent_flat_set_operator_2[operator!=](const concurrent_flat_set& x, + const concurrent_flat_set& y); + + // swap + template + void xref:#concurrent_flat_set_swap_2[swap](concurrent_flat_set& x, + concurrent_flat_set& y) + noexcept(noexcept(x.swap(y))); + + // Erasure + template + typename concurrent_flat_set::size_type + xref:#concurrent_flat_set_erase_if_2[erase_if](concurrent_flat_set& c, Predicate pred); +} +----- + +--- + +=== Description + +*Template Parameters* + +[cols="1,1"] +|=== + +|_Key_ +|`Key` must be https://en.cppreference.com/w/cpp/named_req/MoveInsertable[MoveInsertable^] into the container +and https://en.cppreference.com/w/cpp/named_req/Erasable[Erasable^] from the container. + +|_Hash_ +|A unary function object type that acts a hash function for a `Key`. It takes a single argument of type `Key` and returns a value of type `std::size_t`. + +|_Pred_ +|A binary function object that induces an equivalence relation on values of type `Key`. It takes two arguments of type `Key` and returns a value of type `bool`. + +|_Allocator_ +|An allocator whose value type is the same as the table's value type. 
+`std::allocator_traits::pointer` and `std::allocator_traits::const_pointer` +must be convertible to/from `value_type*` and `const value_type*`, respectively. + +|=== + +The elements of the table are held into an internal _bucket array_. An element is inserted into a bucket determined by its +hash code, but if the bucket is already occupied (a _collision_), an available one in the vicinity of the +original position is used. + +The size of the bucket array can be automatically increased by a call to `insert`/`emplace`, or as a result of calling +`rehash`/`reserve`. The _load factor_ of the table (number of elements divided by number of buckets) is never +greater than `max_load_factor()`, except possibly for small sizes where the implementation may decide to +allow for higher loads. + +If `xref:hash_traits_hash_is_avalanching[hash_is_avalanching]::value` is `true`, the hash function +is used as-is; otherwise, a bit-mixing post-processing stage is added to increase the quality of hashing +at the expense of extra computational cost. + +--- + +=== Concurrency Requirements and Guarantees + +Concurrent invocations of `operator()` on the same const instance of `Hash` or `Pred` are required +to not introduce data races. For `Alloc` being either `Allocator` or any allocator type rebound +from `Allocator`, concurrent invocations of the following operations on the same instance `al` of `Alloc` +are required to not introduce data races: + +* Copy construction from `al` of an allocator rebound from `Alloc` +* `std::allocator_traits::allocate` +* `std::allocator_traits::deallocate` +* `std::allocator_traits::construct` +* `std::allocator_traits::destroy` + +In general, these requirements on `Hash`, `Pred` and `Allocator` are met if these types +are not stateful or if the operations only involve constant access to internal data members. + +With the exception of destruction, concurrent invocations of any operation on the same instance of a +`concurrent_flat_set` do not introduce data races — that is, they are thread-safe. + +If an operation *op* is explicitly designated as _blocking on_ `x`, where `x` is an instance of a `boost::concurrent_flat_set`, +prior blocking operations on `x` synchronize with *op*. So, blocking operations on the same +`concurrent_flat_set` execute sequentially in a multithreaded scenario. + +An operation is said to be _blocking on rehashing of_ ``__x__`` if it blocks on `x` +only when an internal rehashing is issued. + +Access or modification of an element of a `boost::concurrent_flat_set` passed by reference to a +user-provided visitation function do not introduce data races when the visitation function +is executed internally by the `boost::concurrent_flat_set`. + +Any `boost::concurrent_flat_set operation` that inserts or modifies an element `e` +synchronizes with the internal invocation of a visitation function on `e`. + +Visitation functions executed by a `boost::concurrent_flat_set` `x` are not allowed to invoke any operation +on `x`; invoking operations on a different `boost::concurrent_flat_set` instance `y` is allowed only +if concurrent outstanding operations on `y` do not access `x` directly or indirectly. 
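As an illustration of this last restriction, the following sketch (with a hypothetical set `s`) contrasts a well-formed visitation with a reentrant one that is not allowed:

```c++
boost::concurrent_flat_set<int> s{1, 2, 3};

// OK: the visitation function only uses the element it is given
s.visit(1, [](const int& x) { /* read x */ });

// Not allowed: reentrant call into s from within a function visiting s
// s.visit(1, [&](const int& x) { s.insert(x + 100); });
```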
+ +--- + +=== Configuration Macros + +==== `BOOST_UNORDERED_DISABLE_REENTRANCY_CHECK` + +In debug builds (more precisely, when +link:../../../assert/doc/html/assert.html#boost_assert_is_void[`BOOST_ASSERT_IS_VOID`^] +is not defined), __container reentrancies__ (illegaly invoking an operation on `m` from within +a function visiting elements of `m`) are detected and signalled through `BOOST_ASSERT_MSG`. +When run-time speed is a concern, the feature can be disabled by globally defining +this macro. + + +=== Constructors + +==== Default Constructor +```c++ +concurrent_flat_set(); +``` + +Constructs an empty table using `hasher()` as the hash function, +`key_equal()` as the key equality predicate and `allocator_type()` as the allocator. + +[horizontal] +Postconditions:;; `size() == 0` +Requires:;; If the defaults are used, `hasher`, `key_equal` and `allocator_type` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +==== Bucket Count Constructor +```c++ +explicit concurrent_flat_set(size_type n, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()); +``` + +Constructs an empty table with at least `n` buckets, using `hf` as the hash +function, `eql` as the key equality predicate, and `a` as the allocator. + +[horizontal] +Postconditions:;; `size() == 0` +Requires:;; If the defaults are used, `hasher`, `key_equal` and `allocator_type` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +==== Iterator Range Constructor +[source,c++,subs="+quotes"] +---- +template + concurrent_flat_set(InputIterator f, InputIterator l, + size_type n = _implementation-defined_, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& a = allocator_type()); +---- + +Constructs an empty table with at least `n` buckets, using `hf` as the hash function, `eql` as the key equality predicate and `a` as the allocator, and inserts the elements from `[f, l)` into it. + +[horizontal] +Requires:;; If the defaults are used, `hasher`, `key_equal` and `allocator_type` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +==== Copy Constructor +```c++ +concurrent_flat_set(concurrent_flat_set const& other); +``` + +The copy constructor. Copies the contained elements, hash function, predicate and allocator. + +If `Allocator::select_on_container_copy_construction` exists and has the right signature, the allocator will be constructed from its result. + +[horizontal] +Requires:;; `value_type` is copy constructible +Concurrency:;; Blocking on `other`. + +--- + +==== Move Constructor +```c++ +concurrent_flat_set(concurrent_flat_set&& other); +``` + +The move constructor. The internal bucket array of `other` is transferred directly to the new table. +The hash function, predicate and allocator are moved-constructed from `other`. + +[horizontal] +Concurrency:;; Blocking on `other`. + +--- + +==== Iterator Range Constructor with Allocator +```c++ +template + concurrent_flat_set(InputIterator f, InputIterator l, const allocator_type& a); +``` + +Constructs an empty table using `a` as the allocator, with the default hash function and key equality predicate and inserts the elements from `[f, l)` into it. + +[horizontal] +Requires:;; `hasher`, `key_equal` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. 
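A brief usage sketch of the constructors described so far; the bucket counts, element values and allocator are arbitrary:

```c++
std::vector<int> v{1, 2, 3, 2, 1};
std::allocator<int> a;

boost::concurrent_flat_set<int> s1;                         // default
boost::concurrent_flat_set<int> s2(1024);                   // bucket count
boost::concurrent_flat_set<int> s3(v.begin(), v.end());     // iterator range; duplicates collapse
boost::concurrent_flat_set<int> s4(s3);                     // copy (blocking on s3)
boost::concurrent_flat_set<int> s5(std::move(s4));          // move (blocking on s4)
boost::concurrent_flat_set<int> s6(v.begin(), v.end(), a);  // iterator range with allocator
```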
+
+---
+
+==== Allocator Constructor
+```c++
+explicit concurrent_flat_set(Allocator const& a);
+```
+
+Constructs an empty table, using allocator `a`.
+
+---
+
+==== Copy Constructor with Allocator
+```c++
+concurrent_flat_set(concurrent_flat_set const& other, Allocator const& a);
+```
+
+Constructs a table, copying ``other``'s contained elements, hash function, and predicate, but using allocator `a`.
+
+[horizontal]
+Concurrency:;; Blocking on `other`.
+
+---
+
+==== Move Constructor with Allocator
+```c++
+concurrent_flat_set(concurrent_flat_set&& other, Allocator const& a);
+```
+
+If `a == other.get_allocator()`, the elements of `other` are transferred directly to the new table;
+otherwise, elements are move-constructed from those of `other`. The hash function and predicate are move-constructed
+from `other`, and the allocator is copy-constructed from `a`.
+
+[horizontal]
+Concurrency:;; Blocking on `other`.
+
+---
+
+==== Move Constructor from unordered_flat_set
+
+```c++
+concurrent_flat_set(unordered_flat_set<Key, Hash, Pred, Allocator>&& other);
+```
+
+Move construction from a xref:#unordered_flat_set[`unordered_flat_set`].
+The internal bucket array of `other` is transferred directly to the new container.
+The hash function, predicate and allocator are move-constructed from `other`.
+
+[horizontal]
+Complexity:;; O(`bucket_count()`)
+
+---
+
+==== Initializer List Constructor
+[source,c++,subs="+quotes"]
+----
+concurrent_flat_set(std::initializer_list<value_type> il,
+                    size_type n = _implementation-defined_,
+                    const hasher& hf = hasher(),
+                    const key_equal& eql = key_equal(),
+                    const allocator_type& a = allocator_type());
+----
+
+Constructs an empty table with at least `n` buckets, using `hf` as the hash function, `eql` as the key equality predicate and `a` as the allocator, and inserts the elements from `il` into it.
+
+[horizontal]
+Requires:;; If the defaults are used, `hasher`, `key_equal` and `allocator_type` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^].
+
+---
+
+==== Bucket Count Constructor with Allocator
+```c++
+concurrent_flat_set(size_type n, allocator_type const& a);
+```
+
+Constructs an empty table with at least `n` buckets, using the default hash function and key equality predicate and `a` as the allocator.
+
+[horizontal]
+Postconditions:;; `size() == 0`
+Requires:;; `hasher` and `key_equal` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^].
+
+---
+
+==== Bucket Count Constructor with Hasher and Allocator
+```c++
+concurrent_flat_set(size_type n, hasher const& hf, allocator_type const& a);
+```
+
+Constructs an empty table with at least `n` buckets, using `hf` as the hash function, the default key equality predicate and `a` as the allocator.
+
+[horizontal]
+Postconditions:;; `size() == 0`
+Requires:;; `key_equal` needs to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^].
+
+---
+
+==== Iterator Range Constructor with Bucket Count and Allocator
+[source,c++,subs="+quotes"]
+----
+template<class InputIterator>
+  concurrent_flat_set(InputIterator f, InputIterator l, size_type n, const allocator_type& a);
+----
+
+Constructs an empty table with at least `n` buckets, using `a` as the allocator and default hash function and key equality predicate, and inserts the elements from `[f, l)` into it.
+
+[horizontal]
+Requires:;; `hasher`, `key_equal` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^].
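+
+When a non-default allocator is required, a sketch along these lines is possible; it uses
+`std::pmr::polymorphic_allocator` (which needs C++17) purely as one example of a stateful
+allocator, and the pool and element values are arbitrary:
+
+```c++
+#include <boost/container_hash/hash.hpp>
+#include <boost/unordered/concurrent_flat_set.hpp>
+#include <functional>
+#include <memory_resource>
+
+int main()
+{
+  std::pmr::monotonic_buffer_resource pool;
+  std::pmr::polymorphic_allocator<int> alloc(&pool);
+
+  using set = boost::concurrent_flat_set<int, boost::hash<int>,
+    std::equal_to<int>, std::pmr::polymorphic_allocator<int>>;
+
+  set s1(alloc);       // allocator constructor
+  set s2(512, alloc);  // bucket count constructor with allocator
+
+  s1.insert(42);
+  s2.insert(7);
+}
+```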
+ +--- + +==== Iterator Range Constructor with Bucket Count and Hasher +[source,c++,subs="+quotes"] +---- + template + concurrent_flat_set(InputIterator f, InputIterator l, size_type n, const hasher& hf, + const allocator_type& a); +---- + +Constructs an empty table with at least `n` buckets, using `hf` as the hash function, `a` as the allocator, with the default key equality predicate, and inserts the elements from `[f, l)` into it. + +[horizontal] +Requires:;; `key_equal` needs to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +==== initializer_list Constructor with Allocator + +```c++ +concurrent_flat_set(std::initializer_list il, const allocator_type& a); +``` + +Constructs an empty table using `a` and default hash function and key equality predicate, and inserts the elements from `il` into it. + +[horizontal] +Requires:;; `hasher` and `key_equal` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +==== initializer_list Constructor with Bucket Count and Allocator + +```c++ +concurrent_flat_set(std::initializer_list il, size_type n, const allocator_type& a); +``` + +Constructs an empty table with at least `n` buckets, using `a` and default hash function and key equality predicate, and inserts the elements from `il` into it. + +[horizontal] +Requires:;; `hasher` and `key_equal` need to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +==== initializer_list Constructor with Bucket Count and Hasher and Allocator + +```c++ +concurrent_flat_set(std::initializer_list il, size_type n, const hasher& hf, + const allocator_type& a); +``` + +Constructs an empty table with at least `n` buckets, using `hf` as the hash function, `a` as the allocator and default key equality predicate,and inserts the elements from `il` into it. + +[horizontal] +Requires:;; `key_equal` needs to be https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^]. + +--- + +=== Destructor + +```c++ +~concurrent_flat_set(); +``` + +[horizontal] +Note:;; The destructor is applied to every element, and all memory is deallocated + +--- + +=== Assignment + +==== Copy Assignment + +```c++ +concurrent_flat_set& operator=(concurrent_flat_set const& other); +``` + +The assignment operator. Destroys previously existing elements, copy-assigns the hash function and predicate from `other`, +copy-assigns the allocator from `other` if `Alloc::propagate_on_container_copy_assignment` exists and `Alloc::propagate_on_container_copy_assignment::value` is `true`, +and finally inserts copies of the elements of `other`. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] +Concurrency:;; Blocking on `*this` and `other`. + +--- + +==== Move Assignment +```c++ +concurrent_flat_set& operator=(concurrent_flat_set&& other) + noexcept(boost::allocator_traits::is_always_equal::value || + boost::allocator_traits::propagate_on_container_move_assignment::value); +``` +The move assignment operator. Destroys previously existing elements, swaps the hash function and predicate from `other`, +and move-assigns the allocator from `other` if `Alloc::propagate_on_container_move_assignment` exists and `Alloc::propagate_on_container_move_assignment::value` is `true`. 
+If at this point the allocator is equal to `other.get_allocator()`, the internal bucket array of `other` is transferred directly to `*this`; +otherwise, inserts move-constructed copies of the elements of `other`. + +[horizontal] +Concurrency:;; Blocking on `*this` and `other`. + +--- + +==== Initializer List Assignment +```c++ +concurrent_flat_set& operator=(std::initializer_list il); +``` + +Assign from values in initializer list. All previously existing elements are destroyed. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] +Concurrency:;; Blocking on `*this`. + +--- + +=== Visitation + +==== [c]visit + +```c++ +template size_t visit(const key_type& k, F f) const; +template size_t cvisit(const key_type& k, F f) const; +template size_t visit(const K& k, F f) const; +template size_t cvisit(const K& k, F f) const; +``` + +If an element `x` exists with key equivalent to `k`, invokes `f` with a const reference to `x`. + +[horizontal] +Returns:;; The number of elements visited (0 or 1). +Notes:;; The `template` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. + +--- + +==== [c]visit_all + +```c++ +template size_t visit_all(F f) const; +template size_t cvisit_all(F f) const; +``` + +Successively invokes `f` with const references to each of the elements in the table. + +[horizontal] +Returns:;; The number of elements visited. + +--- + +==== Parallel [c]visit_all + +```c++ +template void visit_all(ExecutionPolicy&& policy, F f) const; +template void cvisit_all(ExecutionPolicy&& policy, F f) const; +``` + +Invokes `f` with const references to each of the elements in the table. +Execution is parallelized according to the semantics of the execution policy specified. + +[horizontal] +Throws:;; Depending on the exception handling mechanism of the execution policy used, may call `std::terminate` if an exception is thrown within `f`. +Notes:;; Only available in compilers supporting C++17 parallel algorithms. + ++ +These overloads only participate in overload resolution if `std::is_execution_policy_v>` is `true`. + ++ +Unsequenced execution policies are not allowed. + +--- + +==== [c]visit_while + +```c++ +template bool visit_while(F f) const; +template bool cvisit_while(F f) const; +``` + +Successively invokes `f` with const references to each of the elements in the table until `f` returns `false` +or all the elements are visited. + +[horizontal] +Returns:;; `false` iff `f` ever returns `false`. + +--- + +==== Parallel [c]visit_while + +```c++ +template bool visit_while(ExecutionPolicy&& policy, F f) const; +template bool cvisit_while(ExecutionPolicy&& policy, F f) const; +``` + +Invokes `f` with const references to each of the elements in the table until `f` returns `false` +or all the elements are visited. +Execution is parallelized according to the semantics of the execution policy specified. + +[horizontal] +Returns:;; `false` iff `f` ever returns `false`. +Throws:;; Depending on the exception handling mechanism of the execution policy used, may call `std::terminate` if an exception is thrown within `f`. +Notes:;; Only available in compilers supporting C++17 parallel algorithms. 
+ ++ +These overloads only participate in overload resolution if `std::is_execution_policy_v>` is `true`. + ++ +Unsequenced execution policies are not allowed. + ++ +Parallelization implies that execution does not necessary finish as soon as `f` returns `false`, and as a result +`f` may be invoked with further elements for which the return value is also `false`. + +--- + +=== Size and Capacity + +==== empty + +```c++ +[[nodiscard]] bool empty() const noexcept; +``` + +[horizontal] +Returns:;; `size() == 0` + +--- + +==== size + +```c++ +size_type size() const noexcept; +``` + +[horizontal] +Returns:;; The number of elements in the table. + +[horizontal] +Notes:;; In the presence of concurrent insertion operations, the value returned may not accurately reflect +the true size of the table right after execution. + +--- + +==== max_size + +```c++ +size_type max_size() const noexcept; +``` + +[horizontal] +Returns:;; `size()` of the largest possible table. + +--- + +=== Modifiers + +==== emplace +```c++ +template bool emplace(Args&&... args); +``` + +Inserts an object, constructed with the arguments `args`, in the table if and only if there is no element in the table with an equivalent key. + +[horizontal] +Requires:;; `value_type` is constructible from `args`. +Returns:;; `true` if an insert took place. +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + +--- + +==== Copy Insert +```c++ +bool insert(const value_type& obj); +``` + +Inserts `obj` in the table if and only if there is no element in the table with an equivalent key. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^]. +Returns:;; `true` if an insert took place. + +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + +--- + +==== Move Insert +```c++ +bool insert(value_type&& obj); +``` + +Inserts `obj` in the table if and only if there is no element in the table with an equivalent key. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/MoveInsertable[MoveInsertable^]. +Returns:;; `true` if an insert took place. +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + +--- + +==== Transparent Insert +```c++ +template bool insert(K&& k); +``` + +Inserts an element constructed from `std::forward(k)` in the container if and only if there is no element in the container with an equivalent key. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] from `k`. +Returns:;; `true` if an insert took place. +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + ++ +This overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. 
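+
+A brief sketch combining the insertion and visitation members (the keys and the printed messages
+are arbitrary choices made for the example):
+
+```c++
+#include <boost/unordered/concurrent_flat_set.hpp>
+#include <cstdio>
+#include <string>
+
+int main()
+{
+  boost::concurrent_flat_set<std::string> s;
+
+  s.insert("alpha");                    // single-element insert
+  s.emplace(5, 'x');                    // constructs std::string(5, 'x') in place
+  s.insert({"beta", "gamma", "beta"});  // duplicate keys are not inserted twice
+
+  // visit looks an element up and invokes the callback with a const reference,
+  // returning the number of elements visited (0 or 1)
+  std::size_t found = s.visit("alpha", [](const std::string& v) {
+    std::printf("found %s\n", v.c_str());
+  });
+
+  std::printf("visited %zu, size %zu\n", found, s.size());
+}
+```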
+ +--- + +==== Insert Iterator Range +```c++ +template size_type insert(InputIterator first, InputIterator last); +``` + +Equivalent to +[listing,subs="+macros,+quotes"] +----- + while(first != last) this->xref:#concurrent_flat_set_emplace[emplace](*first++); +----- + +[horizontal] +Returns:;; The number of elements inserted. + +--- + +==== Insert Initializer List +```c++ +size_type insert(std::initializer_list il); +``` + +Equivalent to +[listing,subs="+macros,+quotes"] +----- + this->xref:#concurrent_flat_set_insert_iterator_range[insert](il.begin(), il.end()); +----- + +[horizontal] +Returns:;; The number of elements inserted. + +--- + +==== emplace_or_[c]visit +```c++ +template bool emplace_or_visit(Args&&... args, F&& f); +template bool emplace_or_cvisit(Args&&... args, F&& f); +``` + +Inserts an object, constructed with the arguments `args`, in the table if there is no element in the table with an equivalent key. +Otherwise, invokes `f` with a const reference to the equivalent element. + +[horizontal] +Requires:;; `value_type` is constructible from `args`. +Returns:;; `true` if an insert took place. +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + ++ +The interface is exposition only, as C++ does not allow to declare a parameter `f` after a variadic parameter pack. + +--- + +==== Copy insert_or_[c]visit +```c++ +template bool insert_or_visit(const value_type& obj, F f); +template bool insert_or_cvisit(const value_type& obj, F f); +``` + +Inserts `obj` in the table if and only if there is no element in the table with an equivalent key. +Otherwise, invokes `f` with a const reference to the equivalent element. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^]. +Returns:;; `true` if an insert took place. + +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + +--- + +==== Move insert_or_[c]visit +```c++ +template bool insert_or_visit(value_type&& obj, F f); +template bool insert_or_cvisit(value_type&& obj, F f); +``` + +Inserts `obj` in the table if and only if there is no element in the table with an equivalent key. +Otherwise, invokes `f` with a const reference to the equivalent element. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/MoveInsertable[MoveInsertable^]. +Returns:;; `true` if an insert took place. + +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + +--- + +==== Transparent insert_or_[c]visit +```c++ +template bool insert_or_visit(K&& k, F f); +template bool insert_or_cvisit(K&& k, F f); +``` + +Inserts an element constructed from `std::forward(k)` in the container if and only if there is no element in the container with an equivalent key. +Otherwise, invokes `f` with a const reference to the equivalent element. + +[horizontal] +Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] from `k`. +Returns:;; `true` if an insert took place. +Concurrency:;; Blocking on rehashing of `*this`. +Notes:;; Invalidates pointers and references to elements if a rehashing is issued. + ++ +These overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. 
The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. + +--- + +==== Insert Iterator Range or Visit +```c++ +template + size_type insert_or_visit(InputIterator first, InputIterator last, F f); +template + size_type insert_or_cvisit(InputIterator first, InputIterator last, F f); +``` + +Equivalent to +[listing,subs="+macros,+quotes"] +----- + while(first != last) this->xref:#concurrent_flat_set_emplace_or_cvisit[emplace_or_[c\]visit](*first++, f); +----- + +[horizontal] +Returns:;; The number of elements inserted. + +--- + +==== Insert Initializer List or Visit +```c++ +template size_type insert_or_visit(std::initializer_list il, F f); +template size_type insert_or_cvisit(std::initializer_list il, F f); +``` + +Equivalent to +[listing,subs="+macros,+quotes"] +----- + this->xref:#concurrent_flat_set_insert_iterator_range_or_visit[insert_or[c\]visit](il.begin(), il.end(), f); +----- + +[horizontal] +Returns:;; The number of elements inserted. + +--- + +==== erase +```c++ +size_type erase(const key_type& k); +template size_type erase(const K& k); +``` + +Erases the element with key equivalent to `k` if it exists. + +[horizontal] +Returns:;; The number of elements erased (0 or 1). +Throws:;; Only throws an exception if it is thrown by `hasher` or `key_equal`. +Notes:;; The `template` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. + +--- + +==== erase_if by Key +```c++ +template size_type erase_if(const key_type& k, F f); +template size_type erase_if(const K& k, F f); +``` + +Erases the element `x` with key equivalent to `k` if it exists and `f(x)` is `true`. + +[horizontal] +Returns:;; The number of elements erased (0 or 1). +Throws:;; Only throws an exception if it is thrown by `hasher`, `key_equal` or `f`. +Notes:;; The `template` overload only participates in overload resolution if `std::is_execution_policy_v>` is `false`. + ++ +The `template` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. + +--- + +==== erase_if +```c++ +template size_type erase_if(F f); +``` + +Successively invokes `f` with references to each of the elements in the table, and erases those for which `f` returns `true`. + +[horizontal] +Returns:;; The number of elements erased. +Throws:;; Only throws an exception if it is thrown by `f`. + +--- + +==== Parallel erase_if +```c++ +template void erase_if(ExecutionPolicy&& policy, F f); +``` + +Invokes `f` with references to each of the elements in the table, and erases those for which `f` returns `true`. +Execution is parallelized according to the semantics of the execution policy specified. + +[horizontal] +Throws:;; Depending on the exception handling mechanism of the execution policy used, may call `std::terminate` if an exception is thrown within `f`. +Notes:;; Only available in compilers supporting C++17 parallel algorithms. 
+ ++ +This overload only participates in overload resolution if `std::is_execution_policy_v>` is `true`. + ++ +Unsequenced execution policies are not allowed. + +--- + +==== swap +```c++ +void swap(concurrent_flat_set& other) + noexcept(boost::allocator_traits::is_always_equal::value || + boost::allocator_traits::propagate_on_container_swap::value); +``` + +Swaps the contents of the table with the parameter. + +If `Allocator::propagate_on_container_swap` is declared and `Allocator::propagate_on_container_swap::value` is `true` then the tables' allocators are swapped. Otherwise, swapping with unequal allocators results in undefined behavior. + +[horizontal] +Throws:;; Nothing unless `key_equal` or `hasher` throw on swapping. +Concurrency:;; Blocking on `*this` and `other`. + +--- + +==== clear +```c++ +void clear() noexcept; +``` + +Erases all elements in the table. + +[horizontal] +Postconditions:;; `size() == 0`, `max_load() >= max_load_factor() * bucket_count()` +Concurrency:;; Blocking on `*this`. + +--- + +==== merge +```c++ +template + size_type merge(concurrent_flat_set& source); +template + size_type merge(concurrent_flat_set&& source); +``` + +Move-inserts all the elements from `source` whose key is not already present in `*this`, and erases them from `source`. + +[horizontal] +Returns:;; The number of elements inserted. +Concurrency:;; Blocking on `*this` and `source`. + +--- + +=== Observers + +==== get_allocator +``` +allocator_type get_allocator() const noexcept; +``` + +[horizontal] +Returns:;; The table's allocator. + +--- + +==== hash_function +``` +hasher hash_function() const; +``` + +[horizontal] +Returns:;; The table's hash function. + +--- + +==== key_eq +``` +key_equal key_eq() const; +``` + +[horizontal] +Returns:;; The table's key equality predicate. + +--- + +=== Set Operations + +==== count +```c++ +size_type count(const key_type& k) const; +template + size_type count(const K& k) const; +``` + +[horizontal] +Returns:;; The number of elements with key equivalent to `k` (0 or 1). +Notes:;; The `template` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. + ++ +In the presence of concurrent insertion operations, the value returned may not accurately reflect +the true state of the table right after execution. + +--- + +==== contains +```c++ +bool contains(const key_type& k) const; +template + bool contains(const K& k) const; +``` + +[horizontal] +Returns:;; A boolean indicating whether or not there is an element with key equal to `k` in the table. +Notes:;; The `template` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. + ++ +In the presence of concurrent insertion operations, the value returned may not accurately reflect +the true state of the table right after execution. + +--- +=== Bucket Interface + +==== bucket_count +```c++ +size_type bucket_count() const noexcept; +``` + +[horizontal] +Returns:;; The size of the bucket array. 
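+
+The erasure, merge and lookup members described above can be exercised as in the following sketch
+(the numeric ranges and container names are arbitrary):
+
+```c++
+#include <boost/unordered/concurrent_flat_set.hpp>
+#include <cstdio>
+
+int main()
+{
+  boost::concurrent_flat_set<int> evens, odds;
+  for (int i = 0; i < 100; ++i) (i % 2 ? odds : evens).insert(i);
+
+  // conditional erasure: drop every element below 10
+  std::size_t erased = evens.erase_if([](int v) { return v < 10; });
+
+  // merge moves keys not already present in the target and erases them
+  // from the source
+  std::size_t merged = evens.merge(odds);
+
+  std::printf("erased %zu, merged %zu, contains(3): %d\n",
+    erased, merged, int(evens.contains(3)));
+}
+```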
+ +--- + +=== Hash Policy + +==== load_factor +```c++ +float load_factor() const noexcept; +``` + +[horizontal] +Returns:;; `static_cast(size())/static_cast(bucket_count())`, or `0` if `bucket_count() == 0`. + +--- + +==== max_load_factor + +```c++ +float max_load_factor() const noexcept; +``` + +[horizontal] +Returns:;; Returns the table's maximum load factor. + +--- + +==== Set max_load_factor +```c++ +void max_load_factor(float z); +``` + +[horizontal] +Effects:;; Does nothing, as the user is not allowed to change this parameter. Kept for compatibility with `boost::unordered_set`. + +--- + + +==== max_load + +```c++ +size_type max_load() const noexcept; +``` + +[horizontal] +Returns:;; The maximum number of elements the table can hold without rehashing, assuming that no further elements will be erased. +Note:;; After construction, rehash or clearance, the table's maximum load is at least `max_load_factor() * bucket_count()`. +This number may decrease on erasure under high-load conditions. + ++ +In the presence of concurrent insertion operations, the value returned may not accurately reflect +the true state of the table right after execution. + +--- + +==== rehash +```c++ +void rehash(size_type n); +``` + +Changes if necessary the size of the bucket array so that there are at least `n` buckets, and so that the load factor is less than or equal to the maximum load factor. When applicable, this will either grow or shrink the `bucket_count()` associated with the table. + +When `size() == 0`, `rehash(0)` will deallocate the underlying buckets array. + +Invalidates pointers and references to elements, and changes the order of elements. + +[horizontal] +Throws:;; The function has no effect if an exception is thrown, unless it is thrown by the table's hash function or comparison function. +Concurrency:;; Blocking on `*this`. +--- + +==== reserve +```c++ +void reserve(size_type n); +``` + +Equivalent to `a.rehash(ceil(n / a.max_load_factor()))`. + +Similar to `rehash`, this function can be used to grow or shrink the number of buckets in the table. + +Invalidates pointers and references to elements, and changes the order of elements. + +[horizontal] +Throws:;; The function has no effect if an exception is thrown, unless it is thrown by the table's hash function or comparison function. +Concurrency:;; Blocking on `*this`. + +--- + +=== Deduction Guides +A deduction guide will not participate in overload resolution if any of the following are true: + + - It has an `InputIterator` template parameter and a type that does not qualify as an input iterator is deduced for that parameter. + - It has an `Allocator` template parameter and a type that does not qualify as an allocator is deduced for that parameter. + - It has a `Hash` template parameter and an integral type or a type that qualifies as an allocator is deduced for that parameter. + - It has a `Pred` template parameter and a type that qualifies as an allocator is deduced for that parameter. + +A `size_­type` parameter type in a deduction guide refers to the `size_­type` member type of the +container type deduced by the deduction guide. Its default value coincides with the default value +of the constructor selected. 
+ +==== __iter-value-type__ +[listings,subs="+macros,+quotes"] +----- +template + using __iter-value-type__ = + typename std::iterator_traits::value_type; // exposition only +----- + +=== Equality Comparisons + +==== operator== +```c++ +template + bool operator==(const concurrent_flat_set& x, + const concurrent_flat_set& y); +``` + +Returns `true` if `x.size() == y.size()` and for every element in `x`, there is an element in `y` with the same key, with an equal value (using `operator==` to compare the value types). + +[horizontal] +Concurrency:;; Blocking on `x` and `y`. +Notes:;; Behavior is undefined if the two tables don't have equivalent equality predicates. + +--- + +==== operator!= +```c++ +template + bool operator!=(const concurrent_flat_set& x, + const concurrent_flat_set& y); +``` + +Returns `false` if `x.size() == y.size()` and for every element in `x`, there is an element in `y` with the same key, with an equal value (using `operator==` to compare the value types). + +[horizontal] +Concurrency:;; Blocking on `x` and `y`. +Notes:;; Behavior is undefined if the two tables don't have equivalent equality predicates. + +--- + +=== Swap +```c++ +template + void swap(concurrent_flat_set& x, + concurrent_flat_set& y) + noexcept(noexcept(x.swap(y))); +``` + +Equivalent to +[listing,subs="+macros,+quotes"] +----- +x.xref:#concurrent_flat_set_swap[swap](y); +----- + +--- + +=== erase_if +```c++ +template + typename concurrent_flat_set::size_type + erase_if(concurrent_flat_set& c, Predicate pred); +``` + +Equivalent to +[listing,subs="+macros,+quotes"] +----- +c.xref:#concurrent_flat_set_erase_if[erase_if](pred); +----- + +=== Serialization + +``concurrent_flat_set``s can be archived/retrieved by means of +link:../../../serialization/index.html[Boost.Serialization^] using the API provided +by this library. Both regular and XML archives are supported. + +==== Saving an concurrent_flat_set to an archive + +Saves all the elements of a `concurrent_flat_set` `x` to an archive (XML archive) `ar`. + +[horizontal] +Requires:;; `value_type` is serializable (XML serializable), and it supports Boost.Serialization +`save_construct_data`/`load_construct_data` protocol (automatically suported by +https://en.cppreference.com/w/cpp/named_req/DefaultConstructible[DefaultConstructible^] +types). +Concurrency:;; Blocking on `x`. + +--- + +==== Loading an concurrent_flat_set from an archive + +Deletes all preexisting elements of a `concurrent_flat_set` `x` and inserts +from an archive (XML archive) `ar` restored copies of the elements of the +original `concurrent_flat_set` `other` saved to the storage read by `ar`. + +[horizontal] +Requires:;; `x.key_equal()` is functionally equivalent to `other.key_equal()`. +Concurrency:;; Blocking on `x`. diff --git a/doc/unordered/intro.adoc b/doc/unordered/intro.adoc index 46bc899c..2c6dfbd3 100644 --- a/doc/unordered/intro.adoc +++ b/doc/unordered/intro.adoc @@ -44,7 +44,8 @@ boost::unordered_flat_map ^.^h|*Concurrent* ^| -^| `boost::concurrent_flat_map` +^| `boost::concurrent_flat_set` + +`boost::concurrent_flat_map` |=== @@ -56,9 +57,8 @@ in the market within the technical constraints imposed by the required standard interface to accommodate the implementation. There are two variants: **flat** (the fastest) and **node-based**, which provide pointer stability under rehashing at the expense of being slower. 
-* Finally, `boost::concurrent_flat_map` (the only **concurrent container** provided -at present) is a hashmap designed and implemented to be used in high-performance -multithreaded scenarios. Its interface is radically different from that of regular C++ containers. +* Finally, **concurrent containers** are designed and implemented to be used in high-performance +multithreaded scenarios. Their interface is radically different from that of regular C++ containers. All sets and maps in Boost.Unordered are instantiatied similarly as `std::unordered_set` and `std::unordered_map`, respectively: @@ -73,6 +73,7 @@ namespace boost { class Alloc = std::allocator > class unordered_set; // same for unordered_multiset, unordered_flat_set, unordered_node_set + // and concurrent_flat_set template < class Key, class Mapped, diff --git a/doc/unordered/rationale.adoc b/doc/unordered/rationale.adoc index fb7d8dd7..256800ab 100644 --- a/doc/unordered/rationale.adoc +++ b/doc/unordered/rationale.adoc @@ -121,7 +121,7 @@ for Visual Studio on an x64-mode Intel CPU with SSE2 and for GCC on an IBM s390x == Concurrent Containers The same data structure used by Boost.Unordered open-addressing containers has been chosen -also as the foundation of `boost::concurrent_flat_map`: +also as the foundation of `boost::concurrent_flat_set` and `boost::concurrent_flat_map`: * Open-addressing is faster than closed-addressing alternatives, both in non-concurrent and concurrent scenarios. @@ -135,7 +135,7 @@ and vice versa. === Hash Function and Platform Interoperability -`boost::concurrent_flat_map` makes the same decisions and provides the same guarantees +Concurrent containers make the same decisions and provide the same guarantees as Boost.Unordered open-addressing containers with regards to xref:#rationale_hash_function[hash function defaults] and xref:#rationale_platform_interoperability[platform interoperability]. diff --git a/doc/unordered/ref.adoc b/doc/unordered/ref.adoc index 6a9673da..08743fa6 100644 --- a/doc/unordered/ref.adoc +++ b/doc/unordered/ref.adoc @@ -11,3 +11,4 @@ include::unordered_flat_set.adoc[] include::unordered_node_map.adoc[] include::unordered_node_set.adoc[] include::concurrent_flat_map.adoc[] +include::concurrent_flat_set.adoc[] diff --git a/doc/unordered/structures.adoc b/doc/unordered/structures.adoc index 9859c39e..2da13548 100644 --- a/doc/unordered/structures.adoc +++ b/doc/unordered/structures.adoc @@ -67,8 +67,8 @@ xref:#rationale_closed_addressing_containers[corresponding section]. == Open-addressing Containers -The diagram shows the basic internal layout of `boost::unordered_flat_map`/`unordered_node_map` and -`boost:unordered_flat_set`/`unordered_node_set`. +The diagram shows the basic internal layout of `boost::unordered_flat_set`/`unordered_node_set` and +`boost:unordered_flat_map`/`unordered_node_map`. [#img-foa-layout] @@ -76,7 +76,7 @@ The diagram shows the basic internal layout of `boost::unordered_flat_map`/`unor image::foa.png[align=center] As with all open-addressing containers, elements (or pointers to the element nodes in the case of -`boost::unordered_node_map` and `boost::unordered_node_set`) are stored directly in the bucket array. +`boost::unordered_node_set` and `boost::unordered_node_map`) are stored directly in the bucket array. This array is logically divided into 2^_n_^ _groups_ of 15 elements each. In addition to the bucket array, there is an associated _metadata array_ with 2^_n_^ 16-byte words. 
@@ -129,7 +129,7 @@ xref:#rationale_open_addresing_containers[corresponding section]. == Concurrent Containers -`boost::concurrent_flat_map` uses the basic +`boost::concurrent_flat_set` and `boost::concurrent_flat_map` use the basic xref:#structures_open_addressing_containers[open-addressing layout] described above augmented with synchronization mechanisms. diff --git a/doc/unordered/unordered_flat_set.adoc b/doc/unordered/unordered_flat_set.adoc index 9fbad161..c52b9159 100644 --- a/doc/unordered/unordered_flat_set.adoc +++ b/doc/unordered/unordered_flat_set.adoc @@ -71,7 +71,7 @@ namespace boost { xref:#unordered_flat_set_iterator_range_constructor_with_allocator[unordered_flat_set](InputIterator f, InputIterator l, const allocator_type& a); explicit xref:#unordered_flat_set_allocator_constructor[unordered_flat_set](const Allocator& a); xref:#unordered_flat_set_copy_constructor_with_allocator[unordered_flat_set](const unordered_flat_set& other, const Allocator& a); - xref:#unordered_flat_set_move_constructor_with_allocator[unordered_flat_set](unordered_flat_set&& other, const Allocator& a); + xref:#unordered_flat_set_move_constructor_from_concurrent_flat_set[unordered_flat_set](concurrent_flat_set&& other); xref:#unordered_flat_set_initializer_list_constructor[unordered_flat_set](std::initializer_list il, size_type n = _implementation-defined_ const hasher& hf = hasher(), @@ -422,6 +422,22 @@ from `other`, and the allocator is copy-constructed from `a`. --- +==== Move Constructor from concurrent_flat_set + +```c++ +unordered_flat_set(concurrent_flat_set&& other); +``` + +Move construction from a xref:#concurrent_flat_set[`concurrent_flat_set`]. +The internal bucket array of `other` is transferred directly to the new container. +The hash function, predicate and allocator are moved-constructed from `other`. + +[horizontal] +Complexity:;; Constant time. +Concurrency:;; Blocking on `other`. + +--- + ==== Initializer List Constructor [source,c++,subs="+quotes"] ---- From 8d2a5c25eab2d563e451b5642470423efaf93e5a Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sun, 10 Sep 2023 18:35:51 +0200 Subject: [PATCH 03/14] tested boost::concurrent_flat_set --- test/cfoa/assign_tests.cpp | 590 +++++++++++--------- test/cfoa/clear_tests.cpp | 45 +- test/cfoa/common_helpers.hpp | 137 +++++ test/cfoa/constructor_tests.cpp | 648 +++++++++++++--------- test/cfoa/emplace_tests.cpp | 145 ++++- test/cfoa/equality_tests.cpp | 55 +- test/cfoa/erase_tests.cpp | 152 +++-- test/cfoa/exception_assign_tests.cpp | 138 +++-- test/cfoa/exception_constructor_tests.cpp | 168 ++++-- test/cfoa/exception_erase_tests.cpp | 101 +++- test/cfoa/exception_helpers.hpp | 75 ++- test/cfoa/exception_insert_tests.cpp | 56 +- test/cfoa/exception_merge_tests.cpp | 32 +- test/cfoa/fwd_tests.cpp | 92 ++- test/cfoa/helpers.hpp | 75 +-- test/cfoa/insert_tests.cpp | 292 +++++++--- test/cfoa/merge_tests.cpp | 89 +-- test/cfoa/reentrancy_check_test.cpp | 114 ++-- test/cfoa/rehash_tests.cpp | 57 +- test/cfoa/swap_tests.cpp | 95 ++-- test/cfoa/visit_tests.cpp | 318 ++++++----- 21 files changed, 2301 insertions(+), 1173 deletions(-) create mode 100644 test/cfoa/common_helpers.hpp diff --git a/test/cfoa/assign_tests.cpp b/test/cfoa/assign_tests.cpp index 409e9d8d..a9b0ca08 100644 --- a/test/cfoa/assign_tests.cpp +++ b/test/cfoa/assign_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include #if defined(__clang__) && defined(__has_warning) @@ -31,15 +33,76 @@ using test::sequential; using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator2 >; - -using flat_map_type = boost::unordered::unordered_flat_map; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; -using map_value_type = typename map_type::value_type; +using set_type = boost::unordered::concurrent_flat_set >; + +using fancy_map_type = boost::unordered::concurrent_flat_map > >; + +using fancy_set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; +fancy_map_type* fancy_test_map; +fancy_set_type* fancy_test_set; + +std::initializer_list map_init_list{ + {raii{0}, raii{0}}, + {raii{1}, raii{1}}, + {raii{2}, raii{2}}, + {raii{3}, raii{3}}, + {raii{4}, raii{4}}, + {raii{5}, raii{5}}, + {raii{6}, raii{6}}, + {raii{6}, raii{6}}, + {raii{7}, raii{7}}, + {raii{8}, raii{8}}, + {raii{9}, raii{9}}, + {raii{10}, raii{10}}, + {raii{9}, raii{9}}, + {raii{8}, raii{8}}, + {raii{7}, raii{7}}, + {raii{6}, raii{6}}, + {raii{5}, raii{5}}, + {raii{4}, raii{4}}, + {raii{3}, raii{3}}, + {raii{2}, raii{2}}, + {raii{1}, raii{1}}, + {raii{0}, raii{0}}, +}; + +std::initializer_list set_init_list{ + raii{0}, + raii{1}, + raii{2}, + raii{3}, + raii{4}, + raii{5}, + raii{6}, + raii{6}, + raii{7}, + raii{8}, + raii{9}, + raii{10}, + raii{9}, + raii{8}, + raii{7}, + raii{6}, + raii{5}, + raii{4}, + raii{3}, + raii{2}, + raii{1}, + raii{0}, +}; + +auto test_map_and_init_list=std::make_pair(test_map,map_init_list); +auto test_set_and_init_list=std::make_pair(test_set,set_init_list); template struct pocca_allocator { @@ -116,22 +179,28 @@ template struct pocma_allocator }; namespace { - template void copy_assign(G gen, test::random_generator rg) + template + void copy_assign(X*, GF gen_factory, test::random_generator rg) { + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); // lhs empty, rhs empty { raii::reset_counts(); - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); - thread_runner(values, [&x](boost::span s) { + thread_runner(values, [&x](boost::span s) { (void)s; - map_type y; + X y; BOOST_TEST(x.empty()); BOOST_TEST(y.empty()); @@ -153,14 +222,14 @@ namespace { { raii::reset_counts(); - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); - auto const old_size = reference_map.size(); + auto const old_size = reference_cont.size(); - thread_runner(values, [&x, &values](boost::span s) { + thread_runner(values, [&x, &values](boost::span s) { (void)s; - map_type y(values.size()); + X y(values.size()); for (auto const& v : values) { y.insert(v); } @@ -177,11 +246,13 @@ namespace { BOOST_TEST(y.empty()); }); - BOOST_TEST_EQ(raii::destructor, num_threads * (2 * old_size)); + BOOST_TEST_EQ( + raii::destructor, num_threads * (value_type_cardinality * old_size)); 
BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ( - raii::copy_constructor, num_threads * 2 * reference_map.size()); + raii::copy_constructor, + num_threads * value_type_cardinality * reference_cont.size()); } check_raii_counts(); @@ -189,7 +260,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -197,10 +268,10 @@ namespace { auto const old_cc = +raii::copy_constructor; thread_runner( - values, [&x, &reference_map](boost::span s) { + values, [&x, &reference_cont](boost::span s) { (void)s; - map_type y; + X y; BOOST_TEST(!x.empty()); BOOST_TEST(y.empty()); @@ -211,14 +282,16 @@ namespace { BOOST_TEST_EQ(x.key_eq(), y.key_eq()); BOOST_TEST(x.get_allocator() != y.get_allocator()); - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); }); - BOOST_TEST_EQ(raii::destructor, num_threads * 2 * x.size()); + BOOST_TEST_EQ( + raii::destructor, num_threads * value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ( - raii::copy_constructor, old_cc + (num_threads * 2 * x.size())); + raii::copy_constructor, + old_cc + (num_threads * value_type_cardinality * x.size())); } check_raii_counts(); @@ -226,7 +299,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -234,10 +307,10 @@ namespace { auto const old_size = x.size(); auto const old_cc = +raii::copy_constructor; - thread_runner(values, [&x, &values](boost::span s) { + thread_runner(values, [&x, &values](boost::span s) { (void)s; - map_type y(values.size()); + X y(values.size()); for (auto const& v : values) { y.insert(v); } @@ -252,11 +325,13 @@ namespace { BOOST_TEST(x.get_allocator() != y.get_allocator()); }); - BOOST_TEST_EQ(raii::destructor, 2 * num_threads * 2 * old_size); + BOOST_TEST_EQ( + raii::destructor, 2 * num_threads * value_type_cardinality * old_size); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ( - raii::copy_constructor, old_cc + (2 * num_threads * 2 * x.size())); + raii::copy_constructor, + old_cc + (2 * num_threads * value_type_cardinality * x.size())); } check_raii_counts(); @@ -264,7 +339,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -272,7 +347,7 @@ namespace { auto const old_cc = +raii::copy_constructor; thread_runner( - values, [&x, &reference_map](boost::span s) { + values, [&x, &reference_cont](boost::span s) { (void)s; BOOST_TEST(!x.empty()); @@ -283,7 +358,7 @@ namespace { BOOST_TEST_EQ(x.key_eq(), key_equal(2)); BOOST_TEST(x.get_allocator() == allocator_type(3)); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); }); BOOST_TEST_EQ(raii::destructor, 0u); @@ -295,15 +370,13 @@ namespace { // propagation { + using pocca_container_type = replace_allocator; using pocca_allocator_type = - pocca_allocator >; - - using pocca_map_type = boost::unordered::concurrent_flat_map; + typename pocca_container_type::allocator_type; raii::reset_counts(); - 
pocca_map_type x( + pocca_container_type x( values.size(), hasher(1), key_equal(2), pocca_allocator_type(3)); for (auto const& v : values) { x.insert(v); @@ -312,10 +385,10 @@ namespace { auto const old_size = x.size(); auto const old_cc = +raii::copy_constructor; - thread_runner(values, [&x, &values](boost::span s) { + thread_runner(values, [&x, &values](boost::span s) { (void)s; - pocca_map_type y(values.size()); + pocca_container_type y(values.size()); for (auto const& v : values) { y.insert(v); } @@ -332,40 +405,43 @@ namespace { BOOST_TEST(x.get_allocator() == y.get_allocator()); }); - BOOST_TEST_EQ(raii::destructor, 2 * num_threads * 2 * old_size); + BOOST_TEST_EQ( + raii::destructor, 2 * num_threads * value_type_cardinality * old_size); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ( - raii::copy_constructor, old_cc + (2 * num_threads * 2 * x.size())); + raii::copy_constructor, + old_cc + (2 * num_threads * value_type_cardinality * x.size())); } check_raii_counts(); } - template void move_assign(G gen, test::random_generator rg) + template + void move_assign(X*, GF gen_factory, test::random_generator rg) { - using pocma_allocator_type = pocma_allocator >; + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; - using pocma_map_type = boost::unordered::concurrent_flat_map; + using pocma_container_type = replace_allocator; + using pocma_allocator_type = typename pocma_container_type::allocator_type; + + auto gen = gen_factory.template get(); BOOST_STATIC_ASSERT( - std::is_nothrow_move_assignable, std::equal_to, - std::allocator > > >::value); + std::is_nothrow_move_assignable< + replace_allocator >::value); BOOST_STATIC_ASSERT( - std::is_nothrow_move_assignable, std::equal_to, - pocma_allocator > > >::value); + std::is_nothrow_move_assignable::value); BOOST_STATIC_ASSERT( - !std::is_nothrow_move_assignable, std::equal_to, - stateful_allocator > > >::value); + !std::is_nothrow_move_assignable< + replace_allocator >::value); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); // move assignment has more complex requirements than copying // equal allocators: @@ -383,15 +459,15 @@ namespace { { raii::reset_counts(); - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); std::atomic num_transfers{0}; thread_runner( - values, [&x, &num_transfers](boost::span s) { + values, [&x, &num_transfers](boost::span s) { (void)s; - map_type y(0, hasher(2), key_equal(1), allocator_type(3)); + X y(0, hasher(2), key_equal(1), allocator_type(3)); BOOST_TEST(x.empty()); BOOST_TEST(y.empty()); @@ -423,15 +499,15 @@ namespace { { raii::reset_counts(); - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); std::atomic num_transfers{0}; thread_runner( - values, [&x, &values, &num_transfers](boost::span s) { + values, [&x, &values, &num_transfers](boost::span s) { (void)s; - map_type y(values.size(), hasher(2), key_equal(1), allocator_type(3)); + X y(values.size(), hasher(2), key_equal(1), allocator_type(3)); for (auto const& v : values) { y.insert(v); } @@ -458,11 +534,13 @@ namespace { BOOST_TEST_EQ(num_transfers, 1u); - BOOST_TEST_EQ(raii::destructor, 
num_threads * 2 * reference_map.size()); + BOOST_TEST_EQ( + raii::destructor, num_threads * value_type_cardinality * reference_cont.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ( - raii::copy_constructor, num_threads * 2 * reference_map.size()); + raii::copy_constructor, + num_threads * value_type_cardinality * reference_cont.size()); } check_raii_counts(); @@ -470,7 +548,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -480,10 +558,10 @@ namespace { std::atomic num_transfers{0}; thread_runner(values, - [&x, &reference_map, &num_transfers](boost::span s) { + [&x, &reference_cont, &num_transfers](boost::span s) { (void)s; - map_type y(allocator_type(3)); + X y(allocator_type(3)); BOOST_TEST(y.empty()); BOOST_TEST(x.get_allocator() == y.get_allocator()); @@ -491,7 +569,7 @@ namespace { y = std::move(x); if (!y.empty()) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.hash_function(), hasher(1)); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); @@ -509,7 +587,8 @@ namespace { BOOST_TEST_EQ(num_transfers, 1u); - BOOST_TEST_EQ(raii::destructor, 2 * reference_map.size()); + BOOST_TEST_EQ( + raii::destructor, value_type_cardinality * reference_cont.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ(raii::copy_constructor, old_cc); @@ -521,7 +600,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -532,11 +611,11 @@ namespace { std::atomic num_transfers{0}; - thread_runner(values, [&x, &values, &num_transfers, &reference_map]( - boost::span s) { + thread_runner(values, [&x, &values, &num_transfers, &reference_cont]( + boost::span s) { (void)s; - map_type y(values.size(), hasher(2), key_equal(1), allocator_type(3)); + X y(values.size(), hasher(2), key_equal(1), allocator_type(3)); for (auto const& v : values) { y.insert(v); } @@ -547,7 +626,7 @@ namespace { y = std::move(x); if (y.hash_function() == hasher(1)) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); } else { @@ -565,12 +644,15 @@ namespace { BOOST_TEST_EQ(num_transfers, 1u); BOOST_TEST_EQ( - raii::destructor, 2 * old_size + num_threads * 2 * old_size); + raii::destructor, + value_type_cardinality * old_size + + num_threads * value_type_cardinality * old_size); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::copy_constructor, - old_cc + (num_threads * 2 * reference_map.size())); + BOOST_TEST_EQ( + raii::copy_constructor, + old_cc + (num_threads * value_type_cardinality * reference_cont.size())); } check_raii_counts(); @@ -578,7 +660,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -589,11 +671,11 @@ namespace { std::atomic num_transfers{0}; - thread_runner(values, [&x, &values, &num_transfers, &reference_map]( - boost::span s) { + 
thread_runner(values, [&x, &values, &num_transfers, &reference_cont]( + boost::span s) { (void)s; - map_type y(values.size(), hasher(2), key_equal(1), allocator_type(13)); + X y(values.size(), hasher(2), key_equal(1), allocator_type(13)); for (auto const& v : values) { y.insert(v); } @@ -610,7 +692,7 @@ namespace { y = std::move(x); if (y.hash_function() == hasher(1)) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); } else { @@ -628,12 +710,16 @@ namespace { BOOST_TEST_EQ(num_transfers, 1u); BOOST_TEST_EQ( - raii::destructor, 2 * 2 * old_size + num_threads * 2 * old_size); + raii::destructor, + 2 * value_type_cardinality * old_size + + num_threads * value_type_cardinality * old_size); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); - BOOST_TEST_EQ(raii::move_constructor, old_mc + 2 * old_size); - BOOST_TEST_EQ(raii::copy_constructor, - old_cc + (num_threads * 2 * reference_map.size())); + BOOST_TEST_EQ( + raii::move_constructor, old_mc + value_type_cardinality * old_size); + BOOST_TEST_EQ( + raii::copy_constructor, + old_cc + (num_threads * value_type_cardinality * reference_cont.size())); } check_raii_counts(); @@ -641,7 +727,7 @@ namespace { { raii::reset_counts(); - pocma_map_type x( + pocma_container_type x( values.size(), hasher(1), key_equal(2), pocma_allocator_type(3)); for (auto const& v : values) { x.insert(v); @@ -653,11 +739,11 @@ namespace { std::atomic num_transfers{0}; - thread_runner(values, [&x, &values, &num_transfers, &reference_map]( - boost::span s) { + thread_runner(values, [&x, &values, &num_transfers, &reference_cont]( + boost::span s) { (void)s; - pocma_map_type y( + pocma_container_type y( values.size(), hasher(2), key_equal(1), pocma_allocator_type(13)); for (auto const& v : values) { y.insert(v); @@ -669,7 +755,7 @@ namespace { y = std::move(x); if (y.hash_function() == hasher(1)) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); } else { @@ -687,12 +773,15 @@ namespace { BOOST_TEST_EQ(num_transfers, 1u); BOOST_TEST_EQ( - raii::destructor, 2 * old_size + num_threads * 2 * old_size); + raii::destructor, + value_type_cardinality * old_size + + num_threads * value_type_cardinality * old_size); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::copy_constructor, - old_cc + (num_threads * 2 * reference_map.size())); + BOOST_TEST_EQ( + raii::copy_constructor, + old_cc + (num_threads * value_type_cardinality * reference_cont.size())); } check_raii_counts(); @@ -700,7 +789,7 @@ namespace { { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); for (auto const& v : values) { x.insert(v); } @@ -709,7 +798,7 @@ namespace { auto const old_mc = +raii::move_constructor; thread_runner( - values, [&x, &reference_map](boost::span s) { + values, [&x, &reference_cont](boost::span s) { (void)s; x = std::move(x); @@ -720,7 +809,7 @@ namespace { BOOST_TEST_EQ(x.key_eq(), key_equal(2)); BOOST_TEST(x.get_allocator() == allocator_type(3)); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); }); BOOST_TEST_EQ(raii::destructor, 0u); @@ -732,52 +821,39 @@ namespace { check_raii_counts(); } - UNORDERED_AUTO_TEST 
(initializer_list_assignment) { - std::initializer_list values{ - map_value_type{raii{0}, raii{0}}, - map_value_type{raii{1}, raii{1}}, - map_value_type{raii{2}, raii{2}}, - map_value_type{raii{3}, raii{3}}, - map_value_type{raii{4}, raii{4}}, - map_value_type{raii{5}, raii{5}}, - map_value_type{raii{6}, raii{6}}, - map_value_type{raii{6}, raii{6}}, - map_value_type{raii{7}, raii{7}}, - map_value_type{raii{8}, raii{8}}, - map_value_type{raii{9}, raii{9}}, - map_value_type{raii{10}, raii{10}}, - map_value_type{raii{9}, raii{9}}, - map_value_type{raii{8}, raii{8}}, - map_value_type{raii{7}, raii{7}}, - map_value_type{raii{6}, raii{6}}, - map_value_type{raii{5}, raii{5}}, - map_value_type{raii{4}, raii{4}}, - map_value_type{raii{3}, raii{3}}, - map_value_type{raii{2}, raii{2}}, - map_value_type{raii{1}, raii{1}}, - map_value_type{raii{0}, raii{0}}, - }; + template + void initializer_list_assign(std::pair p) + { + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); - auto v = std::vector(values.begin(), values.end()); + auto init_list = p.second; + auto reference_cont = reference_container( + init_list.begin(), init_list.end()); + auto v = std::vector(init_list.begin(), init_list.end()); { raii::reset_counts(); - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); - thread_runner(v, [&x, &values](boost::span s) { + thread_runner(v, [&x, &init_list](boost::span s) { (void)s; - x = values; + x = init_list; }); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); BOOST_TEST_EQ(x.hash_function(), hasher(1)); BOOST_TEST_EQ(x.key_eq(), key_equal(2)); BOOST_TEST(x.get_allocator() == allocator_type(3)); - BOOST_TEST_EQ(raii::copy_constructor, num_threads * 2 * x.size()); - BOOST_TEST_EQ(raii::destructor, (num_threads - 1) * 2 * x.size()); + BOOST_TEST_EQ( + raii::copy_constructor, + num_threads * value_type_cardinality * x.size()); + BOOST_TEST_EQ( + raii::destructor, + (num_threads - 1) * value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::move_constructor, 0u); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); @@ -785,8 +861,12 @@ namespace { check_raii_counts(); } - template void insert_and_assign(G gen, test::random_generator rg) + template + void insert_and_assign(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); std::thread t1, t2, t3; @@ -796,40 +876,39 @@ namespace { auto v2 = v1; shuffle_values(v2); - auto reference_map = - boost::unordered_flat_map(v1.begin(), v1.end()); + auto reference_cont = reference_container(v1.begin(), v1.end()); raii::reset_counts(); { - map_type map1(v1.size(), hasher(1), key_equal(2), allocator_type(3)); - map_type map2(v2.size(), hasher(1), key_equal(2), allocator_type(3)); + X c1(v1.size(), hasher(1), key_equal(2), allocator_type(3)); + X c2(v2.size(), hasher(1), key_equal(2), allocator_type(3)); - t1 = std::thread([&v1, &map1, &start_latch, &end_latch] { + t1 = std::thread([&v1, &c1, &start_latch, &end_latch] { start_latch.arrive_and_wait(); for (auto const& v : v1) { - map1.insert(v); + c1.insert(v); } end_latch.arrive_and_wait(); }); - t2 = std::thread([&v2, &map2, &end_latch, &start_latch] { + t2 = std::thread([&v2, &c2, &end_latch, 
&start_latch] { start_latch.arrive_and_wait(); for (auto const& v : v2) { - map2.insert(v); + c2.insert(v); } end_latch.arrive_and_wait(); }); std::atomic num_assignments{0}; - t3 = std::thread([&map1, &map2, &end_latch, &num_assignments] { - while (map1.empty() && map2.empty()) { + t3 = std::thread([&c1, &c2, &end_latch, &num_assignments] { + while (c1.empty() && c2.empty()) { std::this_thread::sleep_for(std::chrono::microseconds(10)); } do { - map1 = map2; + c1 = c2; std::this_thread::sleep_for(std::chrono::milliseconds(100)); - map2 = map1; + c2 = c1; std::this_thread::sleep_for(std::chrono::milliseconds(100)); ++num_assignments; } while (!end_latch.try_wait()); @@ -841,21 +920,23 @@ namespace { BOOST_TEST_GT(num_assignments, 0u); - test_fuzzy_matches_reference(map1, reference_map, rg); - test_fuzzy_matches_reference(map2, reference_map, rg); + test_fuzzy_matches_reference(c1, reference_cont, rg); + test_fuzzy_matches_reference(c2, reference_cont, rg); } check_raii_counts(); } - template - void flat_map_move_assign( - FlatMapType*, MapType*, G gen, test::random_generator rg) + template + void flat_move_assign(X*, GF gen_factory, test::random_generator rg) { - using alloc_type = typename MapType::allocator_type; + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); /* * basically test that a temporary container is materialized and we @@ -868,83 +949,29 @@ namespace { { raii::reset_counts(); - FlatMapType flat_map(values.begin(), values.end(), values.size(), - hasher(1), key_equal(2), alloc_type(3)); + flat_container flat(values.begin(), values.end(), values.size(), + hasher(1), key_equal(2), allocator_type(3)); - MapType map(0, hasher(2), key_equal(1), alloc_type(3)); + X x(0, hasher(2), key_equal(1), allocator_type(3)); - BOOST_TEST(flat_map.get_allocator() == map.get_allocator()); + BOOST_TEST(flat.get_allocator() == x.get_allocator()); - map = std::move(flat_map); + x = std::move(flat); - BOOST_TEST(flat_map.empty()); - BOOST_TEST_EQ(map.size(), reference_map.size()); + BOOST_TEST(flat.empty()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); - test_fuzzy_matches_reference(map, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); - BOOST_TEST_EQ(map.hash_function(), hasher(1)); - BOOST_TEST_EQ(map.key_eq(), key_equal(2)); + BOOST_TEST_EQ(x.hash_function(), hasher(1)); + BOOST_TEST_EQ(x.key_eq(), key_equal(2)); - BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size()); - BOOST_TEST_EQ(raii::destructor, 2 * values.size()); - BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size()); - BOOST_TEST_EQ(raii::copy_assignment, 0u); - BOOST_TEST_EQ(raii::move_assignment, 0u); - } - - check_raii_counts(); - - { - raii::reset_counts(); - - MapType map(values.begin(), values.end(), values.size(), hasher(1), - key_equal(2), alloc_type(3)); - - FlatMapType flat_map(0, hasher(2), key_equal(1), alloc_type(3)); - - BOOST_TEST(flat_map.get_allocator() == map.get_allocator()); - - flat_map = std::move(map); - - BOOST_TEST(map.empty()); - BOOST_TEST_EQ(flat_map.size(), reference_map.size()); - - BOOST_TEST_EQ(flat_map.hash_function(), hasher(1)); - BOOST_TEST_EQ(flat_map.key_eq(), 
key_equal(2)); - - BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size()); - BOOST_TEST_EQ(raii::destructor, 2 * values.size()); - BOOST_TEST_EQ(raii::move_constructor, 2 * reference_map.size()); - BOOST_TEST_EQ(raii::copy_assignment, 0u); - BOOST_TEST_EQ(raii::move_assignment, 0u); - } - - check_raii_counts(); - - { - raii::reset_counts(); - - FlatMapType flat_map(values.begin(), values.end(), values.size(), - hasher(1), key_equal(2), alloc_type(3)); - - MapType map(0, hasher(2), key_equal(1), alloc_type(4)); - - BOOST_TEST(flat_map.get_allocator() != map.get_allocator()); - - map = std::move(flat_map); - - BOOST_TEST(flat_map.empty()); - BOOST_TEST_EQ(map.size(), reference_map.size()); - - test_fuzzy_matches_reference(map, reference_map, rg); - - BOOST_TEST_EQ(map.hash_function(), hasher(1)); - BOOST_TEST_EQ(map.key_eq(), key_equal(2)); - - BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size()); BOOST_TEST_EQ( - raii::destructor, 2 * values.size() + 2 * reference_map.size()); - BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size()); + raii::copy_constructor, value_type_cardinality * values.size()); + BOOST_TEST_EQ( + raii::destructor, value_type_cardinality * values.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * reference_cont.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } @@ -954,25 +981,95 @@ namespace { { raii::reset_counts(); - MapType map(values.begin(), values.end(), values.size(), hasher(1), - key_equal(2), alloc_type(3)); + X x(values.begin(), values.end(), values.size(), hasher(1), + key_equal(2), allocator_type(3)); - FlatMapType flat_map(0, hasher(2), key_equal(1), alloc_type(4)); + flat_container flat(0, hasher(2), key_equal(1), allocator_type(3)); - BOOST_TEST(flat_map.get_allocator() != map.get_allocator()); + BOOST_TEST(flat.get_allocator() == x.get_allocator()); - flat_map = std::move(map); + flat = std::move(x); - BOOST_TEST(map.empty()); - BOOST_TEST_EQ(flat_map.size(), reference_map.size()); + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(flat.size(), reference_cont.size()); - BOOST_TEST_EQ(flat_map.hash_function(), hasher(1)); - BOOST_TEST_EQ(flat_map.key_eq(), key_equal(2)); + BOOST_TEST_EQ(flat.hash_function(), hasher(1)); + BOOST_TEST_EQ(flat.key_eq(), key_equal(2)); - BOOST_TEST_EQ(raii::copy_constructor, 2 * values.size()); BOOST_TEST_EQ( - raii::destructor, 2 * values.size() + 2 * reference_map.size()); - BOOST_TEST_EQ(raii::move_constructor, 4 * reference_map.size()); + raii::copy_constructor, value_type_cardinality * values.size()); + BOOST_TEST_EQ( + raii::destructor, value_type_cardinality * values.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * reference_cont.size()); + BOOST_TEST_EQ(raii::copy_assignment, 0u); + BOOST_TEST_EQ(raii::move_assignment, 0u); + } + + check_raii_counts(); + + { + raii::reset_counts(); + + flat_container flat(values.begin(), values.end(), values.size(), + hasher(1), key_equal(2), allocator_type(3)); + + X x(0, hasher(2), key_equal(1), allocator_type(4)); + + BOOST_TEST(flat.get_allocator() != x.get_allocator()); + + x = std::move(flat); + + BOOST_TEST(flat.empty()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); + + test_fuzzy_matches_reference(x, reference_cont, rg); + + BOOST_TEST_EQ(x.hash_function(), hasher(1)); + BOOST_TEST_EQ(x.key_eq(), key_equal(2)); + + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * values.size()); + BOOST_TEST_EQ( + raii::destructor, + value_type_cardinality * 
values.size() + + value_type_cardinality * reference_cont.size()); + BOOST_TEST_EQ( + raii::move_constructor, + 2 * value_type_cardinality * reference_cont.size()); + BOOST_TEST_EQ(raii::copy_assignment, 0u); + BOOST_TEST_EQ(raii::move_assignment, 0u); + } + + check_raii_counts(); + + { + raii::reset_counts(); + + X x(values.begin(), values.end(), values.size(), hasher(1), + key_equal(2), allocator_type(3)); + + flat_container flat(0, hasher(2), key_equal(1), allocator_type(4)); + + BOOST_TEST(flat.get_allocator() != x.get_allocator()); + + flat = std::move(x); + + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(flat.size(), reference_cont.size()); + + BOOST_TEST_EQ(flat.hash_function(), hasher(1)); + BOOST_TEST_EQ(flat.key_eq(), key_equal(2)); + + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * values.size()); + BOOST_TEST_EQ( + raii::destructor, + value_type_cardinality * values.size() + + value_type_cardinality * reference_cont.size()); + BOOST_TEST_EQ( + raii::move_constructor, + 2 * value_type_cardinality * reference_cont.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } @@ -985,17 +1082,24 @@ namespace { // clang-format off UNORDERED_TEST( copy_assign, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( move_assign, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) +UNORDERED_TEST( + initializer_list_assign, + ((test_map_and_init_list)(test_set_and_init_list))) + UNORDERED_TEST( insert_and_assign, - ((init_type_generator)) + ((test_map)(test_set)) + ((init_type_generator_factory)) ((default_generator)(sequential)(limited_range))) boost::unordered::unordered_flat_map > >* map_fancy; UNORDERED_TEST( - flat_map_move_assign, - ((flat_map_plain)) - ((map_plain)) - ((init_type_generator)) - ((default_generator)(sequential)(limited_range))) - -UNORDERED_TEST( - flat_map_move_assign, - ((flat_map_fancy)) - ((map_fancy)) - ((init_type_generator)) + flat_move_assign, + ((test_map)(test_set)(fancy_test_map)(fancy_test_set)) + ((init_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/clear_tests.cpp b/test/cfoa/clear_tests.cpp index 79240db3..4a00d08a 100644 --- a/test/cfoa/clear_tests.cpp +++ b/test/cfoa/clear_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include test::seed_t initialize_seed{674140082}; @@ -14,49 +16,62 @@ using test::sequential; using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator >; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; -using map_value_type = typename map_type::value_type; +using set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; namespace { - template void clear_tests(G gen, test::random_generator rg) + template + void clear_tests(X*, GF gen_factory, test::random_generator rg) { + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); raii::reset_counts(); - map_type x(values.begin(), values.end(), values.size(), hasher(1), + X x(values.begin(), values.end(), values.size(), hasher(1), key_equal(2), allocator_type(3)); auto const old_size = x.size(); auto const old_d = +raii::destructor; - thread_runner(values, [&x](boost::span s) { + thread_runner(values, [&x](boost::span s) { (void)s; x.clear(); }); BOOST_TEST(x.empty()); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * old_size); + BOOST_TEST_EQ(raii::destructor, old_d + value_type_cardinality * old_size); check_raii_counts(); } - template void insert_and_clear(G gen, test::random_generator rg) + template + void insert_and_clear(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); std::thread t1, t2; { - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); std::mutex m; std::condition_variable cv; @@ -103,7 +118,7 @@ namespace { BOOST_TEST_GE(num_clears, 1u); if (!x.empty()) { - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } } @@ -115,11 +130,13 @@ namespace { // clang-format off UNORDERED_TEST( clear_tests, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST(insert_and_clear, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/common_helpers.hpp b/test/cfoa/common_helpers.hpp new file mode 100644 index 00000000..a8cb7f85 --- /dev/null +++ b/test/cfoa/common_helpers.hpp @@ -0,0 +1,137 @@ +// Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) + +#ifndef BOOST_UNORDERED_TEST_CFOA_COMMON_HELPERS_HPP +#define BOOST_UNORDERED_TEST_CFOA_COMMON_HELPERS_HPP + +#include +#include +#include +#include + +#include +#include +#include + +template +struct value_cardinality +{ + static constexpr std::size_t value=1; +}; + +template +struct value_cardinality > +{ + static constexpr std::size_t value=2; +}; + +template +struct reference_container_impl; + +template +using reference_container = typename reference_container_impl::type; + +template +struct reference_container_impl > +{ + using type = boost::unordered_flat_map; +}; + +template +struct reference_container_impl > +{ + using type = boost::unordered_flat_set; +}; + +template +struct flat_container_impl; + +template +using flat_container = typename flat_container_impl::type; + +template +struct flat_container_impl > +{ + using type = boost::unordered_flat_map; +}; + +template +struct flat_container_impl > +{ + using type = boost::unordered_flat_set; +}; + +template class Allocator> +struct replace_allocator_impl; + +template class Allocator> +using replace_allocator = + typename replace_allocator_impl::type; + +template < + typename K, typename V, typename H, typename P, typename A, + template class Allocator +> +struct replace_allocator_impl< + boost::concurrent_flat_map, Allocator> +{ + using value_type = + typename boost::concurrent_flat_map::value_type; + using type = + boost::concurrent_flat_map >; +}; + +template < + typename K, typename H, typename P, typename A, + template class Allocator +> +struct replace_allocator_impl< + boost::concurrent_flat_set, Allocator> +{ + using value_type = + typename boost::concurrent_flat_set::value_type; + using type = + boost::concurrent_flat_set >; +}; + +template +K const& get_key(K const& x) { return x; } + +template +K const& get_key(const std::pair& x) { return x.first; } + +template +K const& get_value(K const& x) { return x; } + +template +V const& get_value(const std::pair& x) { return x.second; } + +template +V& get_value(std::pair& x) { return x.second; } + +template +void test_matches_reference(X const& x, Y const& reference_cont) +{ + using value_type = typename X::value_type; + BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); + })); +} + +template +void test_fuzzy_matches_reference( + X const& x, Y const& reference_cont, test::random_generator rg) +{ + using value_type = typename X::value_type; + BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + if (rg == test::sequential) { + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); + } + })); +} + +#endif // BOOST_UNORDERED_TEST_CFOA_COMMON_HELPERS_HPP diff --git a/test/cfoa/constructor_tests.cpp b/test/cfoa/constructor_tests.cpp index 595cc281..4b3f927d 100644 --- a/test/cfoa/constructor_tests.cpp +++ b/test/cfoa/constructor_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include test::seed_t initialize_seed(4122023); @@ -46,87 +48,148 @@ template struct soccc_allocator using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator >; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; -using map_value_type = typename map_type::value_type; +using set_type = boost::unordered::concurrent_flat_set >; -UNORDERED_AUTO_TEST (default_constructor) { - boost::unordered::concurrent_flat_map x; - BOOST_TEST(x.empty()); - BOOST_TEST_EQ(x.size(), 0u); -} +map_type* test_map; +set_type* test_set; -UNORDERED_AUTO_TEST (bucket_count_with_hasher_key_equal_and_allocator) { - raii::reset_counts(); - { - map_type x(0); +std::initializer_list map_init_list{ + {raii{0}, raii{0}}, + {raii{1}, raii{1}}, + {raii{2}, raii{2}}, + {raii{3}, raii{3}}, + {raii{4}, raii{4}}, + {raii{5}, raii{5}}, + {raii{6}, raii{6}}, + {raii{6}, raii{6}}, + {raii{7}, raii{7}}, + {raii{8}, raii{8}}, + {raii{9}, raii{9}}, + {raii{10}, raii{10}}, + {raii{9}, raii{9}}, + {raii{8}, raii{8}}, + {raii{7}, raii{7}}, + {raii{6}, raii{6}}, + {raii{5}, raii{5}}, + {raii{4}, raii{4}}, + {raii{3}, raii{3}}, + {raii{2}, raii{2}}, + {raii{1}, raii{1}}, + {raii{0}, raii{0}}, +}; - BOOST_TEST(x.empty()); - BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher()); - BOOST_TEST_EQ(x.key_eq(), key_equal()); - } +std::initializer_list set_init_list{ + raii{0}, + raii{1}, + raii{2}, + raii{3}, + raii{4}, + raii{5}, + raii{6}, + raii{6}, + raii{7}, + raii{8}, + raii{9}, + raii{10}, + raii{9}, + raii{8}, + raii{7}, + raii{6}, + raii{5}, + raii{4}, + raii{3}, + raii{2}, + raii{1}, + raii{0}, +}; - { - map_type x(0, hasher(1)); - - BOOST_TEST(x.empty()); - BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher(1)); - BOOST_TEST_EQ(x.key_eq(), key_equal()); - } - - { - map_type x(0, hasher(1), key_equal(2)); - - BOOST_TEST(x.empty()); - BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher(1)); - BOOST_TEST_EQ(x.key_eq(), key_equal(2)); - } - - { - map_type x(0, hasher(1), key_equal(2), allocator_type{}); - - BOOST_TEST(x.empty()); - BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher(1)); - BOOST_TEST_EQ(x.key_eq(), key_equal(2)); - BOOST_TEST(x.get_allocator() == allocator_type{}); - } -} - -UNORDERED_AUTO_TEST (soccc) { - raii::reset_counts(); - - boost::unordered::concurrent_flat_map > > - x; - - boost::unordered::concurrent_flat_map > > - y(x); - - BOOST_TEST_EQ(y.hash_function(), x.hash_function()); - BOOST_TEST_EQ(y.key_eq(), x.key_eq()); - BOOST_TEST(y.get_allocator() != x.get_allocator()); -} +auto test_map_and_init_list=std::make_pair(test_map,map_init_list); +auto test_set_and_init_list=std::make_pair(test_set,set_init_list); namespace { - template void from_iterator_range(G gen, test::random_generator rg) + template + void default_constructor(X*) { + X x; + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(x.size(), 0u); + } + + template + void bucket_count_with_hasher_key_equal_and_allocator(X*) + { + using allocator_type = typename X::allocator_type; + + raii::reset_counts(); + { + X x(0); + + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(x.size(), 0u); + BOOST_TEST_EQ(x.hash_function(), hasher()); + BOOST_TEST_EQ(x.key_eq(), key_equal()); + } + + { + X x(0, hasher(1)); + + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(x.size(), 0u); + 
BOOST_TEST_EQ(x.hash_function(), hasher(1)); + BOOST_TEST_EQ(x.key_eq(), key_equal()); + } + + { + X x(0, hasher(1), key_equal(2)); + + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(x.size(), 0u); + BOOST_TEST_EQ(x.hash_function(), hasher(1)); + BOOST_TEST_EQ(x.key_eq(), key_equal(2)); + } + + { + X x(0, hasher(1), key_equal(2), allocator_type{}); + + BOOST_TEST(x.empty()); + BOOST_TEST_EQ(x.size(), 0u); + BOOST_TEST_EQ(x.hash_function(), hasher(1)); + BOOST_TEST_EQ(x.key_eq(), key_equal(2)); + BOOST_TEST(x.get_allocator() == allocator_type{}); + } + } + + template + void soccc(X*) + { + raii::reset_counts(); + + replace_allocator x, y(x); + + BOOST_TEST_EQ(y.hash_function(), x.hash_function()); + BOOST_TEST_EQ(y.key_eq(), x.key_eq()); + BOOST_TEST(y.get_allocator() != x.get_allocator()); + } + + template + void from_iterator_range(X*, GF gen_factory, test::random_generator rg) + { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { - map_type x(values.begin(), values.end()); + X x(values.begin(), values.end()); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); BOOST_TEST_GT(x.size(), 0u); BOOST_TEST_LE(x.size(), values.size()); BOOST_TEST_EQ(x.hash_function(), hasher()); @@ -138,9 +201,9 @@ namespace { } { - map_type x(values.begin(), values.end(), 0); + X x(values.begin(), values.end(), 0); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); BOOST_TEST_GT(x.size(), 0u); BOOST_TEST_LE(x.size(), values.size()); BOOST_TEST_EQ(x.hash_function(), hasher()); @@ -152,9 +215,9 @@ namespace { } { - map_type x(values.begin(), values.end(), 0, hasher(1)); + X x(values.begin(), values.end(), 0, hasher(1)); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); BOOST_TEST_GT(x.size(), 0u); BOOST_TEST_LE(x.size(), values.size()); BOOST_TEST_EQ(x.hash_function(), hasher(1)); @@ -166,9 +229,9 @@ namespace { } { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2)); + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2)); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); BOOST_TEST_GT(x.size(), 0u); BOOST_TEST_LE(x.size(), values.size()); BOOST_TEST_EQ(x.hash_function(), hasher(1)); @@ -180,10 +243,10 @@ namespace { } { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), allocator_type{}); - test_matches_reference(x, reference_map); + test_matches_reference(x, reference_cont); BOOST_TEST_GT(x.size(), 0u); BOOST_TEST_LE(x.size(), values.size()); BOOST_TEST_EQ(x.hash_function(), hasher(1)); @@ -197,11 +260,14 @@ namespace { check_raii_counts(); } - template void copy_constructor(G gen, test::random_generator rg) + template + void copy_constructor(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + { - map_type x(0, hasher(1), key_equal(2), allocator_type{}); - map_type y(x); + X x(0, hasher(1), key_equal(2), allocator_type{}); + X y(x); BOOST_TEST_EQ(y.size(), x.size()); BOOST_TEST_EQ(y.hash_function(), x.hash_function()); @@ -209,24 +275,24 @@ namespace { BOOST_TEST(y.get_allocator() == x.get_allocator()); } + auto gen 
= gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), allocator_type{}); thread_runner( - values, [&x, &reference_map]( + values, [&x, &reference_cont]( boost::span > s) { (void)s; - map_type y(x); + X y(x); - test_matches_reference(x, reference_map); - test_matches_reference(y, reference_map); + test_matches_reference(x, reference_cont); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.size(), x.size()); BOOST_TEST_EQ(y.hash_function(), x.hash_function()); BOOST_TEST_EQ(y.key_eq(), x.key_eq()); @@ -241,16 +307,16 @@ namespace { { allocator_type a; - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), a); + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), a); thread_runner( - values, [&x, &reference_map, a]( + values, [&x, &reference_cont, a]( boost::span > s) { (void)s; - map_type y(x, a); + X y(x, a); - test_matches_reference(x, reference_map); - test_matches_reference(y, reference_map); + test_matches_reference(x, reference_cont); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.size(), x.size()); BOOST_TEST_EQ(y.hash_function(), x.hash_function()); BOOST_TEST_EQ(y.key_eq(), x.key_eq()); @@ -261,12 +327,14 @@ namespace { check_raii_counts(); } - template - void copy_constructor_with_insertion(G gen, test::random_generator rg) + template + void copy_constructor_with_insertion(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); std::mutex m; @@ -274,7 +342,7 @@ namespace { bool ready = false; { - map_type x(0, hasher(1), key_equal(2), allocator_type{}); + X x(0, hasher(1), key_equal(2), allocator_type{}); auto f = [&x, &values, &m, &cv, &ready] { { @@ -292,7 +360,7 @@ namespace { std::thread t2(f); thread_runner( - values, [&x, &reference_map, &values, rg, &m, &cv, &ready]( + values, [&x, &reference_cont, &values, rg, &m, &cv, &ready]( boost::span > s) { (void)s; @@ -301,18 +369,18 @@ namespace { cv.wait(lk, [&] { return ready; }); } - map_type y(x); + X y(x); BOOST_TEST_LE(y.size(), values.size()); BOOST_TEST_EQ(y.hash_function(), x.hash_function()); BOOST_TEST_EQ(y.key_eq(), x.key_eq()); BOOST_TEST(y.get_allocator() == x.get_allocator()); - x.visit_all([&reference_map, rg]( - typename map_type::value_type const& val) { - BOOST_TEST(reference_map.contains(val.first)); + x.visit_all([&reference_cont, rg]( + typename X::value_type const& val) { + BOOST_TEST(reference_cont.contains(get_key(val))); if (rg == sequential) { - BOOST_TEST_EQ(val.second, reference_map.find(val.first)->second); + BOOST_TEST_EQ(val, *reference_cont.find(get_key(val))); } }); }); @@ -324,13 +392,19 @@ namespace { check_raii_counts(); } - template void move_constructor(G gen, test::random_generator rg) + template + void move_constructor(X*, GF gen_factory, test::random_generator rg) { + using value_type = typename X::value_type; + using allocator_type = typename 
X::allocator_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + { - map_type x(0, hasher(1), key_equal(2), allocator_type{}); + X x(0, hasher(1), key_equal(2), allocator_type{}); auto const old_size = x.size(); - map_type y(std::move(x)); + X y(std::move(x)); BOOST_TEST_EQ(y.size(), old_size); BOOST_TEST_EQ(y.hash_function(), hasher(1)); @@ -343,14 +417,14 @@ namespace { BOOST_TEST(y.get_allocator() == x.get_allocator()); } + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), allocator_type{}); std::atomic_uint num_transfers{0}; @@ -358,17 +432,17 @@ namespace { auto const old_mc = +raii::move_constructor; thread_runner( - values, [&x, &reference_map, &num_transfers]( + values, [&x, &reference_cont, &num_transfers]( boost::span > s) { (void)s; auto const old_size = x.size(); - map_type y(std::move(x)); + X y(std::move(x)); if (!y.empty()) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.size(), old_size); BOOST_TEST_EQ(y.hash_function(), hasher(1)); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); @@ -395,7 +469,7 @@ namespace { raii::reset_counts(); { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), allocator_type{1}); std::atomic_uint num_transfers{0}; @@ -404,19 +478,19 @@ namespace { auto const old_size = x.size(); thread_runner( - values, [&x, &reference_map, &num_transfers, old_size]( + values, [&x, &reference_cont, &num_transfers, old_size]( boost::span > s) { (void)s; auto a = allocator_type{2}; BOOST_TEST(a != x.get_allocator()); - map_type y(std::move(x), a); + X y(std::move(x), a); if (!y.empty()) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.size(), old_size); BOOST_TEST_EQ(y.hash_function(), hasher(1)); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); @@ -435,7 +509,8 @@ namespace { }); BOOST_TEST_EQ(num_transfers, 1u); - BOOST_TEST_EQ(raii::move_constructor, old_mc + (2 * old_size)); + BOOST_TEST_EQ( + raii::move_constructor, old_mc + (value_type_cardinality * old_size)); } check_raii_counts(); @@ -444,7 +519,7 @@ namespace { raii::reset_counts(); { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), allocator_type{1}); std::atomic_uint num_transfers{0}; @@ -453,19 +528,19 @@ namespace { auto const old_size = x.size(); thread_runner( - values, [&x, &reference_map, &num_transfers, old_size]( + values, [&x, &reference_cont, &num_transfers, old_size]( boost::span > s) { (void)s; auto a = allocator_type{1}; BOOST_TEST(a == x.get_allocator()); - map_type y(std::move(x), a); + X y(std::move(x), a); if (!y.empty()) { ++num_transfers; - test_matches_reference(y, reference_map); + test_matches_reference(y, reference_cont); BOOST_TEST_EQ(y.size(), old_size); BOOST_TEST_EQ(y.hash_function(), hasher(1)); BOOST_TEST_EQ(y.key_eq(), key_equal(2)); @@ -490,12 +565,16 @@ namespace { check_raii_counts(); } - template - void move_constructor_with_insertion(G gen, test::random_generator rg) + 
template + void move_constructor_with_insertion( + X*, GF gen_factory, test::random_generator rg) { + using value_type = typename X::value_type; + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); @@ -504,7 +583,7 @@ namespace { bool ready = false; { - map_type x(0, hasher(1), key_equal(2), allocator_type{}); + X x(0, hasher(1), key_equal(2), allocator_type{}); std::atomic_uint num_transfers{0}; @@ -527,7 +606,7 @@ namespace { }); thread_runner( - values, [&x, &reference_map, &num_transfers, rg, &m, &ready, &cv]( + values, [&x, &reference_cont, &num_transfers, rg, &m, &ready, &cv]( boost::span > s) { (void)s; @@ -536,15 +615,15 @@ namespace { cv.wait(lk, [&] { return ready; }); } - map_type y(std::move(x)); + X y(std::move(x)); if (!y.empty()) { ++num_transfers; - y.cvisit_all([&reference_map, rg](map_value_type const& val) { - BOOST_TEST(reference_map.contains(val.first)); + y.cvisit_all([&reference_cont, rg](value_type const& val) { + BOOST_TEST(reference_cont.contains(get_key(val))); if (rg == sequential) { BOOST_TEST_EQ( - val.second, reference_map.find(val.first)->second); + val, *reference_cont.find(get_key(val))); } }); } @@ -559,18 +638,21 @@ namespace { check_raii_counts(); } - template - void iterator_range_with_allocator(G gen, test::random_generator rg) + template + void iterator_range_with_allocator( + X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { allocator_type a; - map_type x(values.begin(), values.end(), a); + X x(values.begin(), values.end(), a); BOOST_TEST_GT(x.size(), 0u); BOOST_TEST_LE(x.size(), values.size()); @@ -583,18 +665,22 @@ namespace { BOOST_TEST(x.get_allocator() == a); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); } - UNORDERED_AUTO_TEST (explicit_allocator) { + template + void explicit_allocator(X*) + { + using allocator_type = typename X::allocator_type; + raii::reset_counts(); { allocator_type a; - map_type x(a); + X x(a); BOOST_TEST_EQ(x.size(), 0u); BOOST_TEST_EQ(x.hash_function(), hasher()); @@ -604,37 +690,20 @@ namespace { } } - UNORDERED_AUTO_TEST (initializer_list_with_all_params) { - // hard-code 11 unique values - std::initializer_list ilist{ - map_value_type{raii{0}, raii{0}}, - map_value_type{raii{1}, raii{1}}, - map_value_type{raii{2}, raii{2}}, - map_value_type{raii{3}, raii{3}}, - map_value_type{raii{4}, raii{4}}, - map_value_type{raii{5}, raii{5}}, - map_value_type{raii{6}, raii{6}}, - map_value_type{raii{6}, raii{6}}, - map_value_type{raii{7}, raii{7}}, - map_value_type{raii{8}, raii{8}}, - map_value_type{raii{9}, raii{9}}, - map_value_type{raii{10}, raii{10}}, - map_value_type{raii{9}, raii{9}}, - map_value_type{raii{8}, raii{8}}, - map_value_type{raii{7}, raii{7}}, - map_value_type{raii{6}, raii{6}}, - map_value_type{raii{5}, raii{5}}, - map_value_type{raii{4}, raii{4}}, - map_value_type{raii{3}, raii{3}}, - 
map_value_type{raii{2}, raii{2}}, - map_value_type{raii{1}, raii{1}}, - map_value_type{raii{0}, raii{0}}, - }; + template + void initializer_list_with_all_params(std::pair p) + { + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; + + auto init_list = p.second; { raii::reset_counts(); - map_type x(ilist, 0, hasher(1), key_equal(2), allocator_type(3)); + X x(init_list, 0, hasher(1), key_equal(2), allocator_type(3)); BOOST_TEST_EQ(x.size(), 11u); BOOST_TEST_EQ(x.hash_function(), hasher(1)); @@ -642,15 +711,17 @@ namespace { BOOST_TEST(x.get_allocator() == allocator_type(3)); BOOST_TEST_EQ(raii::default_constructor, 0u); - BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size()); - BOOST_TEST_EQ(raii::move_constructor, 2 * 11u); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * init_list.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * 11u); } check_raii_counts(); { raii::reset_counts(); - map_type x(ilist, allocator_type(3)); + X x(init_list, allocator_type(3)); BOOST_TEST_EQ(x.size(), 11u); BOOST_TEST_EQ(x.hash_function(), hasher()); @@ -658,15 +729,17 @@ namespace { BOOST_TEST(x.get_allocator() == allocator_type(3)); BOOST_TEST_EQ(raii::default_constructor, 0u); - BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size()); - BOOST_TEST_EQ(raii::move_constructor, 2 * 11u); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * init_list.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * 11u); } check_raii_counts(); { raii::reset_counts(); - map_type x(ilist, 0, allocator_type(3)); + X x(init_list, 0, allocator_type(3)); BOOST_TEST_EQ(x.size(), 11u); BOOST_TEST_EQ(x.hash_function(), hasher()); @@ -674,15 +747,17 @@ namespace { BOOST_TEST(x.get_allocator() == allocator_type(3)); BOOST_TEST_EQ(raii::default_constructor, 0u); - BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size()); - BOOST_TEST_EQ(raii::move_constructor, 2 * 11u); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * init_list.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * 11u); } check_raii_counts(); { raii::reset_counts(); - map_type x(ilist, 0, hasher(1), allocator_type(3)); + X x(init_list, 0, hasher(1), allocator_type(3)); BOOST_TEST_EQ(x.size(), 11u); BOOST_TEST_EQ(x.hash_function(), hasher(1)); @@ -690,58 +765,70 @@ namespace { BOOST_TEST(x.get_allocator() == allocator_type(3)); BOOST_TEST_EQ(raii::default_constructor, 0u); - BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size()); - BOOST_TEST_EQ(raii::move_constructor, 2 * 11u); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * init_list.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * 11u); } check_raii_counts(); } - UNORDERED_AUTO_TEST (bucket_count_and_allocator) { - raii::reset_counts(); - - { - map_type x(0, allocator_type(3)); - BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher()); - BOOST_TEST_EQ(x.key_eq(), key_equal()); - BOOST_TEST(x.get_allocator() == allocator_type(3)); - } - - { - map_type x(4096, allocator_type(3)); - BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher()); - BOOST_TEST_EQ(x.key_eq(), key_equal()); - BOOST_TEST(x.get_allocator() == allocator_type(3)); - } - } - - UNORDERED_AUTO_TEST (bucket_count_with_hasher_and_allocator) { - raii::reset_counts(); - - { - map_type x(0, hasher(1), allocator_type(3)); - 
BOOST_TEST_EQ(x.size(), 0u); - BOOST_TEST_EQ(x.hash_function(), hasher(1)); - BOOST_TEST_EQ(x.key_eq(), key_equal()); - BOOST_TEST(x.get_allocator() == allocator_type(3)); - } - } - - template - void iterator_range_with_bucket_count_and_allocator( - G gen, test::random_generator rg) + template + void bucket_count_and_allocator(X*) { + using allocator_type = typename X::allocator_type; + + raii::reset_counts(); + + { + X x(0, allocator_type(3)); + BOOST_TEST_EQ(x.size(), 0u); + BOOST_TEST_EQ(x.hash_function(), hasher()); + BOOST_TEST_EQ(x.key_eq(), key_equal()); + BOOST_TEST(x.get_allocator() == allocator_type(3)); + } + + { + X x(4096, allocator_type(3)); + BOOST_TEST_EQ(x.size(), 0u); + BOOST_TEST_EQ(x.hash_function(), hasher()); + BOOST_TEST_EQ(x.key_eq(), key_equal()); + BOOST_TEST(x.get_allocator() == allocator_type(3)); + } + } + + template + void bucket_count_with_hasher_and_allocator(X*) + { + using allocator_type = typename X::allocator_type; + + raii::reset_counts(); + + { + X x(0, hasher(1), allocator_type(3)); + BOOST_TEST_EQ(x.size(), 0u); + BOOST_TEST_EQ(x.hash_function(), hasher(1)); + BOOST_TEST_EQ(x.key_eq(), key_equal()); + BOOST_TEST(x.get_allocator() == allocator_type(3)); + } + } + + template + void iterator_range_with_bucket_count_and_allocator( + X*, GF gen_factory, test::random_generator rg) + { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { allocator_type a(3); - map_type x(values.begin(), values.end(), 0, a); - test_fuzzy_matches_reference(x, reference_map, rg); + X x(values.begin(), values.end(), 0, a); + test_fuzzy_matches_reference(x, reference_cont, rg); BOOST_TEST_EQ(x.hash_function(), hasher()); BOOST_TEST_EQ(x.key_eq(), key_equal()); @@ -751,21 +838,23 @@ namespace { check_raii_counts(); } - template + template void iterator_range_with_bucket_count_hasher_and_allocator( - G gen, test::random_generator rg) + X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { allocator_type a(3); hasher hf(1); - map_type x(values.begin(), values.end(), 0, hf, a); - test_fuzzy_matches_reference(x, reference_map, rg); + X x(values.begin(), values.end(), 0, hf, a); + test_fuzzy_matches_reference(x, reference_cont, rg); BOOST_TEST_EQ(x.hash_function(), hf); BOOST_TEST_EQ(x.key_eq(), key_equal()); @@ -775,19 +864,22 @@ namespace { check_raii_counts(); } - template void flat_map_constructor(G gen, test::random_generator rg) + template + void flat_constructor(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map( - values.begin(), values.end(), values.size()); + auto reference_cont = reference_container(values.begin(), values.end()); + auto reference_flat= flat_container(values.begin(), values.end()); raii::reset_counts(); { - 
boost::unordered_flat_map - flat_map(values.begin(), values.end(), reference_map.size(), hasher(1), - key_equal(2), allocator_type(3)); + flat_container flat( + values.begin(), values.end(), reference_cont.size(), hasher(1), + key_equal(2), allocator_type(3)); auto const old_dc = +raii::default_constructor; auto const old_mc = +raii::move_constructor; @@ -797,9 +889,9 @@ namespace { BOOST_TEST_GT(old_mc, 0u); BOOST_TEST_GT(old_cc, 0u); - map_type x(std::move(flat_map)); + X x(std::move(flat)); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); BOOST_TEST_EQ(+raii::default_constructor, old_dc); BOOST_TEST_EQ(+raii::move_constructor, old_mc); @@ -809,16 +901,15 @@ namespace { BOOST_TEST_EQ(x.key_eq(), key_equal(2)); BOOST_TEST(x.get_allocator() == allocator_type(3)); - BOOST_TEST(flat_map.empty()); + BOOST_TEST(flat.empty()); } check_raii_counts(); { - boost::unordered_flat_map - flat_map(0, hasher(1), key_equal(2), allocator_type(3)); + flat_container flat(0, hasher(1), key_equal(2), allocator_type(3)); - map_type x(std::move(flat_map)); + X x(std::move(flat)); BOOST_TEST(x.empty()); @@ -826,13 +917,13 @@ namespace { BOOST_TEST_EQ(x.key_eq(), key_equal(2)); BOOST_TEST(x.get_allocator() == allocator_type(3)); - BOOST_TEST(flat_map.empty()); + BOOST_TEST(flat.empty()); } check_raii_counts(); { - map_type flat_map(values.begin(), values.end(), reference_map.size(), + X x(values.begin(), values.end(), reference_cont.size(), hasher(1), key_equal(2), allocator_type(3)); auto const old_dc = +raii::default_constructor; @@ -843,89 +934,124 @@ namespace { BOOST_TEST_GT(old_mc, 0u); BOOST_TEST_GT(old_cc, 0u); - boost::unordered_flat_map - x(std::move(flat_map)); + flat_container flat(std::move(x)); - BOOST_TEST(x == reference_map); + BOOST_TEST(flat == reference_flat); BOOST_TEST_EQ(+raii::default_constructor, old_dc); BOOST_TEST_EQ(+raii::move_constructor, old_mc); BOOST_TEST_EQ(+raii::copy_constructor, old_cc); - BOOST_TEST_EQ(x.hash_function(), hasher(1)); - BOOST_TEST_EQ(x.key_eq(), key_equal(2)); - BOOST_TEST(x.get_allocator() == allocator_type(3)); + BOOST_TEST_EQ(flat.hash_function(), hasher(1)); + BOOST_TEST_EQ(flat.key_eq(), key_equal(2)); + BOOST_TEST(flat.get_allocator() == allocator_type(3)); - BOOST_TEST(flat_map.empty()); + BOOST_TEST(x.empty()); } check_raii_counts(); { - map_type flat_map(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); - boost::unordered_flat_map - x(std::move(flat_map)); + flat_container flat(std::move(x)); + + BOOST_TEST(flat.empty()); + + BOOST_TEST_EQ(flat.hash_function(), hasher(1)); + BOOST_TEST_EQ(flat.key_eq(), key_equal(2)); + BOOST_TEST(flat.get_allocator() == allocator_type(3)); BOOST_TEST(x.empty()); - - BOOST_TEST_EQ(x.hash_function(), hasher(1)); - BOOST_TEST_EQ(x.key_eq(), key_equal(2)); - BOOST_TEST(x.get_allocator() == allocator_type(3)); - - BOOST_TEST(flat_map.empty()); } check_raii_counts(); } + } // namespace // clang-format off +UNORDERED_TEST( + default_constructor, + ((test_map)(test_set))) + +UNORDERED_TEST( + bucket_count_with_hasher_key_equal_and_allocator, + ((test_map)(test_set))) + +UNORDERED_TEST( + soccc, + ((test_map)(test_set))) + UNORDERED_TEST( from_iterator_range, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( copy_constructor, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) 
((default_generator)(sequential)(limited_range))) UNORDERED_TEST( copy_constructor_with_insertion, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( move_constructor, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( move_constructor_with_insertion, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( iterator_range_with_allocator, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) +UNORDERED_TEST( + explicit_allocator, + ((test_map)(test_set))) + +UNORDERED_TEST( + initializer_list_with_all_params, + ((test_map_and_init_list)(test_set_and_init_list))) + +UNORDERED_TEST( + bucket_count_and_allocator, + ((test_map)(test_set))) + +UNORDERED_TEST( + bucket_count_with_hasher_and_allocator, + ((test_map)(test_set))) + UNORDERED_TEST( iterator_range_with_bucket_count_and_allocator, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( iterator_range_with_bucket_count_hasher_and_allocator, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( - flat_map_constructor, - ((value_type_generator)) + flat_constructor, + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) - // clang-format on RUN_TESTS() diff --git a/test/cfoa/emplace_tests.cpp b/test/cfoa/emplace_tests.cpp index ba8ac70d..0ecc66d4 100644 --- a/test/cfoa/emplace_tests.cpp +++ b/test/cfoa/emplace_tests.cpp @@ -1,34 +1,114 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include #include namespace { test::seed_t initialize_seed(335740237); + template + bool member_emplace(Container& x, Value const & v) + { + return x.emplace(v.x_); + } + + template + bool member_emplace(Container& x, Value& v) + { + return x.emplace(v.x_); + } + + template + bool member_emplace(Container& x, std::pair const & v) + { + return x.emplace(v.first.x_, v.second.x_); + } + + template + bool member_emplace(Container& x, std::pair& v) + { + return x.emplace(v.first.x_, v.second.x_); + } + + template + bool member_emplace_or_visit(Container& x, Value const & v, F f) + { + return x.emplace_or_visit(v.x_, f); + } + + template + bool member_emplace_or_visit(Container& x, Value& v, F f) + { + return x.emplace_or_visit(v.x_, f); + } + + template + bool member_emplace_or_visit( + Container& x, std::pair const & v, F f) + { + return x.emplace_or_visit(v.first.x_, v.second.x_, f); + } + + template + bool member_emplace_or_visit(Container& x, std::pair& v, F f) + { + return x.emplace_or_visit(v.first.x_, v.second.x_, f); + } + + template + bool member_emplace_or_cvisit(Container& x, Value const & v, F f) + { + return x.emplace_or_cvisit(v.x_, f); + } + + template + bool member_emplace_or_cvisit(Container& x, Value& v, F f) + { + return x.emplace_or_cvisit(v.x_, f); + } + + template + bool member_emplace_or_cvisit( + Container& x, std::pair const & v, F f) + { + return x.emplace_or_cvisit(v.first.x_, v.second.x_, f); + } + + template + bool member_emplace_or_cvisit(Container& x, std::pair& v, F f) + { + return x.emplace_or_cvisit(v.first.x_, v.second.x_, f); + } + struct lvalue_emplacer_type { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_inserts{0}; thread_runner(values, [&x, &num_inserts](boost::span s) { for (auto const& r : s) { - bool b = x.emplace(r.first.x_, r.second.x_); + bool b = member_emplace(x, r); if (b) { ++num_inserts; } } }); BOOST_TEST_EQ(num_inserts, x.size()); - BOOST_TEST_EQ(raii::default_constructor, 2 * values.size()); + BOOST_TEST_EQ( + raii::default_constructor, value_type_cardinality * values.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_GE(raii::move_constructor, 2 * x.size()); + BOOST_TEST_GE(raii::move_constructor, value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); BOOST_TEST_EQ(raii::copy_assignment, 0u); @@ -40,9 +120,12 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + x.reserve(values.size()); lvalue_emplacer_type::operator()(values, x); - BOOST_TEST_EQ(raii::move_constructor, 2 * x.size()); + BOOST_TEST_EQ(raii::move_constructor, value_type_cardinality * x.size()); } } norehash_lvalue_emplacer; @@ -50,12 +133,15 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_inserts{0}; std::atomic num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span s) { for (auto& r : s) { - bool b = x.emplace_or_cvisit( - r.first.x_, r.second.x_, + bool b = member_emplace_or_cvisit( + x, r, [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; @@ -70,9 +156,10 @@ namespace { BOOST_TEST_EQ(num_inserts, x.size()); BOOST_TEST_EQ(num_invokes, values.size() - 
x.size()); - BOOST_TEST_EQ(raii::default_constructor, 2 * values.size()); + BOOST_TEST_EQ( + raii::default_constructor, value_type_cardinality * values.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_GE(raii::move_constructor, 2 * x.size()); + BOOST_TEST_GE(raii::move_constructor, value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ(raii::copy_assignment, 0u); } @@ -82,13 +169,23 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_inserts{0}; std::atomic num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span s) { for (auto& r : s) { - bool b = x.emplace_or_visit( - r.first.x_, r.second.x_, - [&num_invokes](typename X::value_type& v) { + bool b = member_emplace_or_visit( + x, r, + [&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); @@ -102,20 +199,21 @@ namespace { BOOST_TEST_EQ(num_inserts, x.size()); BOOST_TEST_EQ(num_invokes, values.size() - x.size()); - BOOST_TEST_EQ(raii::default_constructor, 2 * values.size()); + BOOST_TEST_EQ( + raii::default_constructor, value_type_cardinality * values.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_GE(raii::move_constructor, 2 * x.size()); + BOOST_TEST_GE(raii::move_constructor, value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::move_assignment, 0u); BOOST_TEST_EQ(raii::copy_assignment, 0u); } } lvalue_emplace_or_visit; - template - void emplace(X*, G gen, F emplacer, test::random_generator rg) + template + void emplace(X*, GF gen_factory, F emplacer, test::random_generator rg) { + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { @@ -123,13 +221,13 @@ namespace { emplacer(values, x); - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); using value_type = typename X::value_type; - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); + BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); if (rg == test::sequential) { - BOOST_TEST_EQ(kv.second, reference_map[kv.first]); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); } })); } @@ -145,6 +243,7 @@ namespace { } boost::unordered::concurrent_flat_map* map; + boost::unordered::concurrent_flat_set* set; } // namespace @@ -156,8 +255,8 @@ using test::sequential; UNORDERED_TEST( emplace, - ((map)) - ((value_type_generator)(init_type_generator)) + ((map)(set)) + ((value_type_generator_factory)(init_type_generator_factory)) ((lvalue_emplacer)(norehash_lvalue_emplacer) (lvalue_emplace_or_cvisit)(lvalue_emplace_or_visit)) ((default_generator)(sequential)(limited_range))) diff --git a/test/cfoa/equality_tests.cpp b/test/cfoa/equality_tests.cpp index 8ab2fbb6..391be096 100644 --- a/test/cfoa/equality_tests.cpp +++ b/test/cfoa/equality_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, 
Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include test::seed_t initialize_seed{1634048962}; @@ -14,16 +16,21 @@ using test::sequential; using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator >; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; -using map_value_type = typename map_type::value_type; +using set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; namespace { - UNORDERED_AUTO_TEST (simple_equality) { + UNORDERED_AUTO_TEST (simple_map_equality) { + using allocator_type = map_type::allocator_type; + { map_type x1( {{1, 11}, {2, 22}}, 0, hasher(1), key_equal(2), allocator_type(3)); @@ -50,17 +57,42 @@ namespace { } } - template void insert_and_compare(G gen, test::random_generator rg) + UNORDERED_AUTO_TEST (simple_set_equality) { + using allocator_type = set_type::allocator_type; + + { + set_type x1( + {1, 2}, 0, hasher(1), key_equal(2), allocator_type(3)); + + set_type x2( + {1, 2}, 0, hasher(2), key_equal(2), allocator_type(3)); + + set_type x3({1}, 0, hasher(2), key_equal(2), allocator_type(3)); + + BOOST_TEST_EQ(x1.size(), x2.size()); + BOOST_TEST(x1 == x2); + BOOST_TEST(!(x1 != x2)); + + BOOST_TEST(x1.size() != x3.size()); + BOOST_TEST(!(x1 == x3)); + BOOST_TEST(x1 != x3); + } + } + + template + void insert_and_compare(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); }); - boost::unordered_flat_map reference_map( - vals1.begin(), vals1.end()); + auto reference_cont = reference_container(vals1.begin(), vals1.end()); { raii::reset_counts(); - map_type x1(vals1.size(), hasher(1), key_equal(2), allocator_type(3)); - map_type x2(vals1.begin(), vals1.end(), vals1.size(), hasher(2), + X x1(vals1.size(), hasher(1), key_equal(2), allocator_type(3)); + X x2(vals1.begin(), vals1.end(), vals1.size(), hasher(2), key_equal(2), allocator_type(3)); std::thread t1, t2; @@ -126,7 +158,7 @@ namespace { BOOST_TEST(x1 == x2); BOOST_TEST(!(x1 != x2)); - test_matches_reference(x1, reference_map); + test_matches_reference(x1, reference_cont); } check_raii_counts(); } @@ -135,7 +167,8 @@ namespace { // clang-format off UNORDERED_TEST( insert_and_compare, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/erase_tests.cpp b/test/cfoa/erase_tests.cpp index 0bf4041b..bb870701 100644 --- a/test/cfoa/erase_tests.cpp +++ b/test/cfoa/erase_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include #include @@ -15,6 +17,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_erased{0}; auto const old_size = x.size(); @@ -26,11 +31,11 @@ namespace { BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, - raii::destructor + 2 * x.size()); + raii::destructor + value_type_cardinality * x.size()); thread_runner(values, [&values, &num_erased, &x](boost::span) { - for (auto const& k : values) { - auto count = x.erase(k.first); + for (auto const& v : values) { + auto count = x.erase(get_key(v)); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); @@ -41,7 +46,7 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * old_size); + BOOST_TEST_EQ(raii::destructor, old_d + value_type_cardinality * old_size); BOOST_TEST_EQ(x.size(), 0u); BOOST_TEST(x.empty()); @@ -53,6 +58,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_erased{0}; auto const old_size = x.size(); @@ -64,7 +72,7 @@ namespace { BOOST_TEST_EQ(raii::default_constructor + raii::copy_constructor + raii::move_constructor, - raii::destructor + 2 * x.size()); + raii::destructor + value_type_cardinality * x.size()); thread_runner(values, [&num_erased, &x](boost::span s) { for (auto const& k : s) { @@ -92,6 +100,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; std::atomic num_erased{0}; @@ -105,8 +122,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -114,15 +131,15 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner(values, [&num_erased, &x, threshold](boost::span s) { - for (auto const& k : s) { - auto count = x.erase_if(k.first, - [threshold](value_type& v) { return v.second.x_ > threshold; }); + for (auto const& v : s) { + auto count = x.erase_if(get_key(v), + [threshold](arg_type& w) { return get_value(w).x_ > threshold; }); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); @@ -136,7 +153,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); } } lvalue_eraser_if; @@ -145,6 +163,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + 
std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; std::atomic num_erased{0}; @@ -158,8 +185,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -167,15 +194,15 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); thread_runner(values, [&num_erased, &x, threshold](boost::span s) { - for (auto const& k : s) { - auto count = x.erase_if(k.first.x_, - [threshold](value_type& v) { return v.second.x_ > threshold; }); + for (auto const& v : s) { + auto count = x.erase_if(get_key(v).x_, + [threshold](arg_type& w) { return get_value(w).x_ > threshold; }); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); @@ -189,7 +216,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); } } transp_lvalue_eraser_if; @@ -198,6 +226,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; std::atomic num_erased{0}; @@ -211,8 +248,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -220,7 +257,7 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); @@ -229,7 +266,7 @@ namespace { values, [&num_erased, &x, threshold](boost::span /* s */) { for (std::size_t i = 0; i < 128; ++i) { auto count = x.erase_if( - [threshold](value_type& v) { return v.second.x_ > threshold; }); + [threshold](arg_type& v) { return get_value(v).x_ > threshold; }); num_erased += count; } }); @@ -241,7 +278,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); } } erase_if; @@ -250,6 +288,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; std::atomic num_erased{0}; @@ -263,8 +310,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -272,7 +319,7 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); @@ 
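// Note (not from the patch itself): the arg_type alias repeated in the
// erasers above exists because concurrent_flat_set only ever grants const
// access to its elements (the element is the key), whereas
// concurrent_flat_map passes value_type& to visit/erase_if callables. The
// selection reduces to a trait along these lines (visit_arg_t is a name
// introduced here purely for illustration):

#include <type_traits>

template <class X>
using visit_arg_t = typename std::conditional<
  std::is_same<typename X::key_type, typename X::value_type>::value,
  typename X::value_type const, // set: const access only
  typename X::value_type        // map: mapped part may be mutated
>::type;

// A predicate written against visit_arg_t<X>& is then valid for both
// containers, e.g.
//   x.erase_if(get_key(v),
//     [threshold](visit_arg_t<X>& w) { return get_value(w).x_ > threshold; });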
-281,7 +328,8 @@ namespace { values, [&num_erased, &x, threshold](boost::span /* s */) { for (std::size_t i = 0; i < 128; ++i) { auto count = boost::unordered::erase_if(x, - [threshold](value_type& v) { return v.second.x_ > threshold; }); + [threshold](arg_type& v) { + return get_value(v).x_ > threshold; }); num_erased += count; } }); @@ -293,7 +341,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); } } free_fn_erase_if; @@ -303,6 +352,15 @@ namespace { { #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; std::atomic num_invokes{0}; @@ -316,8 +374,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -325,7 +383,7 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); @@ -333,9 +391,9 @@ namespace { thread_runner(values, [&num_invokes, &x, threshold](boost::span s) { (void)s; x.erase_if( - std::execution::par, [&num_invokes, threshold](value_type& v) { + std::execution::par, [&num_invokes, threshold](arg_type& v) { ++num_invokes; - return v.second.x_ > threshold; + return get_value(v).x_ > threshold; }); }); @@ -346,7 +404,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * expected_erasures); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * expected_erasures); #else (void)values; (void)x; @@ -354,12 +413,12 @@ namespace { } } erase_if_exec_policy; - template - void erase(X*, G gen, F eraser, test::random_generator rg) + template + void erase(X*, GF gen_factory, F eraser, test::random_generator rg) { + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { @@ -367,20 +426,23 @@ namespace { x.insert(values.begin(), values.end()); - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); eraser(values, x); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); } boost::unordered::concurrent_flat_map* map; + boost::unordered::concurrent_flat_set* set; boost::unordered::concurrent_flat_map* transparent_map; + boost::unordered::concurrent_flat_map* transparent_set; } // namespace @@ -391,15 +453,15 @@ using test::sequential; // clang-format off UNORDERED_TEST( erase, - ((map)) - ((value_type_generator)(init_type_generator)) + ((map)(set)) + ((value_type_generator_factory)(init_type_generator_factory)) 
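// Sketch (not from the patch itself): the former 2 * x.size() destructor
// arithmetic is generalized above through value_cardinality<X>, whose
// definition lives in common_helpers.hpp and is not visible in this excerpt.
// A plausible shape consistent with its use here; the real trait takes the
// container type and presumably inspects its value_type:

#include <cstddef>
#include <utility>

template <class T> struct value_cardinality_sketch
{
  static constexpr std::size_t value = 1; // set element: one raii object
};

template <class K, class M> struct value_cardinality_sketch<std::pair<K, M> >
{
  static constexpr std::size_t value = 2; // map element: key + mapped value
};

// Destructor counts are then expected to grow by
// value_cardinality_sketch<element>::value per erased element, which matches
// the value_type_cardinality * ... expectations used by the erasers above.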
((lvalue_eraser)(lvalue_eraser_if)(erase_if)(free_fn_erase_if)(erase_if_exec_policy)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( erase, ((transparent_map)) - ((value_type_generator)(init_type_generator)) + ((value_type_generator_factory)(init_type_generator_factory)) ((transp_lvalue_eraser)(transp_lvalue_eraser_if)(erase_if_exec_policy)) ((default_generator)(sequential)(limited_range))) diff --git a/test/cfoa/exception_assign_tests.cpp b/test/cfoa/exception_assign_tests.cpp index 36e94367..96199973 100644 --- a/test/cfoa/exception_assign_tests.cpp +++ b/test/cfoa/exception_assign_tests.cpp @@ -1,24 +1,87 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "exception_helpers.hpp" #include - -using allocator_type = stateful_allocator >; +#include using hasher = stateful_hash; using key_equal = stateful_key_equal; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; + +using set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; + +std::initializer_list map_init_list{ + {raii{0}, raii{0}}, + {raii{1}, raii{1}}, + {raii{2}, raii{2}}, + {raii{3}, raii{3}}, + {raii{4}, raii{4}}, + {raii{5}, raii{5}}, + {raii{6}, raii{6}}, + {raii{6}, raii{6}}, + {raii{7}, raii{7}}, + {raii{8}, raii{8}}, + {raii{9}, raii{9}}, + {raii{10}, raii{10}}, + {raii{9}, raii{9}}, + {raii{8}, raii{8}}, + {raii{7}, raii{7}}, + {raii{6}, raii{6}}, + {raii{5}, raii{5}}, + {raii{4}, raii{4}}, + {raii{3}, raii{3}}, + {raii{2}, raii{2}}, + {raii{1}, raii{1}}, + {raii{0}, raii{0}}, +}; + +std::initializer_list set_init_list{ + raii{0}, + raii{1}, + raii{2}, + raii{3}, + raii{4}, + raii{5}, + raii{6}, + raii{6}, + raii{7}, + raii{8}, + raii{9}, + raii{10}, + raii{9}, + raii{8}, + raii{7}, + raii{6}, + raii{5}, + raii{4}, + raii{3}, + raii{2}, + raii{1}, + raii{0}, +}; + +auto test_map_and_init_list=std::make_pair(test_map,map_init_list); +auto test_set_and_init_list=std::make_pair(test_set,set_init_list); namespace { test::seed_t initialize_seed(1794114520); - template void copy_assign(G gen, test::random_generator rg) + template + void copy_assign(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); { @@ -31,12 +94,12 @@ namespace { values.begin() + static_cast(values.size() / 2); auto end = values.end(); - auto reference_map = boost::unordered_flat_map(begin, mid); + auto reference_cont = reference_container(begin, mid); - map_type x( + X x( begin, mid, values.size(), hasher(1), key_equal(2), allocator_type(3)); - map_type y( + X y( mid, end, values.size(), hasher(2), key_equal(1), allocator_type(4)); BOOST_TEST(!y.empty()); @@ -53,13 +116,17 @@ namespace { disable_exceptions(); BOOST_TEST_GT(num_throws, 0u); - test_fuzzy_matches_reference(y, reference_map, rg); + test_fuzzy_matches_reference(y, reference_cont, rg); } check_raii_counts(); } - template void move_assign(G gen, test::random_generator rg) + template + void move_assign(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); { @@ -72,7 +139,7 @@ namespace { 
values.begin() + static_cast(values.size() / 2); auto end = values.end(); - auto reference_map = boost::unordered_flat_map(begin, mid); + auto reference_cont = reference_container(begin, mid); BOOST_TEST( !boost::allocator_is_always_equal::type::value); @@ -83,10 +150,10 @@ namespace { for (std::size_t i = 0; i < 2 * alloc_throw_threshold; ++i) { disable_exceptions(); - map_type x(begin, mid, values.size(), hasher(1), key_equal(2), + X x(begin, mid, values.size(), hasher(1), key_equal(2), allocator_type(3)); - map_type y( + X y( mid, end, values.size(), hasher(2), key_equal(1), allocator_type(4)); enable_exceptions(); @@ -96,7 +163,7 @@ namespace { ++num_throws; } disable_exceptions(); - test_fuzzy_matches_reference(y, reference_map, rg); + test_fuzzy_matches_reference(y, reference_cont, rg); } BOOST_TEST_GT(num_throws, 0u); @@ -104,43 +171,22 @@ namespace { check_raii_counts(); } - UNORDERED_AUTO_TEST (intializer_list_assign) { - using value_type = typename map_type::value_type; + template + void intializer_list_assign(std::pair p) + { + using allocator_type = typename X::allocator_type; - std::initializer_list values{ - value_type{raii{0}, raii{0}}, - value_type{raii{1}, raii{1}}, - value_type{raii{2}, raii{2}}, - value_type{raii{3}, raii{3}}, - value_type{raii{4}, raii{4}}, - value_type{raii{5}, raii{5}}, - value_type{raii{6}, raii{6}}, - value_type{raii{6}, raii{6}}, - value_type{raii{7}, raii{7}}, - value_type{raii{8}, raii{8}}, - value_type{raii{9}, raii{9}}, - value_type{raii{10}, raii{10}}, - value_type{raii{9}, raii{9}}, - value_type{raii{8}, raii{8}}, - value_type{raii{7}, raii{7}}, - value_type{raii{6}, raii{6}}, - value_type{raii{5}, raii{5}}, - value_type{raii{4}, raii{4}}, - value_type{raii{3}, raii{3}}, - value_type{raii{2}, raii{2}}, - value_type{raii{1}, raii{1}}, - value_type{raii{0}, raii{0}}, - }; + auto init_list = p.second; { raii::reset_counts(); unsigned num_throws = 0; for (std::size_t i = 0; i < throw_threshold; ++i) { - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); enable_exceptions(); try { - x = values; + x = init_list; } catch (...) { ++num_throws; } @@ -160,13 +206,19 @@ using test::sequential; // clang-format off UNORDERED_TEST( copy_assign, - ((exception_value_type_generator)) + ((test_map)(test_set)) + ((exception_value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( move_assign, - ((exception_value_type_generator)) + ((test_map)(test_set)) + ((exception_value_type_generator_factory)) ((default_generator)(sequential))) + +UNORDERED_TEST( + intializer_list_assign, + ((test_map_and_init_list)(test_set_and_init_list))) // clang-format on RUN_TESTS() diff --git a/test/cfoa/exception_constructor_tests.cpp b/test/cfoa/exception_constructor_tests.cpp index 998a6411..0899138e 100644 --- a/test/cfoa/exception_constructor_tests.cpp +++ b/test/cfoa/exception_constructor_tests.cpp @@ -1,23 +1,84 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "exception_helpers.hpp" #include - -using allocator_type = stateful_allocator >; +#include using hasher = stateful_hash; using key_equal = stateful_key_equal; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; + +using set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; + +std::initializer_list map_init_list{ + {raii{0}, raii{0}}, + {raii{1}, raii{1}}, + {raii{2}, raii{2}}, + {raii{3}, raii{3}}, + {raii{4}, raii{4}}, + {raii{5}, raii{5}}, + {raii{6}, raii{6}}, + {raii{6}, raii{6}}, + {raii{7}, raii{7}}, + {raii{8}, raii{8}}, + {raii{9}, raii{9}}, + {raii{10}, raii{10}}, + {raii{9}, raii{9}}, + {raii{8}, raii{8}}, + {raii{7}, raii{7}}, + {raii{6}, raii{6}}, + {raii{5}, raii{5}}, + {raii{4}, raii{4}}, + {raii{3}, raii{3}}, + {raii{2}, raii{2}}, + {raii{1}, raii{1}}, + {raii{0}, raii{0}}, +}; + +std::initializer_list set_init_list{ + raii{0}, + raii{1}, + raii{2}, + raii{3}, + raii{4}, + raii{5}, + raii{6}, + raii{6}, + raii{7}, + raii{8}, + raii{9}, + raii{10}, + raii{9}, + raii{8}, + raii{7}, + raii{6}, + raii{5}, + raii{4}, + raii{3}, + raii{2}, + raii{1}, + raii{0}, +}; + +auto test_map_and_init_list=std::make_pair(test_map,map_init_list); +auto test_set_and_init_list=std::make_pair(test_set,set_init_list); namespace { test::seed_t initialize_seed(795610904); - UNORDERED_AUTO_TEST (bucket_constructor) { + template + void bucket_constructor(X*) + { raii::reset_counts(); bool was_thrown = false; @@ -25,7 +86,7 @@ namespace { enable_exceptions(); for (std::size_t i = 0; i < alloc_throw_threshold; ++i) { try { - map_type m(128); + X m(128); } catch (...) { was_thrown = true; } @@ -35,8 +96,12 @@ namespace { BOOST_TEST(was_thrown); } - template void iterator_range(G gen, test::random_generator rg) + template + void iterator_range(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); { @@ -46,7 +111,7 @@ namespace { enable_exceptions(); try { - map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), + X x(values.begin(), values.end(), 0, hasher(1), key_equal(2), allocator_type(3)); } catch (...) { was_thrown = true; @@ -64,7 +129,7 @@ namespace { enable_exceptions(); try { - map_type x(values.begin(), values.end(), allocator_type(3)); + X x(values.begin(), values.end(), allocator_type(3)); } catch (...) { was_thrown = true; } @@ -81,7 +146,7 @@ namespace { enable_exceptions(); try { - map_type x( + X x( values.begin(), values.end(), values.size(), allocator_type(3)); } catch (...) { was_thrown = true; @@ -99,7 +164,7 @@ namespace { enable_exceptions(); try { - map_type x(values.begin(), values.end(), values.size(), hasher(1), + X x(values.begin(), values.end(), values.size(), hasher(1), allocator_type(3)); } catch (...) 
{ was_thrown = true; @@ -111,8 +176,12 @@ namespace { } } - template void copy_constructor(G gen, test::random_generator rg) + template + void copy_constructor(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); { @@ -121,10 +190,10 @@ namespace { bool was_thrown = false; try { - map_type x(values.begin(), values.end(), 0); + X x(values.begin(), values.end(), 0); enable_exceptions(); - map_type y(x); + X y(x); } catch (...) { was_thrown = true; } @@ -140,10 +209,10 @@ namespace { bool was_thrown = false; try { - map_type x(values.begin(), values.end(), 0); + X x(values.begin(), values.end(), 0); enable_exceptions(); - map_type y(x, allocator_type(4)); + X y(x, allocator_type(4)); } catch (...) { was_thrown = true; } @@ -154,20 +223,24 @@ namespace { } } - template void move_constructor(G gen, test::random_generator rg) + template + void move_constructor(X*, GF gen_factory, test::random_generator rg) { - auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); + using allocator_type = typename X::allocator_type; + auto gen = gen_factory.template get(); + auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); + { raii::reset_counts(); bool was_thrown = false; try { - map_type x(values.begin(), values.end(), 0); + X x(values.begin(), values.end(), 0); enable_exceptions(); - map_type y(std::move(x), allocator_type(4)); + X y(std::move(x), allocator_type(4)); } catch (...) { was_thrown = true; } @@ -178,33 +251,13 @@ namespace { } } - UNORDERED_AUTO_TEST (initializer_list_bucket_count) { - using value_type = typename map_type::value_type; + template + void initializer_list_bucket_count(std::pair p) + { + using value_type = typename X::value_type; + using allocator_type = typename X::allocator_type; - std::initializer_list values{ - value_type{raii{0}, raii{0}}, - value_type{raii{1}, raii{1}}, - value_type{raii{2}, raii{2}}, - value_type{raii{3}, raii{3}}, - value_type{raii{4}, raii{4}}, - value_type{raii{5}, raii{5}}, - value_type{raii{6}, raii{6}}, - value_type{raii{6}, raii{6}}, - value_type{raii{7}, raii{7}}, - value_type{raii{8}, raii{8}}, - value_type{raii{9}, raii{9}}, - value_type{raii{10}, raii{10}}, - value_type{raii{9}, raii{9}}, - value_type{raii{8}, raii{8}}, - value_type{raii{7}, raii{7}}, - value_type{raii{6}, raii{6}}, - value_type{raii{5}, raii{5}}, - value_type{raii{4}, raii{4}}, - value_type{raii{3}, raii{3}}, - value_type{raii{2}, raii{2}}, - value_type{raii{1}, raii{1}}, - value_type{raii{0}, raii{0}}, - }; + auto init_list = p.second; { raii::reset_counts(); @@ -213,7 +266,7 @@ namespace { enable_exceptions(); for (std::size_t i = 0; i < throw_threshold; ++i) { try { - map_type x(values, 0, hasher(1), key_equal(2), allocator_type(3)); + X x(init_list, 0, hasher(1), key_equal(2), allocator_type(3)); } catch (...) { ++num_throws; } @@ -231,7 +284,7 @@ namespace { enable_exceptions(); for (std::size_t i = 0; i < alloc_throw_threshold * 2; ++i) { try { - map_type x(values, allocator_type(3)); + X x(init_list, allocator_type(3)); } catch (...) { ++num_throws; } @@ -249,7 +302,7 @@ namespace { enable_exceptions(); for (std::size_t i = 0; i < alloc_throw_threshold * 2; ++i) { try { - map_type x(values, values.size() * 2, allocator_type(3)); + X x(init_list, init_list.size() * 2, allocator_type(3)); } catch (...) 
{ ++num_throws; } @@ -267,7 +320,7 @@ namespace { enable_exceptions(); for (std::size_t i = 0; i < throw_threshold; ++i) { try { - map_type x(values, values.size() * 2, hasher(1), allocator_type(3)); + X x(init_list, init_list.size() * 2, hasher(1), allocator_type(3)); } catch (...) { ++num_throws; } @@ -285,20 +338,31 @@ using test::limited_range; using test::sequential; // clang-format off +UNORDERED_TEST( + bucket_constructor, + ((test_map)(test_set))) + UNORDERED_TEST( iterator_range, - ((exception_value_type_generator)) + ((test_map)(test_set)) + ((exception_value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( copy_constructor, - ((exception_value_type_generator)) + ((test_map)(test_set)) + ((exception_value_type_generator_factory)) ((default_generator)(sequential))) UNORDERED_TEST( move_constructor, - ((exception_value_type_generator)) + ((test_map)(test_set)) + ((exception_value_type_generator_factory)) ((default_generator)(sequential))) + +UNORDERED_TEST( + initializer_list_bucket_count, + ((test_map_and_init_list)(test_set_and_init_list))) // clang-format on RUN_TESTS() diff --git a/test/cfoa/exception_erase_tests.cpp b/test/cfoa/exception_erase_tests.cpp index 11d167e7..32a51f5e 100644 --- a/test/cfoa/exception_erase_tests.cpp +++ b/test/cfoa/exception_erase_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "exception_helpers.hpp" #include +#include #include @@ -15,6 +17,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_erased{0}; auto const old_size = x.size(); @@ -27,9 +32,9 @@ namespace { enable_exceptions(); thread_runner(values, [&values, &num_erased, &x](boost::span) { - for (auto const& k : values) { + for (auto const& v : values) { try { - auto count = x.erase(k.first); + auto count = x.erase(get_key(v)); BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); @@ -46,7 +51,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); } } lvalue_eraser; @@ -55,6 +61,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; std::atomic num_erased{0}; @@ -68,8 +83,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -77,17 +92,17 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); enable_exceptions(); thread_runner(values, [&num_erased, &x, threshold](boost::span s) { - for (auto const& k : s) { + for (auto const& v : s) { try { - auto count = x.erase_if(k.first, - [threshold](value_type& v) { return 
v.second.x_ > threshold; }); + auto count = x.erase_if(get_key(v), + [threshold](arg_type& w) { return get_value(w).x_ > threshold; }); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); @@ -104,7 +119,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); } } lvalue_eraser_if; @@ -113,6 +129,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; auto const old_size = x.size(); @@ -124,8 +149,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -133,7 +158,7 @@ namespace { auto expected_erasures = 0u; x.visit_all([&expected_erasures, threshold](value_type const& v) { - if (v.second.x_ > threshold) { + if (get_value(v).x_ > threshold) { ++expected_erasures; } }); @@ -142,14 +167,14 @@ namespace { thread_runner(values, [&x, threshold](boost::span /* s */) { for (std::size_t i = 0; i < 256; ++i) { try { - x.erase_if([threshold](value_type& v) { + x.erase_if([threshold](arg_type& v) { static std::atomic c{0}; auto t = ++c; if (should_throw && (t % throw_threshold == 0)) { throw exception_tag{}; } - return v.second.x_ > threshold; + return get_value(v).x_ > threshold; }); } catch (...) { } @@ -161,7 +186,9 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * (old_size - x.size())); + BOOST_TEST_EQ( + raii::destructor, + old_d + value_type_cardinality * (old_size - x.size())); } } erase_if; @@ -170,6 +197,15 @@ namespace { template void operator()(std::vector& values, X& x) { using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; auto const old_size = x.size(); @@ -181,8 +217,8 @@ namespace { auto max = 0; x.visit_all([&max](value_type const& v) { - if (v.second.x_ > max) { - max = v.second.x_; + if (get_value(v).x_ > max) { + max = get_value(v).x_; } }); @@ -192,14 +228,14 @@ namespace { thread_runner(values, [&x, threshold](boost::span /* s */) { for (std::size_t i = 0; i < 256; ++i) { try { - boost::unordered::erase_if(x, [threshold](value_type& v) { + boost::unordered::erase_if(x, [threshold](arg_type& v) { static std::atomic c{0}; auto t = ++c; if (should_throw && (t % throw_threshold == 0)) { throw exception_tag{}; } - return v.second.x_ > threshold; + return get_value(v).x_ > threshold; }); } catch (...) 
{ @@ -212,16 +248,18 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * (old_size - x.size())); + BOOST_TEST_EQ( + raii::destructor, + old_d + value_type_cardinality * (old_size - x.size())); } } free_fn_erase_if; - template - void erase(X*, G gen, F eraser, test::random_generator rg) + template + void erase(X*, GF gen_factory, F eraser, test::random_generator rg) { + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); @@ -231,13 +269,13 @@ namespace { x.insert(v); } - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); BOOST_TEST_EQ(raii::destructor, 0u); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); eraser(values, x); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); @@ -245,6 +283,8 @@ namespace { boost::unordered::concurrent_flat_map > >* map; + boost::unordered::concurrent_flat_set >* set; } // namespace @@ -255,8 +295,9 @@ using test::sequential; // clang-format off UNORDERED_TEST( erase, - ((map)) - ((exception_value_type_generator)(exception_init_type_generator)) + ((map)(set)) + ((exception_value_type_generator_factory) + (exception_init_type_generator_factory)) ((lvalue_eraser)(lvalue_eraser_if)(erase_if)(free_fn_erase_if)) ((default_generator)(sequential)(limited_range))) diff --git a/test/cfoa/exception_helpers.hpp b/test/cfoa/exception_helpers.hpp index 87528558..5afe1c24 100644 --- a/test/cfoa/exception_helpers.hpp +++ b/test/cfoa/exception_helpers.hpp @@ -1,14 +1,20 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +#ifndef BOOST_UNORDERED_TEST_CFOA_EXCEPTION_HELPERS_HPP +#define BOOST_UNORDERED_TEST_CFOA_EXCEPTION_HELPERS_HPP + #include "../helpers/generators.hpp" #include "../helpers/test.hpp" +#include "common_helpers.hpp" #include #include #include #include +#include #include #include @@ -308,16 +314,54 @@ std::size_t hash_value(raii const& r) noexcept return hasher(r.x_); } -struct exception_value_type_generator_type +template +struct exception_value_generator { - std::pair operator()(test::random_generator rg) + using value_type = raii; + + value_type operator()(test::random_generator rg) + { + int* p = nullptr; + int a = generate(p, rg); + return value_type(a); + } +}; + +template +struct exception_value_generator > +{ + static constexpr bool const_key = std::is_const::value; + static constexpr bool const_mapped = std::is_const::value; + using value_type = std::pair< + typename std::conditional::type, + typename std::conditional::type>; + + value_type operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); int b = generate(p, rg); return std::make_pair(raii{a}, raii{b}); } -} exception_value_type_generator; +}; + +struct exception_value_type_generator_factory_type +{ + template + exception_value_generator get() + { + return {}; + } +} exception_value_type_generator_factory; + +struct exception_init_type_generator_factory_type +{ + template + exception_value_generator get() + { + return {}; + } +} exception_init_type_generator_factory; struct exception_init_type_generator_type { @@ -388,29 +432,6 @@ template void thread_runner(std::vector& values, F f) } } -template -void test_matches_reference(X const& x, Y const& reference_map) -{ - using value_type = typename X::value_type; - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); - })); -} - -template -void test_fuzzy_matches_reference( - X const& x, Y const& reference_map, test::random_generator rg) -{ - using value_type = typename X::value_type; - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - if (rg == test::sequential) { - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); - } - })); -} - template using span_value_type = typename T::value_type; void check_raii_counts() @@ -442,3 +463,5 @@ auto make_random_values(std::size_t count, F f) -> std::vector } return v; } + +#endif // BOOST_UNORDERED_TEST_CFOA_EXCEPTION_HELPERS_HPP diff --git a/test/cfoa/exception_insert_tests.cpp b/test/cfoa/exception_insert_tests.cpp index 4804caa6..d9b22c31 100644 --- a/test/cfoa/exception_insert_tests.cpp +++ b/test/cfoa/exception_insert_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
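// Sketch (not from the patch itself): exception_value_type_generator is
// replaced above by factories whose get<T>() returns a generator producing
// either a single raii (set case) or a pair of raii objects (map case). A
// stripped-down standalone analogue, using int in place of the harness raii
// type and a plain std::mt19937 as the source of randomness:

#include <random>
#include <utility>

template <class T> struct gen_sketch
{
  T operator()(std::mt19937& rng) { return T(static_cast<int>(rng() % 100)); }
};

template <class K, class M> struct gen_sketch<std::pair<K, M> >
{
  std::pair<K, M> operator()(std::mt19937& rng)
  {
    int a = static_cast<int>(rng() % 100);
    int b = static_cast<int>(rng() % 100);
    return std::make_pair(K(a), M(b));
  }
};

struct gen_factory_sketch
{
  template <class T> gen_sketch<T> get() const { return {}; }
};

// The tests request the generator with the container's value_type or
// init_type, which is what the gen_factory.template get<...>() calls above
// resolve to.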
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "exception_helpers.hpp" #include +#include #include @@ -84,6 +86,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + x.reserve(values.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); @@ -92,11 +97,19 @@ namespace { rvalue_inserter_type::operator()(values, x); if (std::is_same::value) { - BOOST_TEST_EQ(raii::copy_constructor, x.size()); - BOOST_TEST_EQ(raii::move_constructor, x.size()); + if (std::is_same::value) { + BOOST_TEST_EQ(raii::copy_constructor, 0u); + BOOST_TEST_EQ(raii::move_constructor, x.size()); + } + else { + BOOST_TEST_EQ(raii::copy_constructor, x.size()); + BOOST_TEST_EQ(raii::move_constructor, x.size()); + } } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_EQ(raii::move_constructor, 2 * x.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * x.size()); } } } norehash_rvalue_inserter; @@ -246,6 +259,13 @@ namespace { { template void operator()(std::vector& values, X& x) { + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_inserts{0}; enable_exceptions(); @@ -253,7 +273,7 @@ namespace { for (auto& r : s) { try { bool b = - x.insert_or_visit(r, [](typename X::value_type& v) { (void)v; }); + x.insert_or_visit(r, [](arg_type& v) { (void)v; }); if (b) { ++num_inserts; @@ -306,6 +326,13 @@ namespace { { template void operator()(std::vector& values, X& x) { + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_inserts{0}; enable_exceptions(); @@ -313,7 +340,7 @@ namespace { for (auto& r : s) { try { bool b = x.insert_or_visit( - std::move(r), [](typename X::value_type& v) { (void)v; }); + std::move(r), [](arg_type& v) { (void)v; }); if (b) { ++num_inserts; @@ -377,14 +404,14 @@ namespace { } } iterator_range_insert_or_visit; - template - void insert(X*, G gen, F inserter, test::random_generator rg) + template + void insert(X*, GF gen_factory, F inserter, test::random_generator rg) { disable_exceptions(); + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { @@ -392,13 +419,15 @@ namespace { inserter(values, x); - test_fuzzy_matches_reference(x, reference_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); } boost::unordered::concurrent_flat_map > >* map; + boost::unordered::concurrent_flat_set >* set; } // namespace @@ -409,8 +438,9 @@ using test::sequential; // clang-format off UNORDERED_TEST( insert, - ((map)) - ((exception_value_type_generator)(exception_init_type_generator)) + ((map)(set)) + ((exception_value_type_generator_factory) + (exception_init_type_generator_factory)) ((lvalue_inserter)(rvalue_inserter)(iterator_range_inserter) (norehash_lvalue_inserter)(norehash_rvalue_inserter) (lvalue_insert_or_cvisit)(lvalue_insert_or_visit) @@ -421,7 +451,7 @@ UNORDERED_TEST( UNORDERED_TEST( insert, ((map)) - ((exception_init_type_generator)) + 
((exception_init_type_generator_factory)) ((lvalue_insert_or_assign_copy_assign)(lvalue_insert_or_assign_move_assign) (rvalue_insert_or_assign_copy_assign)(rvalue_insert_or_assign_move_assign)) ((default_generator)(sequential)(limited_range))) diff --git a/test/cfoa/exception_merge_tests.cpp b/test/cfoa/exception_merge_tests.cpp index 0f54eb27..d9e0cc2f 100644 --- a/test/cfoa/exception_merge_tests.cpp +++ b/test/cfoa/exception_merge_tests.cpp @@ -1,29 +1,38 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "exception_helpers.hpp" #include +#include #include -using allocator_type = stateful_allocator >; - using hasher = stateful_hash; using key_equal = stateful_key_equal; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; + +using set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; namespace { test::seed_t initialize_seed(223333016); - template void merge(G gen, test::random_generator rg) + template + void merge(X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); @@ -37,10 +46,10 @@ namespace { for (unsigned i = 0; i < 5 * alloc_throw_threshold; ++i) { disable_exceptions(); - map_type x1(0, hasher(1), key_equal(2), allocator_type(3)); + X x1(0, hasher(1), key_equal(2), allocator_type(3)); x1.insert(begin, mid); - map_type x2(0, hasher(2), key_equal(1), allocator_type(3)); + X x2(0, hasher(2), key_equal(1), allocator_type(3)); x2.insert(mid, end); enable_exceptions(); @@ -51,8 +60,8 @@ namespace { } disable_exceptions(); - test_fuzzy_matches_reference(x1, reference_map, rg); - test_fuzzy_matches_reference(x2, reference_map, rg); + test_fuzzy_matches_reference(x1, reference_cont, rg); + test_fuzzy_matches_reference(x2, reference_cont, rg); } BOOST_TEST_GT(num_throws, 0u); @@ -70,7 +79,8 @@ using test::sequential; // clang-format off UNORDERED_TEST( merge, - ((exception_value_type_generator)) + ((test_map)(test_set)) + ((exception_value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/fwd_tests.cpp b/test/cfoa/fwd_tests.cpp index dced611e..5b37dddd 100644 --- a/test/cfoa/fwd_tests.cpp +++ b/test/cfoa/fwd_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include #include +#include #include test::seed_t initialize_seed{32304628}; @@ -34,37 +36,89 @@ bool unequal_call(boost::unordered::concurrent_flat_map& x1, return x1 != x2; } -#include - -using map_type = boost::unordered::concurrent_flat_map; - -#if !defined(BOOST_CLANG_VERSION) || \ - BOOST_WORKAROUND(BOOST_CLANG_VERSION, < 30700) || \ - BOOST_WORKAROUND(BOOST_CLANG_VERSION, >= 30800) -// clang-3.7 seems to have a codegen bug here so we workaround it -UNORDERED_AUTO_TEST (fwd_swap_call) { - map_type x1, x2; - swap_call(x1, x2); +template +void swap_call(boost::unordered::concurrent_flat_set& x1, + boost::unordered::concurrent_flat_set& x2) +{ + swap(x1, x2); } -#endif +template +bool equal_call(boost::unordered::concurrent_flat_set& x1, + boost::unordered::concurrent_flat_set& x2) +{ + return x1 == x2; +} -UNORDERED_AUTO_TEST (fwd_equal_call) { - map_type x1, x2; +template +bool unequal_call(boost::unordered::concurrent_flat_set& x1, + boost::unordered::concurrent_flat_set& x2) +{ + return x1 != x2; +} + +#include +#include + +using map_type = boost::unordered::concurrent_flat_map; +using set_type = boost::unordered::concurrent_flat_map; + +map_type* test_map; +set_type* test_set; + +template +void fwd_swap_call(X*) +{ +#if !defined(BOOST_CLANG_VERSION) || \ + BOOST_WORKAROUND(BOOST_CLANG_VERSION, < 30700) || \ + BOOST_WORKAROUND(BOOST_CLANG_VERSION, >= 30800) +// clang-3.7 seems to have a codegen bug here so we workaround it + + X x1, x2; + swap_call(x1, x2); +#endif +} + +template +void fwd_equal_call(X*) +{ + X x1, x2; BOOST_TEST(equal_call(x1, x2)); } -UNORDERED_AUTO_TEST (fwd_unequal_call) { - map_type x1, x2; +template +void fwd_unequal_call(X*) +{ + X x1, x2; BOOST_TEST_NOT(unequal_call(x1, x2)); } // this isn't the best place for this test but it's better than introducing a // new file -UNORDERED_AUTO_TEST (max_size) { - map_type x1; +template +void max_size(X*) +{ + X x1; BOOST_TEST_EQ( - x1.max_size(), std::numeric_limits::max()); + x1.max_size(), std::numeric_limits::max()); } +// clang-format off +UNORDERED_TEST( + fwd_swap_call, + ((test_map)(test_set))) + +UNORDERED_TEST( + fwd_equal_call, + ((test_map)(test_set))) + +UNORDERED_TEST( + fwd_unequal_call, + ((test_map)(test_set))) + +UNORDERED_TEST( + max_size, + ((test_map)(test_set))) +// clang-format on + RUN_TESTS() diff --git a/test/cfoa/helpers.hpp b/test/cfoa/helpers.hpp index 79b91bb0..307a1d7f 100644 --- a/test/cfoa/helpers.hpp +++ b/test/cfoa/helpers.hpp @@ -1,4 +1,5 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) @@ -7,11 +8,15 @@ #include "../helpers/generators.hpp" #include "../helpers/test.hpp" +#include "common_helpers.hpp" #include #include #include +#include +#include #include +#include #include #include @@ -328,27 +333,48 @@ auto make_random_values(std::size_t count, F f) -> std::vector return v; } -struct value_type_generator_type +template +struct value_generator { - std::pair operator()(test::random_generator rg) - { - int* p = nullptr; - int a = generate(p, rg); - int b = generate(p, rg); - return std::make_pair(raii{a}, raii{b}); - } -} value_type_generator; + using value_type = raii; -struct init_type_generator_type + value_type operator()(test::random_generator rg) + { + int* p = nullptr; + int a = generate(p, rg); + return value_type(a); + } +}; + +template +struct value_generator > { - std::pair operator()(test::random_generator rg) + static constexpr bool const_key = std::is_const::value; + static constexpr bool const_mapped = std::is_const::value; + using value_type = std::pair< + typename std::conditional::type, + typename std::conditional::type>; + + value_type operator()(test::random_generator rg) { int* p = nullptr; int a = generate(p, rg); int b = generate(p, rg); return std::make_pair(raii{a}, raii{b}); } -} init_type_generator; +}; + +struct value_type_generator_factory_type +{ + template + value_generator get() { return {}; } +} value_type_generator_factory; + +struct init_type_generator_factory_type +{ + template + value_generator get() { return {}; } +} init_type_generator_factory; template std::vector > split( @@ -408,29 +434,6 @@ template void thread_runner(std::vector& values, F f) } } -template -void test_matches_reference(X const& x, Y const& reference_map) -{ - using value_type = typename X::value_type; - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); - })); -} - -template -void test_fuzzy_matches_reference( - X const& x, Y const& reference_map, test::random_generator rg) -{ - using value_type = typename X::value_type; - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - if (rg == test::sequential) { - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); - } - })); -} - template using span_value_type = typename T::value_type; void check_raii_counts() @@ -642,4 +645,4 @@ public: fancy_allocator& operator=(fancy_allocator const&) { return *this; } }; -#endif // BOOST_UNORDERED_TEST_CFOA_HELPERS_HPP \ No newline at end of file +#endif // BOOST_UNORDERED_TEST_CFOA_HELPERS_HPP diff --git a/test/cfoa/insert_tests.cpp b/test/cfoa/insert_tests.cpp index b3bd4d51..e1b53e68 100644 --- a/test/cfoa/insert_tests.cpp +++ b/test/cfoa/insert_tests.cpp @@ -1,18 +1,27 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
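// Note (not from the patch itself): the map-specific test_matches_reference
// and test_fuzzy_matches_reference helpers deleted above are apparently
// superseded by container-agnostic versions in common_helpers.hpp (included
// at the top of this header but not shown in this excerpt), alongside
// get_key, get_value and reference_container. A rough sketch of the accessor
// idea, with _sketch names introduced here for illustration only:

#include <utility>

template <class T> T const& get_key_sketch(T const& v) { return v; } // set
template <class K, class M>
K const& get_key_sketch(std::pair<K, M> const& v) { return v.first; } // map

template <class T> T const& get_value_sketch(T const& v) { return v; } // set
template <class K, class M>
M const& get_value_sketch(std::pair<K, M> const& v) { return v.second; } // map

// reference_container presumably builds the matching single-threaded
// boost::unordered_flat_set / boost::unordered_flat_map from the generated
// values, serving as the oracle the concurrent container is checked against.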
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include #include struct raii_convertible { - int x, y; - raii_convertible(int x_, int y_) : x{x_}, y{y_} {} + int x = 0, y = 0 ; + template + raii_convertible(T const & t) : x{t.x_} {} + + template + raii_convertible(std::pair const & p) : x{p.first.x_}, y{p.second.x_} + {} + + operator raii() { return {x}; } operator std::pair() { return {x, y}; } }; @@ -23,6 +32,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_inserts{0}; thread_runner(values, [&x, &num_inserts](boost::span s) { for (auto const& r : s) { @@ -33,7 +45,8 @@ namespace { } }); BOOST_TEST_EQ(num_inserts, x.size()); - BOOST_TEST_EQ(raii::copy_constructor, 2 * x.size()); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::copy_assignment, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); } @@ -43,9 +56,13 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + x.reserve(values.size()); lvalue_inserter_type::operator()(values, x); - BOOST_TEST_EQ(raii::copy_constructor, 2 * x.size()); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * x.size()); BOOST_TEST_EQ(raii::move_constructor, 0u); } } norehash_lvalue_inserter; @@ -67,7 +84,8 @@ namespace { }); BOOST_TEST_EQ(num_inserts, x.size()); - if (std::is_same::value) { + if (std::is_same::value && + !std::is_same::value) { BOOST_TEST_EQ(raii::copy_constructor, x.size()); } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); @@ -82,6 +100,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + x.reserve(values.size()); BOOST_TEST_EQ(raii::copy_constructor, 0u); @@ -90,11 +111,19 @@ namespace { rvalue_inserter_type::operator()(values, x); if (std::is_same::value) { - BOOST_TEST_EQ(raii::copy_constructor, x.size()); - BOOST_TEST_EQ(raii::move_constructor, x.size()); + if (std::is_same::value) { + BOOST_TEST_EQ(raii::copy_constructor, 0u); + BOOST_TEST_EQ(raii::move_constructor, x.size()); + } + else { + BOOST_TEST_EQ(raii::copy_constructor, x.size()); + BOOST_TEST_EQ(raii::move_constructor, x.size()); + } } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_EQ(raii::move_constructor, 2 * x.size()); + BOOST_TEST_EQ( + raii::move_constructor, value_type_cardinality * x.size()); } } } norehash_rvalue_inserter; @@ -103,17 +132,21 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::vector values2; values2.reserve(values.size()); - for (auto const& p : values) { - values2.push_back(raii_convertible(p.first.x_, p.second.x_)); + for (auto const& v : values) { + values2.push_back(raii_convertible(v)); } thread_runner(values2, [&x](boost::span s) { x.insert(s.begin(), s.end()); }); - BOOST_TEST_EQ(raii::default_constructor, 2 * values2.size()); + BOOST_TEST_EQ( + raii::default_constructor, value_type_cardinality * values2.size()); #if BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 50300) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50500) // some versions of old gcc have trouble eliding copies here @@ -253,6 +286,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto 
value_type_cardinality = + value_cardinality::value; + std::atomic num_inserts{0}; std::atomic num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span s) { @@ -273,7 +309,8 @@ namespace { BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ(raii::default_constructor, 0u); - BOOST_TEST_EQ(raii::copy_constructor, 2 * x.size()); + BOOST_TEST_EQ( + raii::copy_constructor, value_type_cardinality * x.size()); // don't check move construction count here because of rehashing BOOST_TEST_GT(raii::move_constructor, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); @@ -284,12 +321,22 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_inserts{0}; std::atomic num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span s) { for (auto& r : s) { bool b = - x.insert_or_visit(r, [&num_invokes](typename X::value_type& v) { + x.insert_or_visit(r, [&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); @@ -304,7 +351,7 @@ namespace { BOOST_TEST_EQ(num_invokes, values.size() - x.size()); BOOST_TEST_EQ(raii::default_constructor, 0u); - BOOST_TEST_EQ(raii::copy_constructor, 2 * x.size()); + BOOST_TEST_EQ(raii::copy_constructor, value_type_cardinality * x.size()); // don't check move construction count here because of rehashing BOOST_TEST_GT(raii::move_constructor, 0u); BOOST_TEST_EQ(raii::move_assignment, 0u); @@ -315,6 +362,9 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::atomic num_inserts{0}; std::atomic num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span s) { @@ -337,11 +387,19 @@ namespace { BOOST_TEST_EQ(raii::default_constructor, 0u); if (std::is_same::value) { - BOOST_TEST_EQ(raii::copy_constructor, x.size()); - BOOST_TEST_GE(raii::move_constructor, x.size()); + if (std::is_same::value) { + BOOST_TEST_EQ(raii::copy_constructor, 0u); + BOOST_TEST_GE(raii::move_constructor, x.size()); + } + else { + BOOST_TEST_EQ(raii::copy_constructor, x.size()); + BOOST_TEST_GE(raii::move_constructor, x.size()); + } } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_GE(raii::move_constructor, 2 * x.size()); + BOOST_TEST_GE( + raii::move_constructor, value_type_cardinality * x.size()); } } } rvalue_insert_or_cvisit; @@ -350,12 +408,22 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_inserts{0}; std::atomic num_invokes{0}; thread_runner(values, [&x, &num_inserts, &num_invokes](boost::span s) { for (auto& r : s) { bool b = x.insert_or_visit( - std::move(r), [&num_invokes](typename X::value_type& v) { + std::move(r), [&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); @@ -371,11 +439,19 @@ namespace { BOOST_TEST_EQ(raii::default_constructor, 0u); if (std::is_same::value) { - BOOST_TEST_EQ(raii::copy_constructor, x.size()); - BOOST_TEST_GE(raii::move_constructor, x.size()); + if (std::is_same::value) { 
+ BOOST_TEST_EQ(raii::copy_constructor, 0u); + BOOST_TEST_GE(raii::move_constructor, x.size()); + } + else { + BOOST_TEST_EQ(raii::copy_constructor, x.size()); + BOOST_TEST_GE(raii::move_constructor, x.size()); + } } else { BOOST_TEST_EQ(raii::copy_constructor, 0u); - BOOST_TEST_GE(raii::move_constructor, 2 * x.size()); + BOOST_TEST_GE( + raii::move_constructor, value_type_cardinality * x.size()); } } } rvalue_insert_or_visit; @@ -384,10 +460,13 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::vector values2; values2.reserve(values.size()); - for (auto const& p : values) { - values2.push_back(raii_convertible(p.first.x_, p.second.x_)); + for (auto const& v : values) { + values2.push_back(raii_convertible(v)); } std::atomic num_invokes{0}; @@ -402,7 +481,8 @@ namespace { BOOST_TEST_EQ(num_invokes, values.size() - x.size()); - BOOST_TEST_EQ(raii::default_constructor, 2 * values2.size()); + BOOST_TEST_EQ( + raii::default_constructor, value_type_cardinality * values2.size()); #if BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 50300) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50500) // skip test @@ -417,10 +497,13 @@ namespace { { template void operator()(std::vector& values, X& x) { + static constexpr auto value_type_cardinality = + value_cardinality::value; + std::vector values2; values2.reserve(values.size()); - for (auto const& p : values) { - values2.push_back(raii_convertible(p.first.x_, p.second.x_)); + for (auto const& v : values) { + values2.push_back(raii_convertible(v)); } std::atomic num_invokes{0}; @@ -435,7 +518,8 @@ namespace { BOOST_TEST_EQ(num_invokes, values.size() - x.size()); - BOOST_TEST_EQ(raii::default_constructor, 2 * values2.size()); + BOOST_TEST_EQ( + raii::default_constructor, value_type_cardinality * values2.size()); #if BOOST_WORKAROUND(BOOST_GCC_VERSION, >= 50300) && \ BOOST_WORKAROUND(BOOST_GCC_VERSION, < 50500) // skip test @@ -446,12 +530,12 @@ namespace { } } iterator_range_insert_or_visit; - template - void insert(X*, G gen, F inserter, test::random_generator rg) + template + void insert(X*, GF gen_factory, F inserter, test::random_generator rg) { + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); { @@ -459,13 +543,13 @@ namespace { inserter(values, x); - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); using value_type = typename X::value_type; - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); + BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); if (rg == test::sequential) { - BOOST_TEST_EQ(kv.second, reference_map[kv.first]); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); } })); } @@ -480,39 +564,21 @@ namespace { raii::destructor); } - template void insert_initializer_list(X*) + template + void insert_initializer_list(std::pair p) { using value_type = typename X::value_type; + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; - std::initializer_list values{ - value_type{raii{0}, raii{0}}, - value_type{raii{1}, 
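// Note (not from the patch itself): the extra branch above encodes why the
// expected raii counts differ per container when inserting rvalues of
// value_type. For the map, value_type is std::pair<raii const, raii>, so even
// a moved-from pair copy-constructs its const key while the mapped part is
// moved; for the set, value_type is raii itself and is moved outright, with
// no copies. Inserting init_type rvalues moves every raii subobject, hence
// the value_type_cardinality * x.size() bound. Minimal illustration of the
// pair behaviour with a stand-in type:

#include <utility>

struct counter
{
  counter() = default;
  counter(counter const&) {} // counted as a copy in the real tests
  counter(counter&&) {}      // counted as a move in the real tests
};

// Move-constructing a std::pair<counter const, counter> copies .first (being
// const, it cannot bind to the move constructor) and moves .second: exactly
// the one copy plus one move per element the map branch expects.
inline std::pair<counter const, counter> move_pair(
  std::pair<counter const, counter>&& p)
{
  return std::pair<counter const, counter>(std::move(p));
}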
raii{1}}, - value_type{raii{2}, raii{2}}, - value_type{raii{3}, raii{3}}, - value_type{raii{4}, raii{4}}, - value_type{raii{5}, raii{5}}, - value_type{raii{6}, raii{6}}, - value_type{raii{6}, raii{6}}, - value_type{raii{7}, raii{7}}, - value_type{raii{8}, raii{8}}, - value_type{raii{9}, raii{9}}, - value_type{raii{10}, raii{10}}, - value_type{raii{9}, raii{9}}, - value_type{raii{8}, raii{8}}, - value_type{raii{7}, raii{7}}, - value_type{raii{6}, raii{6}}, - value_type{raii{5}, raii{5}}, - value_type{raii{4}, raii{4}}, - value_type{raii{3}, raii{3}}, - value_type{raii{2}, raii{2}}, - value_type{raii{1}, raii{1}}, - value_type{raii{0}, raii{0}}, - }; - + auto init_list = p.second; std::vector dummy; - - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container( + init_list.begin(), init_list.end()); raii::reset_counts(); { @@ -520,13 +586,13 @@ namespace { X x; thread_runner( - dummy, [&x, &values](boost::span) { x.insert(values); }); + dummy, [&x, &init_list](boost::span) { x.insert(init_list); }); - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map[kv.first]); + BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); })); } @@ -549,27 +615,27 @@ namespace { X x; - thread_runner(dummy, [&x, &values, &num_invokes](boost::span) { - x.insert_or_visit(values, [&num_invokes](typename X::value_type& v) { + thread_runner(dummy, [&x, &init_list, &num_invokes](boost::span) { + x.insert_or_visit(init_list, [&num_invokes](arg_type& v) { (void)v; ++num_invokes; }); x.insert_or_cvisit( - values, [&num_invokes](typename X::value_type const& v) { + init_list, [&num_invokes](typename X::value_type const& v) { (void)v; ++num_invokes; }); }); - BOOST_TEST_EQ(num_invokes, (values.size() - x.size()) + - (num_threads - 1) * values.size() + - num_threads * values.size()); - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(num_invokes, (init_list.size() - x.size()) + + (num_threads - 1) * init_list.size() + + num_threads * init_list.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); - BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map[kv.first]); + BOOST_TEST_EQ(x.size(), x.visit_all([&](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); })); } @@ -606,6 +672,64 @@ namespace { std::equal_to, fancy_allocator > >* fancy_map; + boost::unordered::concurrent_flat_set* set; + boost::unordered::concurrent_flat_set, + std::equal_to, fancy_allocator > >* + fancy_set; + + std::initializer_list > map_init_list{ + {raii{0}, raii{0}}, + {raii{1}, raii{1}}, + {raii{2}, raii{2}}, + {raii{3}, raii{3}}, + {raii{4}, raii{4}}, + {raii{5}, raii{5}}, + {raii{6}, raii{6}}, + {raii{6}, raii{6}}, + {raii{7}, raii{7}}, + {raii{8}, raii{8}}, + {raii{9}, raii{9}}, + {raii{10}, raii{10}}, + {raii{9}, raii{9}}, + {raii{8}, raii{8}}, + {raii{7}, raii{7}}, + {raii{6}, raii{6}}, + {raii{5}, raii{5}}, + {raii{4}, raii{4}}, + {raii{3}, raii{3}}, + {raii{2}, raii{2}}, + {raii{1}, raii{1}}, + {raii{0}, raii{0}}, + }; + + std::initializer_list set_init_list{ + raii{0}, + raii{1}, 
+ raii{2}, + raii{3}, + raii{4}, + raii{5}, + raii{6}, + raii{6}, + raii{7}, + raii{8}, + raii{9}, + raii{10}, + raii{9}, + raii{8}, + raii{7}, + raii{6}, + raii{5}, + raii{4}, + raii{3}, + raii{2}, + raii{1}, + raii{0}, + }; + + auto map_and_init_list=std::make_pair(map,map_init_list); + auto set_and_init_list=std::make_pair(set,set_init_list); + } // namespace using test::default_generator; @@ -615,12 +739,12 @@ using test::sequential; // clang-format off UNORDERED_TEST( insert_initializer_list, - ((map))) + ((map_and_init_list)(set_and_init_list))) UNORDERED_TEST( insert, - ((map)(fancy_map)) - ((value_type_generator)(init_type_generator)) + ((map)(fancy_map)(set)(fancy_set)) + ((value_type_generator_factory)(init_type_generator_factory)) ((lvalue_inserter)(rvalue_inserter)(iterator_range_inserter) (norehash_lvalue_inserter)(norehash_rvalue_inserter) (lvalue_insert_or_cvisit)(lvalue_insert_or_visit) @@ -631,7 +755,7 @@ UNORDERED_TEST( UNORDERED_TEST( insert, ((map)) - ((init_type_generator)) + ((init_type_generator_factory)) ((lvalue_insert_or_assign_copy_assign)(lvalue_insert_or_assign_move_assign) (rvalue_insert_or_assign_copy_assign)(rvalue_insert_or_assign_move_assign)) ((default_generator)(sequential)(limited_range))) @@ -639,7 +763,7 @@ UNORDERED_TEST( UNORDERED_TEST( insert, ((trans_map)) - ((init_type_generator)) + ((init_type_generator_factory)) ((trans_insert_or_assign_copy_assign)(trans_insert_or_assign_move_assign)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/merge_tests.cpp b/test/cfoa/merge_tests.cpp index 60d35065..eab8cc29 100644 --- a/test/cfoa/merge_tests.cpp +++ b/test/cfoa/merge_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include test::seed_t initialize_seed{402031699}; @@ -14,12 +16,25 @@ using test::sequential; using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator >; -using map_type = boost::unordered::concurrent_flat_map; +using map_type = boost::unordered::concurrent_flat_map > >; +using map2_type = boost::unordered::concurrent_flat_map, std::equal_to, + stateful_allocator > >; -using map_value_type = typename map_type::value_type; +using set_type = boost::unordered::concurrent_flat_set >; +using set2_type = boost::unordered::concurrent_flat_set, + std::equal_to, stateful_allocator >; + +map_type* test_map; +map2_type* test_map2; +auto test_maps=std::make_pair(test_map,test_map2); + +set_type* test_set; +set2_type* test_set2; +auto test_sets=std::make_pair(test_set,test_set2); struct { @@ -40,18 +55,23 @@ struct } rvalue_merge; namespace { - template - void merge_tests(F merger, G gen, test::random_generator rg) + template + void merge_tests( + std::pair, F merger, GF gen_factory, test::random_generator rg) { - auto values = make_random_values(1024 * 8, [&] { return gen(rg); }); + using value_type = typename X::value_type; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; - auto ref_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto gen = gen_factory.template get(); + auto values = make_random_values(1024 * 8, [&] { return gen(rg); }); + auto reference_cont = reference_container(values.begin(), values.end()); { raii::reset_counts(); - map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3)); + X x(values.size(), hasher(1), key_equal(2), allocator_type(3)); auto const old_cc = +raii::copy_constructor; @@ -59,48 +79,50 @@ namespace { std::atomic num_merged{0}; thread_runner(values, [&x, &expected_copies, &num_merged, merger]( - boost::span s) { - using map2_type = boost::unordered::concurrent_flat_map, std::equal_to, allocator_type>; - - map2_type y(s.size(), allocator_type(3)); + boost::span s) { + Y y(s.size(), allocator_type(3)); for (auto const& v : s) { y.insert(v); } - expected_copies += 2 * y.size(); + expected_copies += value_type_cardinality * y.size(); BOOST_TEST(x.get_allocator() == y.get_allocator()); num_merged += merger(x, y); }); BOOST_TEST_EQ(raii::copy_constructor, old_cc + expected_copies); - BOOST_TEST_EQ(raii::move_constructor, 2 * ref_map.size()); - BOOST_TEST_EQ(+num_merged, ref_map.size()); + BOOST_TEST_EQ( + raii::move_constructor, + value_type_cardinality * reference_cont.size()); + BOOST_TEST_EQ(+num_merged, reference_cont.size()); - test_fuzzy_matches_reference(x, ref_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); } - template - void insert_and_merge_tests(G gen, test::random_generator rg) + template + void insert_and_merge_tests( + std::pair, GF gen_factory, test::random_generator rg) { - using map2_type = boost::unordered::concurrent_flat_map, std::equal_to, allocator_type>; + static constexpr auto value_type_cardinality = + value_cardinality::value; + using allocator_type = typename X::allocator_type; + auto gen = gen_factory.template get(); auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); }); auto vals2 = make_random_values(1024 * 4, [&] { return gen(rg); }); - auto ref_map = boost::unordered_flat_map(); - 
ref_map.insert(vals1.begin(), vals1.end()); - ref_map.insert(vals2.begin(), vals2.end()); + auto reference_cont = reference_container(); + reference_cont.insert(vals1.begin(), vals1.end()); + reference_cont.insert(vals2.begin(), vals2.end()); { raii::reset_counts(); - map_type x1(2 * vals1.size(), hasher(1), key_equal(2), allocator_type(3)); + X x1(2 * vals1.size(), hasher(1), key_equal(2), allocator_type(3)); - map2_type x2(2 * vals1.size(), allocator_type(3)); + Y x2(2 * vals1.size(), allocator_type(3)); std::thread t1, t2, t3; boost::compat::latch l(2); @@ -190,12 +212,13 @@ namespace { if (num_merges > 0) { // num merges is 0 most commonly in the cast of the limited_range // generator as both maps will contains keys from 0 to 99 - BOOST_TEST_EQ(+raii::move_constructor, 2 * num_merges); + BOOST_TEST_EQ( + +raii::move_constructor, value_type_cardinality * num_merges); BOOST_TEST_GE(call_count, 1u); } x1.merge(x2); - test_fuzzy_matches_reference(x1, ref_map, rg); + test_fuzzy_matches_reference(x1, reference_cont, rg); } check_raii_counts(); @@ -206,13 +229,15 @@ namespace { // clang-format off UNORDERED_TEST( merge_tests, + ((test_maps)(test_sets)) ((lvalue_merge)(rvalue_merge)) - ((value_type_generator)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( insert_and_merge_tests, - ((value_type_generator)) + ((test_maps)(test_sets)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/reentrancy_check_test.cpp b/test/cfoa/reentrancy_check_test.cpp index 69ef5efc..0bbb41b0 100644 --- a/test/cfoa/reentrancy_check_test.cpp +++ b/test/cfoa/reentrancy_check_test.cpp @@ -12,23 +12,36 @@ namespace boost { // Caveat lector: a proper handler shouldn't throw as it may be executed // within a noexcept function. 
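+// In this test the handlers below do exactly what the caveat warns against,
+// on purpose: they record the assertion in reentrancy_detected and throw so
+// that detect_reentrancy() further down can unwind and inspect the flag.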
-void assertion_failed_msg( - char const*, char const*, char const*, char const*, long) -{ - reentrancy_detected = true; - throw 0; -} + void assertion_failed_msg( + char const*, char const*, char const*, char const*, long) + { + reentrancy_detected = true; + throw 0; + } -void assertion_failed(char const*, char const*, char const*, long) // LCOV_EXCL_START -{ - std::abort(); -} // LCOV_EXCL_STOP + // LCOV_EXCL_START + void assertion_failed(char const*, char const*, char const*, long) + { + std::abort(); + } + // LCOV_EXCL_STOP -} +} // namespace boost + +#include "helpers.hpp" #include +#include #include +using test::default_generator; + +using map_type = boost::unordered::concurrent_flat_map; +using set_type = boost::unordered::concurrent_flat_set; + +map_type* test_map; +set_type* test_set; + template void detect_reentrancy(F f) { @@ -40,40 +53,61 @@ void detect_reentrancy(F f) BOOST_TEST(reentrancy_detected); } -int main() -{ - using map = boost::concurrent_flat_map; - using value_type = typename map::value_type; +namespace { + template + void reentrancy_tests(X*, GF gen_factory, test::random_generator rg) + { + using key_type = typename X::key_type; - map m1, m2; - m1.emplace(0, 0); - m2.emplace(1, 0); + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; - detect_reentrancy([&] { - m1.visit_all([&](value_type&) { (void)m1.contains(0); }); - }); // LCOV_EXCL_LINE + auto gen = gen_factory.template get(); + auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - detect_reentrancy([&] { - m1.visit_all([&](value_type&) { m1.rehash(0); }); - }); // LCOV_EXCL_LINE + X x1, x2; + x1.insert(values.begin(), values.end()); + x2.insert(values.begin(), values.end()); - detect_reentrancy([&] { - m1.visit_all([&](value_type&) { - m2.visit_all([&](value_type&) { - m1=m2; - }); // LCOV_EXCL_START + detect_reentrancy([&] { + x1.visit_all([&](arg_type&) { (void)x1.contains(key_type()); }); + }); // LCOV_EXCL_LINE + + detect_reentrancy([&] { + x1.visit_all([&](arg_type&) { x1.rehash(0); }); + }); // LCOV_EXCL_LINE + + detect_reentrancy([&] { + x1.visit_all([&](arg_type&) { + x2.visit_all([&](arg_type&) { + x1=x2; + }); // LCOV_EXCL_START + }); }); - }); - // LCOV_EXCL_STOP + // LCOV_EXCL_STOP - detect_reentrancy([&] { - m1.visit_all([&](value_type&) { - m2.visit_all([&](value_type&) { - m2=m1; - }); // LCOV_EXCL_START + detect_reentrancy([&] { + x1.visit_all([&](arg_type&) { + x2.visit_all([&](arg_type&) { + x2=x1; + }); // LCOV_EXCL_START + }); }); - }); - // LCOV_EXCL_STOP + // LCOV_EXCL_STOP + } - return boost::report_errors(); -} +} // namespace + +// clang-format off +UNORDERED_TEST( + reentrancy_tests, + ((test_map)(test_set)) + ((value_type_generator_factory)) + ((default_generator))) +// clang-format on + +RUN_TESTS() diff --git a/test/cfoa/rehash_tests.cpp b/test/cfoa/rehash_tests.cpp index 51810f54..fd0b31df 100644 --- a/test/cfoa/rehash_tests.cpp +++ b/test/cfoa/rehash_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include using test::default_generator; using test::limited_range; @@ -12,18 +14,25 @@ using test::sequential; using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator >; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; -using map_value_type = typename map_type::value_type; +using set_type = boost::unordered::concurrent_flat_set >; + +map_type* test_map; +set_type* test_set; namespace { test::seed_t initialize_seed{748775921}; - UNORDERED_AUTO_TEST (rehash_no_insert) { - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + template + void rehash_no_insert(X*) + { + using allocator_type = typename X::allocator_type; + + X x(0, hasher(1), key_equal(2), allocator_type(3)); BOOST_TEST_EQ(x.bucket_count(), 0u); x.rehash(1024); @@ -37,10 +46,13 @@ namespace { BOOST_TEST_EQ(x.bucket_count(), 0u); } - UNORDERED_AUTO_TEST (reserve_no_insert) { - using size_type = map_type::size_type; + template + void reserve_no_insert(X*) + { + using allocator_type = typename X::allocator_type; + using size_type = typename X::size_type; - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); auto f = [&x](double c) { return static_cast(std::ceil(c / x.max_load_factor())); @@ -59,9 +71,13 @@ namespace { BOOST_TEST_EQ(x.bucket_count(), f(0.0)); } - template - void insert_and_erase_with_rehash(G gen, test::random_generator rg) + template + void insert_and_erase_with_rehash( + X*, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); }); auto erase_indices = std::vector(vals1.size()); @@ -70,13 +86,13 @@ namespace { } shuffle_values(erase_indices); - auto ref_map = boost::unordered_flat_map(); - ref_map.insert(vals1.begin(), vals1.end()); + auto reference_cont = reference_container(); + reference_cont.insert(vals1.begin(), vals1.end()); { raii::reset_counts(); - map_type x(0, hasher(1), key_equal(2), allocator_type(3)); + X x(0, hasher(1), key_equal(2), allocator_type(3)); std::thread t1, t2, t3; boost::compat::latch l(2); @@ -121,7 +137,7 @@ namespace { for (std::size_t idx = 0; idx < erase_indices.size(); ++idx) { auto const& val = vals1[erase_indices[idx]]; - x.erase(val.first); + x.erase(get_key(val)); if (idx % 100 == 0) { std::this_thread::yield(); } @@ -161,7 +177,7 @@ namespace { BOOST_TEST_GE(call_count, 1u); - test_fuzzy_matches_reference(x, ref_map, rg); + test_fuzzy_matches_reference(x, reference_cont, rg); } check_raii_counts(); @@ -169,9 +185,18 @@ namespace { } // namespace // clang-format off +UNORDERED_TEST( + rehash_no_insert, + ((test_map)(test_set))) + +UNORDERED_TEST( + reserve_no_insert, + ((test_map)(test_set))) + UNORDERED_TEST( insert_and_erase_with_rehash, - ((value_type_generator)) + ((test_map)(test_set)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/swap_tests.cpp b/test/cfoa/swap_tests.cpp index 264dc460..42408e24 100644 --- a/test/cfoa/swap_tests.cpp +++ b/test/cfoa/swap_tests.cpp @@ -1,10 +1,12 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include test::seed_t initialize_seed{996130204}; @@ -56,17 +58,12 @@ template struct pocs_allocator using hasher = stateful_hash; using key_equal = stateful_key_equal; -using allocator_type = stateful_allocator >; using map_type = boost::unordered::concurrent_flat_map; + key_equal, stateful_allocator > >; -using map_value_type = typename map_type::value_type; - -using pocs_allocator_type = pocs_allocator >; - -using pocs_map_type = boost::unordered::concurrent_flat_map; +using set_type = boost::unordered::concurrent_flat_set >; template struct is_nothrow_member_swappable { @@ -75,13 +72,21 @@ template struct is_nothrow_member_swappable }; BOOST_STATIC_ASSERT(is_nothrow_member_swappable< - boost::unordered::concurrent_flat_map, - std::equal_to, std::allocator > > >::value); + replace_allocator >::value); -BOOST_STATIC_ASSERT(is_nothrow_member_swappable::value); +BOOST_STATIC_ASSERT(is_nothrow_member_swappable< + replace_allocator >::value); BOOST_STATIC_ASSERT(!is_nothrow_member_swappable::value); +BOOST_STATIC_ASSERT(is_nothrow_member_swappable< + replace_allocator >::value); + +BOOST_STATIC_ASSERT(is_nothrow_member_swappable< + replace_allocator >::value); + +BOOST_STATIC_ASSERT(!is_nothrow_member_swappable::value); + namespace { struct { @@ -97,31 +102,31 @@ namespace { } } free_fn_swap; - template - void swap_tests(X*, F swapper, G gen, test::random_generator rg) + template + void swap_tests(X*, F swapper, GF gen_factory, test::random_generator rg) { - using allocator = typename X::allocator_type; + using value_type = typename X::value_type; + using allocator_type = typename X::allocator_type; bool const pocs = - boost::allocator_propagate_on_container_swap::type::value; + boost::allocator_propagate_on_container_swap< + allocator_type>::type::value; + auto gen = gen_factory.template get(); auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); }); auto vals2 = make_random_values(1024 * 4, [&] { return gen(rg); }); - auto ref_map1 = - boost::unordered_flat_map(vals1.begin(), vals1.end()); - - auto ref_map2 = - boost::unordered_flat_map(vals2.begin(), vals2.end()); + auto reference_cont1 = reference_container(vals1.begin(), vals1.end()); + auto reference_cont2 = reference_container(vals2.begin(), vals2.end()); { raii::reset_counts(); X x1(vals1.begin(), vals1.end(), vals1.size(), hasher(1), key_equal(2), - allocator(3)); + allocator_type(3)); X x2(vals2.begin(), vals2.end(), vals2.size(), hasher(2), key_equal(1), - pocs ? allocator(4) : allocator(3)); + pocs ? 
allocator_type(4) : allocator_type(3)); if (pocs) { BOOST_TEST(x1.get_allocator() != x2.get_allocator()); @@ -132,7 +137,7 @@ namespace { auto const old_cc = +raii::copy_constructor; auto const old_mc = +raii::move_constructor; - thread_runner(vals1, [&x1, &x2, swapper](boost::span s) { + thread_runner(vals1, [&x1, &x2, swapper](boost::span s) { (void)s; swapper(x1, x2); @@ -143,20 +148,20 @@ namespace { BOOST_TEST_EQ(raii::move_constructor, old_mc); if (pocs) { - if (x1.get_allocator() == allocator(3)) { - BOOST_TEST(x2.get_allocator() == allocator(4)); + if (x1.get_allocator() == allocator_type(3)) { + BOOST_TEST(x2.get_allocator() == allocator_type(4)); } else { - BOOST_TEST(x1.get_allocator() == allocator(4)); - BOOST_TEST(x2.get_allocator() == allocator(3)); + BOOST_TEST(x1.get_allocator() == allocator_type(4)); + BOOST_TEST(x2.get_allocator() == allocator_type(3)); } } else { - BOOST_TEST(x1.get_allocator() == allocator(3)); + BOOST_TEST(x1.get_allocator() == allocator_type(3)); BOOST_TEST(x1.get_allocator() == x2.get_allocator()); } - if (x1.size() == ref_map1.size()) { - test_matches_reference(x1, ref_map1); - test_matches_reference(x2, ref_map2); + if (x1.size() == reference_cont1.size()) { + test_matches_reference(x1, reference_cont1); + test_matches_reference(x2, reference_cont2); BOOST_TEST_EQ(x1.hash_function(), hasher(1)); BOOST_TEST_EQ(x1.key_eq(), key_equal(2)); @@ -164,8 +169,8 @@ namespace { BOOST_TEST_EQ(x2.hash_function(), hasher(2)); BOOST_TEST_EQ(x2.key_eq(), key_equal(1)); } else { - test_matches_reference(x2, ref_map1); - test_matches_reference(x1, ref_map2); + test_matches_reference(x2, reference_cont1); + test_matches_reference(x1, reference_cont2); BOOST_TEST_EQ(x1.hash_function(), hasher(2)); BOOST_TEST_EQ(x1.key_eq(), key_equal(1)); @@ -177,17 +182,21 @@ namespace { check_raii_counts(); } - template - void insert_and_swap(F swapper, G gen, test::random_generator rg) + template + void insert_and_swap( + X*, F swapper, GF gen_factory, test::random_generator rg) { + using allocator_type = typename X::allocator_type; + + auto gen = gen_factory.template get(); auto vals1 = make_random_values(1024 * 8, [&] { return gen(rg); }); auto vals2 = make_random_values(1024 * 4, [&] { return gen(rg); }); { raii::reset_counts(); - map_type x1(vals1.size(), hasher(1), key_equal(2), allocator_type(3)); - map_type x2(vals2.size(), hasher(2), key_equal(1), allocator_type(3)); + X x1(vals1.size(), hasher(1), key_equal(2), allocator_type(3)); + X x2(vals2.size(), hasher(2), key_equal(1), allocator_type(3)); std::thread t1, t2, t3; boost::compat::latch l(2); @@ -282,21 +291,25 @@ namespace { } map_type* map; - pocs_map_type* pocs_map; + replace_allocator* pocs_map; + + set_type* set; + replace_allocator* pocs_set; } // namespace // clang-format off UNORDERED_TEST( swap_tests, - ((map)(pocs_map)) + ((map)(pocs_map)(set)(pocs_set)) ((member_fn_swap)(free_fn_swap)) - ((value_type_generator)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST(insert_and_swap, + ((map)(set)) ((member_fn_swap)(free_fn_swap)) - ((value_type_generator)) + ((value_type_generator_factory)) ((default_generator)(sequential)(limited_range))) // clang-format on diff --git a/test/cfoa/visit_tests.cpp b/test/cfoa/visit_tests.cpp index 267457df..6660fe7d 100644 --- a/test/cfoa/visit_tests.cpp +++ b/test/cfoa/visit_tests.cpp @@ -1,38 +1,64 @@ // Copyright (C) 2023 Christian Mazakas +// Copyright (C) 2023 Joaquin M Lopez Munoz // Distributed under the Boost Software 
License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include "helpers.hpp" #include +#include #include +#include #include #include namespace { test::seed_t initialize_seed(335740237); + auto non_present_keys = [] + { + std::array a; + for(std::size_t i = 0; i < a.size(); ++i) { + a[i].x_ = -((int)i + 1); + } + return a; + }(); + + template + raii const & get_non_present_key(T const & x) + { + return non_present_keys[get_key(x).x_ % non_present_keys.size()]; + } + struct lvalue_visitor_type { template - void operator()(std::vector& values, X& x, M const& reference_map) + void operator()(std::vector& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_visits{0}; std::atomic total_count{0}; - auto mut_visitor = [&num_visits, &reference_map](value_type& v) { - BOOST_TEST(reference_map.contains(v.first)); - BOOST_TEST_EQ(v.second, reference_map.find(v.first)->second); + auto mut_visitor = [&num_visits, &reference_cont](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; - auto const_visitor = [&num_visits, &reference_map](value_type const& v) { - BOOST_TEST(reference_map.contains(v.first)); - BOOST_TEST_EQ(v.second, reference_map.find(v.first)->second); + auto const_visitor = + [&num_visits, &reference_cont](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; @@ -40,14 +66,14 @@ namespace { thread_runner( values, [&x, &mut_visitor, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto count = x.visit(val.first, mut_visitor); + auto count = x.visit(get_key(val), mut_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; - count = x.visit(val.second, mut_visitor); + count = x.visit(get_non_present_key(val), mut_visitor); BOOST_TEST_EQ(count, 0u); } }); @@ -63,16 +89,16 @@ namespace { thread_runner( values, [&x, &const_visitor, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto const& y = x; - auto count = y.visit(val.first, const_visitor); + auto count = y.visit(get_key(val), const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; - count = y.visit(val.second, const_visitor); + count = y.visit(get_non_present_key(val), const_visitor); BOOST_TEST_EQ(count, 0u); } }); @@ -88,15 +114,15 @@ namespace { thread_runner( values, [&x, &const_visitor, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto count = x.cvisit(val.first, const_visitor); + auto count = x.cvisit(get_key(val), const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; - count = x.cvisit(val.second, const_visitor); + count = x.cvisit(get_non_present_key(val), const_visitor); BOOST_TEST_EQ(count, 0u); } }); @@ -111,14 +137,14 @@ namespace { { thread_runner(values, [&x, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto count = x.count(val.first); + auto count = x.count(get_key(val)); BOOST_TEST_EQ(count, 1u); 
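+            // generated keys have non-negative x_ (asserted above), while
+            // get_non_present_key() returns a key with negative x_, so the
+            // lookups against it below are guaranteed to miss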
total_count += count; - count = x.count(val.second); + count = x.count(get_non_present_key(val)); BOOST_TEST_EQ(count, 0u); } }); @@ -132,13 +158,13 @@ namespace { { thread_runner(values, [&x](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto contains = x.contains(val.first); + auto contains = x.contains(get_key(val)); BOOST_TEST(contains); - contains = x.contains(val.second); + contains = x.contains(get_non_present_key(val)); BOOST_TEST(!contains); } }); @@ -152,22 +178,29 @@ namespace { struct transp_visitor_type { template - void operator()(std::vector& values, X& x, M const& reference_map) + void operator()(std::vector& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic num_visits{0}; std::atomic total_count{0}; - auto mut_visitor = [&num_visits, &reference_map](value_type& v) { - BOOST_TEST(reference_map.contains(v.first)); - BOOST_TEST_EQ(v.second, reference_map.find(v.first)->second); + auto mut_visitor = [&num_visits, &reference_cont](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; - auto const_visitor = [&num_visits, &reference_map](value_type const& v) { - BOOST_TEST(reference_map.contains(v.first)); - BOOST_TEST_EQ(v.second, reference_map.find(v.first)->second); + auto const_visitor = [&num_visits, &reference_cont](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; @@ -175,15 +208,15 @@ namespace { thread_runner( values, [&x, &mut_visitor, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto count = x.visit(val.first.x_, mut_visitor); + auto count = x.visit(get_key(val).x_, mut_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; - count = x.visit(val.second.x_, mut_visitor); + count = x.visit(get_non_present_key(val).x_, mut_visitor); BOOST_TEST_EQ(count, 0u); } }); @@ -199,16 +232,16 @@ namespace { thread_runner( values, [&x, &const_visitor, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); auto const& y = x; - auto count = y.visit(val.first.x_, const_visitor); + auto count = y.visit(get_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; - count = y.visit(val.second.x_, const_visitor); + count = y.visit(get_non_present_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 0u); } }); @@ -224,15 +257,15 @@ namespace { thread_runner( values, [&x, &const_visitor, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto count = x.cvisit(val.first.x_, const_visitor); + auto count = x.cvisit(get_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 1u); total_count += count; - count = x.cvisit(val.second.x_, const_visitor); + count = x.cvisit(get_non_present_key(val).x_, const_visitor); BOOST_TEST_EQ(count, 0u); } }); @@ -247,14 +280,14 @@ namespace { { thread_runner(values, [&x, &total_count](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto count = 
x.count(val.first.x_); + auto count = x.count(get_key(val).x_); BOOST_TEST_EQ(count, 1u); total_count += count; - count = x.count(val.second.x_); + count = x.count(get_non_present_key(val).x_); BOOST_TEST_EQ(count, 0u); } }); @@ -268,13 +301,13 @@ namespace { { thread_runner(values, [&x](boost::span s) { for (auto const& val : s) { - auto r = val.first.x_; + auto r = get_key(val).x_; BOOST_TEST(r >= 0); - auto contains = x.contains(val.first.x_); + auto contains = x.contains(get_key(val).x_); BOOST_TEST(contains); - contains = x.contains(val.second.x_); + contains = x.contains(get_non_present_key(val).x_); BOOST_TEST(!contains); } }); @@ -288,24 +321,31 @@ namespace { struct visit_all_type { template - void operator()(std::vector& values, X& x, M const& reference_map) + void operator()(std::vector& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + std::atomic total_count{0}; - auto mut_visitor = [&reference_map](std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + auto mut_visitor = [&reference_cont](std::atomic& num_visits) { + return [&reference_cont, &num_visits](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; - auto const_visitor = [&reference_map](std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + auto const_visitor = [&reference_cont](std::atomic& num_visits) { + return [&reference_cont, &num_visits](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; @@ -352,45 +392,52 @@ namespace { struct visit_while_type { template - void operator()(std::vector& values, X& x, M const& reference_map) + void operator()(std::vector& values, X& x, M const& reference_cont) { using value_type = typename X::value_type; - auto mut_truthy_visitor = [&reference_map]( + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + + auto mut_truthy_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + return [&reference_cont, &num_visits](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; - auto const_truthy_visitor = [&reference_map]( + auto const_truthy_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + return [&reference_cont, &num_visits](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; - auto mut_falsey_visitor 
= [&reference_map]( + auto mut_falsey_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type& kv) { - BOOST_TEST(reference_map.contains(kv.first)); + return [&reference_cont, &num_visits](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); ++num_visits; - return (kv.second.x_ % 100) == 0; + return (get_value(v).x_ % 100) == 0; }; }; - auto const_falsey_visitor = [&reference_map]( + auto const_falsey_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); + return [&reference_cont, &num_visits](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); ++num_visits; - return (kv.second.x_ % 100) == 0; + return (get_value(v).x_ % 100) == 0; }; }; @@ -452,23 +499,30 @@ namespace { struct exec_policy_visit_all_type { template - void operator()(std::vector& values, X& x, M const& reference_map) + void operator()(std::vector& values, X& x, M const& reference_cont) { #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) using value_type = typename X::value_type; - auto mut_visitor = [&reference_map](std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + + auto mut_visitor = [&reference_cont](std::atomic& num_visits) { + return [&reference_cont, &num_visits](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; - auto const_visitor = [&reference_map](std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + auto const_visitor = [&reference_cont](std::atomic& num_visits) { + return [&reference_cont, &num_visits](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; }; }; @@ -502,7 +556,7 @@ namespace { #else (void)values; (void)x; - (void)reference_map; + (void)reference_cont; #endif } } exec_policy_visit_all; @@ -510,48 +564,55 @@ namespace { struct exec_policy_visit_while_type { template - void operator()(std::vector& values, X& x, M const& reference_map) + void operator()(std::vector& values, X& x, M const& reference_cont) { #if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS) using value_type = typename X::value_type; - auto mut_truthy_visitor = [&reference_map]( + // concurrent_flat_set visit is always const access + using arg_type = typename std::conditional< + std::is_same::value, + typename X::value_type const, + typename X::value_type + >::type; + + auto mut_truthy_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + return [&reference_cont, &num_visits](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; - auto const_truthy_visitor = [&reference_map]( + auto const_truthy_visitor = 
[&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + return [&reference_cont, &num_visits](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; return true; }; }; - auto mut_falsey_visitor = [&reference_map]( + auto mut_falsey_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + return [&reference_cont, &num_visits](arg_type& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; - return (kv.second.x_ % 100) == 0; + return (get_value(v).x_ % 100) == 0; }; }; - auto const_falsey_visitor = [&reference_map]( + auto const_falsey_visitor = [&reference_cont]( std::atomic& num_visits) { - return [&reference_map, &num_visits](value_type const& kv) { - BOOST_TEST(reference_map.contains(kv.first)); - BOOST_TEST_EQ(kv.second, reference_map.find(kv.first)->second); + return [&reference_cont, &num_visits](value_type const& v) { + BOOST_TEST(reference_cont.contains(get_key(v))); + BOOST_TEST_EQ(v, *reference_cont.find(get_key(v))); ++num_visits; - return (kv.second.x_ % 100) == 0; + return (get_value(v).x_ % 100) == 0; }; }; @@ -616,24 +677,17 @@ namespace { #else (void)values; (void)x; - (void)reference_map; + (void)reference_cont; #endif } } exec_policy_visit_while; - template - void visit(X*, G gen, F visitor, test::random_generator rg) + template + void visit(X*, GF gen_factory, F visitor, test::random_generator rg) { + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); - for (auto& val : values) { - if (val.second.x_ == 0) { - val.second.x_ = 1; - } - val.second.x_ *= -1; - } - - auto reference_map = - boost::unordered_flat_map(values.begin(), values.end()); + auto reference_cont = reference_container(values.begin(), values.end()); raii::reset_counts(); @@ -642,7 +696,7 @@ namespace { for (auto const& v : values) { x.insert(v); } - BOOST_TEST_EQ(x.size(), reference_map.size()); + BOOST_TEST_EQ(x.size(), reference_cont.size()); std::uint64_t old_default_constructor = raii::default_constructor; std::uint64_t old_copy_constructor = raii::copy_constructor; @@ -650,7 +704,7 @@ namespace { std::uint64_t old_copy_assignment = raii::copy_assignment; std::uint64_t old_move_assignment = raii::move_assignment; - visitor(values, x, reference_map); + visitor(values, x, reference_cont); BOOST_TEST_EQ(old_default_constructor, raii::default_constructor); BOOST_TEST_EQ(old_copy_constructor, raii::copy_constructor); @@ -669,9 +723,10 @@ namespace { raii::destructor); } - template - void empty_visit(X*, G gen, test::random_generator rg) + template + void empty_visit(X*, GF gen_factory, test::random_generator rg) { + auto gen = gen_factory.template get(); auto values = make_random_values(1024 * 16, [&] { return gen(rg); }); using values_type = decltype(values); using span_value_type = typename values_type::value_type; @@ -696,7 +751,7 @@ namespace { BOOST_TEST_EQ(num_visits, 0u); for (auto const& val : s) { - auto count = x.visit(val.first, + auto count = x.visit(get_key(val), [&num_visits](typename X::value_type const&) { ++num_visits; }); BOOST_TEST_EQ(count, 
0u); } @@ -716,8 +771,8 @@ namespace { BOOST_TEST_EQ(raii::destructor, 0u); } - template - void insert_and_visit(X*, G gen, test::random_generator rg) + template + void insert_and_visit(X*, GF gen_factory, test::random_generator rg) { // here we attempt to ensure happens-before and synchronizes-with // the visitation thread essentially chases the insertion one @@ -726,6 +781,7 @@ namespace { BOOST_TEST(rg == test::sequential); + auto gen = gen_factory.template get(); auto const values = make_random_values(1024 * 16, [&] { return gen(rg); }); { @@ -752,9 +808,9 @@ namespace { for (std::size_t idx = 0; idx < values.size(); ++idx) { std::atomic_bool b{false}; while (!b) { - x.cvisit(values[idx].first, + x.cvisit(get_key(values[idx]), [&b, &strs, idx, &values](typename X::value_type const& v) { - BOOST_TEST_EQ(v.second, values[idx].second); + BOOST_TEST_EQ(get_value(v), get_value(values[idx])); BOOST_TEST_EQ(strs[idx], "rawr"); b = true; }); @@ -771,6 +827,9 @@ namespace { boost::unordered::concurrent_flat_map* map; boost::unordered::concurrent_flat_map* transp_map; + boost::unordered::concurrent_flat_set* set; + boost::unordered::concurrent_flat_set* transp_set; } // namespace @@ -782,29 +841,30 @@ using test::sequential; UNORDERED_TEST( visit, - ((map)) - ((value_type_generator)(init_type_generator)) - ((lvalue_visitor)(visit_all)(visit_while)(exec_policy_visit_all)(exec_policy_visit_while)) + ((map)(set)) + ((value_type_generator_factory)(init_type_generator_factory)) + ((lvalue_visitor)(visit_all)(visit_while)(exec_policy_visit_all) + (exec_policy_visit_while)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( visit, - ((transp_map)) - ((value_type_generator)(init_type_generator)) + ((transp_map)(transp_set)) + ((value_type_generator_factory)(init_type_generator_factory)) ((transp_visitor)) ((default_generator)(sequential)(limited_range))) UNORDERED_TEST( empty_visit, - ((map)(transp_map)) - ((value_type_generator)(init_type_generator)) + ((map)(transp_map)(set)(transp_set)) + ((value_type_generator_factory)(init_type_generator_factory)) ((default_generator)(sequential)(limited_range)) ) UNORDERED_TEST( insert_and_visit, - ((map)) - ((value_type_generator)) + ((map)(set)) + ((value_type_generator_factory)) ((sequential)) ) From 65d4a9cafa2c76583389f9f5e17fdfce976865a8 Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sun, 10 Sep 2023 18:55:54 +0200 Subject: [PATCH 04/14] fixed try_emplace tests after (value|init)_type_generator removal in helpers.hpp --- test/cfoa/try_emplace_tests.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/cfoa/try_emplace_tests.cpp b/test/cfoa/try_emplace_tests.cpp index 374c5f02..fa8ca5e6 100644 --- a/test/cfoa/try_emplace_tests.cpp +++ b/test/cfoa/try_emplace_tests.cpp @@ -373,6 +373,9 @@ using test::default_generator; using test::limited_range; using test::sequential; +value_generator > value_type_generator; +value_generator > init_type_generator; + // clang-format off UNORDERED_TEST( try_emplace, From 2ce456768d74b0380ba2c10a548057455bf89afa Mon Sep 17 00:00:00 2001 From: joaquintides Date: Mon, 11 Sep 2023 09:52:35 +0200 Subject: [PATCH 05/14] completed concurrent_flat_set testing --- test/cfoa/erase_tests.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/cfoa/erase_tests.cpp b/test/cfoa/erase_tests.cpp index bb870701..71f814cd 100644 --- a/test/cfoa/erase_tests.cpp +++ b/test/cfoa/erase_tests.cpp @@ -75,8 +75,8 @@ namespace { raii::destructor + value_type_cardinality * x.size()); 
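+      // value_type_cardinality generalizes the old hard-coded factor of 2:
+      // a map element holds two raii objects (key and mapped value) while a
+      // set element holds one, so the raii::destructor checks below hold for
+      // both container kinds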
thread_runner(values, [&num_erased, &x](boost::span s) { - for (auto const& k : s) { - auto count = x.erase(k.first.x_); + for (auto const& v : s) { + auto count = x.erase(get_key(v).x_); num_erased += count; BOOST_TEST_LE(count, 1u); BOOST_TEST_GE(count, 0u); @@ -87,7 +87,8 @@ namespace { BOOST_TEST_EQ(raii::copy_constructor, old_cc); BOOST_TEST_EQ(raii::move_constructor, old_mc); - BOOST_TEST_EQ(raii::destructor, old_d + 2 * num_erased); + BOOST_TEST_EQ( + raii::destructor, old_d + value_type_cardinality * num_erased); BOOST_TEST_EQ(x.size(), 0u); BOOST_TEST(x.empty()); @@ -441,7 +442,7 @@ namespace { boost::unordered::concurrent_flat_set* set; boost::unordered::concurrent_flat_map* transparent_map; - boost::unordered::concurrent_flat_map* transparent_set; } // namespace @@ -460,7 +461,7 @@ UNORDERED_TEST( UNORDERED_TEST( erase, - ((transparent_map)) + ((transparent_map)(transparent_set)) ((value_type_generator_factory)(init_type_generator_factory)) ((transp_lvalue_eraser)(transp_lvalue_eraser_if)(erase_if_exec_policy)) ((default_generator)(sequential)(limited_range))) From d9d4ff1676eceec8a88e9fa8b6051ff5eb8efe21 Mon Sep 17 00:00:00 2001 From: joaquintides Date: Mon, 11 Sep 2023 09:52:57 +0200 Subject: [PATCH 06/14] removed unused typedef --- test/cfoa/exception_constructor_tests.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/test/cfoa/exception_constructor_tests.cpp b/test/cfoa/exception_constructor_tests.cpp index 0899138e..58ea4fe2 100644 --- a/test/cfoa/exception_constructor_tests.cpp +++ b/test/cfoa/exception_constructor_tests.cpp @@ -254,7 +254,6 @@ namespace { template void initializer_list_bucket_count(std::pair p) { - using value_type = typename X::value_type; using allocator_type = typename X::allocator_type; auto init_list = p.second; From 651f209e207a63bd9a54e6f37ab44afb815fc146 Mon Sep 17 00:00:00 2001 From: joaquintides Date: Mon, 11 Sep 2023 09:53:20 +0200 Subject: [PATCH 07/14] shut down VS warning --- test/cfoa/insert_tests.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/test/cfoa/insert_tests.cpp b/test/cfoa/insert_tests.cpp index e1b53e68..229f0324 100644 --- a/test/cfoa/insert_tests.cpp +++ b/test/cfoa/insert_tests.cpp @@ -5,11 +5,16 @@ #include "helpers.hpp" +#include #include #include #include +#if defined(BOOST_MSVC) +#pragma warning(disable : 4127) // conditional expression is constant +#endif + struct raii_convertible { int x = 0, y = 0 ; From 31c3ce97dedd33896f6626e08dcda00986349c58 Mon Sep 17 00:00:00 2001 From: joaquintides Date: Mon, 11 Sep 2023 09:54:16 +0200 Subject: [PATCH 08/14] avoided sign-conversion warning --- test/cfoa/visit_tests.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/cfoa/visit_tests.cpp b/test/cfoa/visit_tests.cpp index 6660fe7d..d604bffa 100644 --- a/test/cfoa/visit_tests.cpp +++ b/test/cfoa/visit_tests.cpp @@ -29,7 +29,8 @@ namespace { template raii const & get_non_present_key(T const & x) { - return non_present_keys[get_key(x).x_ % non_present_keys.size()]; + return non_present_keys[ + (std::size_t)get_key(x).x_ % non_present_keys.size()]; } struct lvalue_visitor_type From 88f8228079adf3c14c71823fc8ea754ae3b54100 Mon Sep 17 00:00:00 2001 From: joaquintides Date: Mon, 11 Sep 2023 09:54:48 +0200 Subject: [PATCH 09/14] tested concurrent_flat_set --- test/cfoa/serialization_tests.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/cfoa/serialization_tests.cpp b/test/cfoa/serialization_tests.cpp index ad762451..135567c5 100644 --- a/test/cfoa/serialization_tests.cpp 
+++ b/test/cfoa/serialization_tests.cpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace { @@ -69,9 +70,11 @@ namespace { boost::concurrent_flat_map< test::object, test::object, test::hash, test::equal_to>* test_flat_map; + boost::concurrent_flat_set< + test::object, test::hash, test::equal_to>* test_flat_set; UNORDERED_TEST(serialization_tests, - ((test_flat_map)) + ((test_flat_map)(test_flat_set)) ((text_archive)(xml_archive)) ((default_generator))) } From d5d6a18298eb8090057a3df368c7685105390e1b Mon Sep 17 00:00:00 2001 From: joaquintides Date: Mon, 11 Sep 2023 11:07:56 +0200 Subject: [PATCH 10/14] removed unused utilities --- test/cfoa/emplace_tests.cpp | 38 ------------------------------------- 1 file changed, 38 deletions(-) diff --git a/test/cfoa/emplace_tests.cpp b/test/cfoa/emplace_tests.cpp index 0ecc66d4..ec2e1bc0 100644 --- a/test/cfoa/emplace_tests.cpp +++ b/test/cfoa/emplace_tests.cpp @@ -19,68 +19,30 @@ namespace { return x.emplace(v.x_); } - template - bool member_emplace(Container& x, Value& v) - { - return x.emplace(v.x_); - } - template bool member_emplace(Container& x, std::pair const & v) { return x.emplace(v.first.x_, v.second.x_); } - template - bool member_emplace(Container& x, std::pair& v) - { - return x.emplace(v.first.x_, v.second.x_); - } - - template - bool member_emplace_or_visit(Container& x, Value const & v, F f) - { - return x.emplace_or_visit(v.x_, f); - } - template bool member_emplace_or_visit(Container& x, Value& v, F f) { return x.emplace_or_visit(v.x_, f); } - template - bool member_emplace_or_visit( - Container& x, std::pair const & v, F f) - { - return x.emplace_or_visit(v.first.x_, v.second.x_, f); - } - template bool member_emplace_or_visit(Container& x, std::pair& v, F f) { return x.emplace_or_visit(v.first.x_, v.second.x_, f); } - template - bool member_emplace_or_cvisit(Container& x, Value const & v, F f) - { - return x.emplace_or_cvisit(v.x_, f); - } - template bool member_emplace_or_cvisit(Container& x, Value& v, F f) { return x.emplace_or_cvisit(v.x_, f); } - template - bool member_emplace_or_cvisit( - Container& x, std::pair const & v, F f) - { - return x.emplace_or_cvisit(v.first.x_, v.second.x_, f); - } - template bool member_emplace_or_cvisit(Container& x, std::pair& v, F f) { From 9e38e3c578ec6ac7c95dd54f55e4ff786523a83b Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sat, 16 Sep 2023 09:47:10 +0200 Subject: [PATCH 11/14] typos --- doc/unordered/concurrent.adoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/unordered/concurrent.adoc b/doc/unordered/concurrent.adoc index a0b3d998..1bb441b4 100644 --- a/doc/unordered/concurrent.adoc +++ b/doc/unordered/concurrent.adoc @@ -44,7 +44,7 @@ logical cores in the CPU). == Visitation-based API The first thing a new user of `boost::concurrent_flat_set` or `boost::concurrent_flat_map` -will notice is that these classes _do not provide iterators_ (which makes then technically +will notice is that these classes _do not provide iterators_ (which makes them technically not https://en.cppreference.com/w/cpp/named_req/Container[Containers^] in the C++ standard sense). The reason for this is that iterators are inherently thread-unsafe. 
Consider this hypothetical code: @@ -112,7 +112,7 @@ if (found) { } ---- -Visitation is prominent in the API provided by `boost::concurrent_flat_ser` and `boost::concurrent_flat_map`, and +Visitation is prominent in the API provided by `boost::concurrent_flat_set` and `boost::concurrent_flat_map`, and many classical operations have visitation-enabled variations: [source,c++] From 44582ecbb9c8eb4fd5f2837402ee2913677605ce Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sat, 16 Sep 2023 10:18:36 +0200 Subject: [PATCH 12/14] removed unused typedefs --- test/cfoa/assign_tests.cpp | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/test/cfoa/assign_tests.cpp b/test/cfoa/assign_tests.cpp index a9b0ca08..6d2253f2 100644 --- a/test/cfoa/assign_tests.cpp +++ b/test/cfoa/assign_tests.cpp @@ -1102,16 +1102,6 @@ UNORDERED_TEST( ((init_type_generator_factory)) ((default_generator)(sequential)(limited_range))) -boost::unordered::unordered_flat_map > >* flat_map_plain; -boost::unordered::unordered_flat_map > >* flat_map_fancy; - -boost::unordered::concurrent_flat_map > >* map_plain; -boost::unordered::concurrent_flat_map > >* map_fancy; - UNORDERED_TEST( flat_move_assign, ((test_map)(test_set)(fancy_test_map)(fancy_test_set)) From cad3509a17fca87fb1dc212f654640631eef6f1e Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sat, 16 Sep 2023 11:18:39 +0200 Subject: [PATCH 13/14] added boost::concurrent_flat_set to mmap_tests --- test/Jamfile.v2 | 1 + test/unordered/mmap_tests.cpp | 35 +++++++++++++++++++++++++++++------ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/test/Jamfile.v2 b/test/Jamfile.v2 index 79883626..9313c7c1 100644 --- a/test/Jamfile.v2 +++ b/test/Jamfile.v2 @@ -260,6 +260,7 @@ local MMAP_CONTAINERS = unordered_multimap unordered_multiset concurrent_flat_map + concurrent_flat_set ; for local container in $(MMAP_CONTAINERS) diff --git a/test/unordered/mmap_tests.cpp b/test/unordered/mmap_tests.cpp index 1e900bc6..73838544 100644 --- a/test/unordered/mmap_tests.cpp +++ b/test/unordered/mmap_tests.cpp @@ -1,4 +1,5 @@ // Copyright 2023 Christian Mazakas. +// Copyright 2023 Joaquin M Lopez Munoz. // Distributed under the Boost Software License, Version 1.0. 
(See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) @@ -21,6 +22,7 @@ int main() {} #include #include +#include #include #include @@ -82,10 +84,25 @@ get_container_type() using concurrent_map = decltype( get_container_type()); +using concurrent_set = decltype( + get_container_type()); + +template +struct is_concurrent_container: std::false_type {}; + +template +struct is_concurrent_container >: + std::true_type {}; + +template +struct is_concurrent_container >: + std::true_type {}; + static char const* shm_map_name = "shared_map"; template -void parent(std::string const& shm_name_, char const* exe_name, C*) +typename std::enable_if::value, void>::type +parent(std::string const& shm_name_, char const* exe_name, C*) { struct shm_remove { @@ -151,7 +168,9 @@ void parent(std::string const& shm_name_, char const* exe_name, C*) segment.destroy(shm_map_name); } -template void child(std::string const& shm_name, C*) +template +typename std::enable_if::value, void>::type +child(std::string const& shm_name, C*) { using container_type = C; using iterator = typename container_type::iterator; @@ -184,7 +203,9 @@ template void child(std::string const& shm_name, C*) } } -void parent(std::string const& shm_name_, char const* exe_name, concurrent_map*) +template +typename std::enable_if::value, void>::type +parent(std::string const& shm_name_, char const* exe_name, C*) { struct shm_remove { @@ -200,7 +221,7 @@ void parent(std::string const& shm_name_, char const* exe_name, concurrent_map*) } } remover{shm_name_.c_str()}; - using container_type = concurrent_map; + using container_type = C; std::size_t const shm_size = 64 * 1024; @@ -239,9 +260,11 @@ void parent(std::string const& shm_name_, char const* exe_name, concurrent_map*) segment.destroy(shm_map_name); } -void child(std::string const& shm_name, concurrent_map*) +template +typename std::enable_if::value, void>::type +child(std::string const& shm_name, C*) { - using container_type = concurrent_map; + using container_type = C; boost::interprocess::managed_shared_memory segment( boost::interprocess::open_only, shm_name.c_str()); From 82f4d9899ca88bb35fc47e143b539f75110d46ba Mon Sep 17 00:00:00 2001 From: joaquintides Date: Sat, 16 Sep 2023 12:17:07 +0200 Subject: [PATCH 14/14] stylistic/editorial --- .../unordered/detail/foa/concurrent_table.hpp | 25 ++-- include/boost/unordered/detail/foa/core.hpp | 126 ++++++++++-------- 2 files changed, 88 insertions(+), 63 deletions(-) diff --git a/include/boost/unordered/detail/foa/concurrent_table.hpp b/include/boost/unordered/detail/foa/concurrent_table.hpp index 3a90c2f4..a351815a 100644 --- a/include/boost/unordered/detail/foa/concurrent_table.hpp +++ b/include/boost/unordered/detail/foa/concurrent_table.hpp @@ -264,7 +264,8 @@ struct concurrent_table_arrays:table_arrays return boost::to_address(group_accesses_); } - static concurrent_table_arrays new_(group_access_allocator_type al,std::size_t n) + static concurrent_table_arrays new_( + group_access_allocator_type al,std::size_t n) { super x{super::new_(al,n)}; BOOST_TRY{ @@ -310,20 +311,23 @@ struct concurrent_table_arrays:table_arrays } } - static concurrent_table_arrays new_group_access(group_access_allocator_type al,const super& x) + static concurrent_table_arrays new_group_access( + group_access_allocator_type al,const super& x) { concurrent_table_arrays arrays{x,nullptr}; set_group_access(al,arrays); return arrays; } - static void delete_(group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept + static 
void delete_( + group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept { delete_group_access(al,arrays); super::delete_(al,arrays); } - static void delete_group_access(group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept + static void delete_group_access( + group_access_allocator_type al,concurrent_table_arrays& arrays)noexcept { if(arrays.elements()){ boost::allocator_deallocate( @@ -369,9 +373,7 @@ inline void swap(atomic_size_control& x,atomic_size_control& y) } /* foa::concurrent_table serves as the foundation for end-user concurrent - * hash containers. The TypePolicy parameter can specify flat/node-based - * map-like and set-like containers, though currently we're only providing - * boost::concurrent_flat_map. + * hash containers. * * The exposed interface (completed by the wrapping containers) is not that * of a regular container (in fact, it does not model Container as understood @@ -393,7 +395,7 @@ inline void swap(atomic_size_control& x,atomic_size_control& y) * - Parallel versions of [c]visit_all(f) and erase_if(f) are provided based * on C++17 stdlib parallel algorithms. * - * Consult boost::concurrent_flat_map docs for the full API reference. + * Consult boost::concurrent_flat_(map|set) docs for the full API reference. * Heterogeneous lookup is suported by default, that is, without checking for * any ::is_transparent typedefs --this checking is done by the wrapping * containers. @@ -421,8 +423,8 @@ inline void swap(atomic_size_control& x,atomic_size_control& y) * reduced hash value is set) and the insertion counter is atomically * incremented: if no other thread has incremented the counter during the * whole operation (which is checked by comparing with c0), then we're - * good to go and complete the insertion, otherwise we roll back and start - * over. + * good to go and complete the insertion, otherwise we roll back and + * start over. 
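+ *
+ * Purely as an illustration of the scheme just described (the names used
+ * here are invented and do not correspond to actual members), an insertion
+ * attempt boils down to:
+ *
+ *   for(;;){
+ *     auto c0=load_insert_counter(pos0);    // counter value at the start
+ *     if(find(x))return 0;                  // equivalent element already in
+ *     auto slot=construct_in_free_slot(x);  // build value, set reduced hash
+ *     if(fetch_add_insert_counter(pos0)==c0){
+ *       return 1;                           // no interim insertions: done
+ *     }
+ *     destroy(slot);                        // roll back and start over
+ *   }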
*/ template @@ -946,7 +948,8 @@ private: using multimutex_type=multimutex; // TODO: adapt 128 to the machine using shared_lock_guard=reentrancy_checked>; using exclusive_lock_guard=reentrancy_checked>; - using exclusive_bilock_guard=reentrancy_bichecked>; + using exclusive_bilock_guard= + reentrancy_bichecked>; using group_shared_lock_guard=typename group_access::shared_lock_guard; using group_exclusive_lock_guard=typename group_access::exclusive_lock_guard; using group_insert_counter_type=typename group_access::insert_counter_type; diff --git a/include/boost/unordered/detail/foa/core.hpp b/include/boost/unordered/detail/foa/core.hpp index c1fb4850..6e0f6123 100644 --- a/include/boost/unordered/detail/foa/core.hpp +++ b/include/boost/unordered/detail/foa/core.hpp @@ -133,10 +133,10 @@ #define BOOST_UNORDERED_THREAD_SANITIZER #endif -#define BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) \ - static_assert(boost::is_nothrow_swappable::value, \ - "Template parameter Hash is required to be nothrow Swappable."); \ - static_assert(boost::is_nothrow_swappable::value, \ +#define BOOST_UNORDERED_STATIC_ASSERT_HASH_PRED(Hash, Pred) \ + static_assert(boost::is_nothrow_swappable::value, \ + "Template parameter Hash is required to be nothrow Swappable."); \ + static_assert(boost::is_nothrow_swappable::value, \ "Template parameter Pred is required to be nothrow Swappable"); namespace boost{ @@ -148,7 +148,7 @@ static constexpr std::size_t default_bucket_count=0; /* foa::table_core is the common base of foa::table and foa::concurrent_table, * which in their turn serve as the foundational core of - * boost::unordered_(flat|node)_(map|set) and boost::concurrent_flat_map, + * boost::unordered_(flat|node)_(map|set) and boost::concurrent_flat_(map|set), * respectively. Its main internal design aspects are: * * - Element slots are logically split into groups of size N=15. 
@@ -337,38 +337,49 @@ private:
   {
     static constexpr boost::uint32_t word[]=
     {
-      0x08080808u,0x09090909u,0x02020202u,0x03030303u,0x04040404u,0x05050505u,0x06060606u,0x07070707u,
-      0x08080808u,0x09090909u,0x0A0A0A0Au,0x0B0B0B0Bu,0x0C0C0C0Cu,0x0D0D0D0Du,0x0E0E0E0Eu,0x0F0F0F0Fu,
-      0x10101010u,0x11111111u,0x12121212u,0x13131313u,0x14141414u,0x15151515u,0x16161616u,0x17171717u,
-      0x18181818u,0x19191919u,0x1A1A1A1Au,0x1B1B1B1Bu,0x1C1C1C1Cu,0x1D1D1D1Du,0x1E1E1E1Eu,0x1F1F1F1Fu,
-      0x20202020u,0x21212121u,0x22222222u,0x23232323u,0x24242424u,0x25252525u,0x26262626u,0x27272727u,
-      0x28282828u,0x29292929u,0x2A2A2A2Au,0x2B2B2B2Bu,0x2C2C2C2Cu,0x2D2D2D2Du,0x2E2E2E2Eu,0x2F2F2F2Fu,
-      0x30303030u,0x31313131u,0x32323232u,0x33333333u,0x34343434u,0x35353535u,0x36363636u,0x37373737u,
-      0x38383838u,0x39393939u,0x3A3A3A3Au,0x3B3B3B3Bu,0x3C3C3C3Cu,0x3D3D3D3Du,0x3E3E3E3Eu,0x3F3F3F3Fu,
-      0x40404040u,0x41414141u,0x42424242u,0x43434343u,0x44444444u,0x45454545u,0x46464646u,0x47474747u,
-      0x48484848u,0x49494949u,0x4A4A4A4Au,0x4B4B4B4Bu,0x4C4C4C4Cu,0x4D4D4D4Du,0x4E4E4E4Eu,0x4F4F4F4Fu,
-      0x50505050u,0x51515151u,0x52525252u,0x53535353u,0x54545454u,0x55555555u,0x56565656u,0x57575757u,
-      0x58585858u,0x59595959u,0x5A5A5A5Au,0x5B5B5B5Bu,0x5C5C5C5Cu,0x5D5D5D5Du,0x5E5E5E5Eu,0x5F5F5F5Fu,
-      0x60606060u,0x61616161u,0x62626262u,0x63636363u,0x64646464u,0x65656565u,0x66666666u,0x67676767u,
-      0x68686868u,0x69696969u,0x6A6A6A6Au,0x6B6B6B6Bu,0x6C6C6C6Cu,0x6D6D6D6Du,0x6E6E6E6Eu,0x6F6F6F6Fu,
-      0x70707070u,0x71717171u,0x72727272u,0x73737373u,0x74747474u,0x75757575u,0x76767676u,0x77777777u,
-      0x78787878u,0x79797979u,0x7A7A7A7Au,0x7B7B7B7Bu,0x7C7C7C7Cu,0x7D7D7D7Du,0x7E7E7E7Eu,0x7F7F7F7Fu,
-      0x80808080u,0x81818181u,0x82828282u,0x83838383u,0x84848484u,0x85858585u,0x86868686u,0x87878787u,
-      0x88888888u,0x89898989u,0x8A8A8A8Au,0x8B8B8B8Bu,0x8C8C8C8Cu,0x8D8D8D8Du,0x8E8E8E8Eu,0x8F8F8F8Fu,
-      0x90909090u,0x91919191u,0x92929292u,0x93939393u,0x94949494u,0x95959595u,0x96969696u,0x97979797u,
-      0x98989898u,0x99999999u,0x9A9A9A9Au,0x9B9B9B9Bu,0x9C9C9C9Cu,0x9D9D9D9Du,0x9E9E9E9Eu,0x9F9F9F9Fu,
-      0xA0A0A0A0u,0xA1A1A1A1u,0xA2A2A2A2u,0xA3A3A3A3u,0xA4A4A4A4u,0xA5A5A5A5u,0xA6A6A6A6u,0xA7A7A7A7u,
-      0xA8A8A8A8u,0xA9A9A9A9u,0xAAAAAAAAu,0xABABABABu,0xACACACACu,0xADADADADu,0xAEAEAEAEu,0xAFAFAFAFu,
-      0xB0B0B0B0u,0xB1B1B1B1u,0xB2B2B2B2u,0xB3B3B3B3u,0xB4B4B4B4u,0xB5B5B5B5u,0xB6B6B6B6u,0xB7B7B7B7u,
-      0xB8B8B8B8u,0xB9B9B9B9u,0xBABABABAu,0xBBBBBBBBu,0xBCBCBCBCu,0xBDBDBDBDu,0xBEBEBEBEu,0xBFBFBFBFu,
-      0xC0C0C0C0u,0xC1C1C1C1u,0xC2C2C2C2u,0xC3C3C3C3u,0xC4C4C4C4u,0xC5C5C5C5u,0xC6C6C6C6u,0xC7C7C7C7u,
-      0xC8C8C8C8u,0xC9C9C9C9u,0xCACACACAu,0xCBCBCBCBu,0xCCCCCCCCu,0xCDCDCDCDu,0xCECECECEu,0xCFCFCFCFu,
-      0xD0D0D0D0u,0xD1D1D1D1u,0xD2D2D2D2u,0xD3D3D3D3u,0xD4D4D4D4u,0xD5D5D5D5u,0xD6D6D6D6u,0xD7D7D7D7u,
-      0xD8D8D8D8u,0xD9D9D9D9u,0xDADADADAu,0xDBDBDBDBu,0xDCDCDCDCu,0xDDDDDDDDu,0xDEDEDEDEu,0xDFDFDFDFu,
-      0xE0E0E0E0u,0xE1E1E1E1u,0xE2E2E2E2u,0xE3E3E3E3u,0xE4E4E4E4u,0xE5E5E5E5u,0xE6E6E6E6u,0xE7E7E7E7u,
-      0xE8E8E8E8u,0xE9E9E9E9u,0xEAEAEAEAu,0xEBEBEBEBu,0xECECECECu,0xEDEDEDEDu,0xEEEEEEEEu,0xEFEFEFEFu,
-      0xF0F0F0F0u,0xF1F1F1F1u,0xF2F2F2F2u,0xF3F3F3F3u,0xF4F4F4F4u,0xF5F5F5F5u,0xF6F6F6F6u,0xF7F7F7F7u,
-      0xF8F8F8F8u,0xF9F9F9F9u,0xFAFAFAFAu,0xFBFBFBFBu,0xFCFCFCFCu,0xFDFDFDFDu,0xFEFEFEFEu,0xFFFFFFFFu,
+      0x08080808u,0x09090909u,0x02020202u,0x03030303u,0x04040404u,0x05050505u,
+      0x06060606u,0x07070707u,0x08080808u,0x09090909u,0x0A0A0A0Au,0x0B0B0B0Bu,
+      0x0C0C0C0Cu,0x0D0D0D0Du,0x0E0E0E0Eu,0x0F0F0F0Fu,0x10101010u,0x11111111u,
+      0x12121212u,0x13131313u,0x14141414u,0x15151515u,0x16161616u,0x17171717u,
+      0x18181818u,0x19191919u,0x1A1A1A1Au,0x1B1B1B1Bu,0x1C1C1C1Cu,0x1D1D1D1Du,
+      0x1E1E1E1Eu,0x1F1F1F1Fu,0x20202020u,0x21212121u,0x22222222u,0x23232323u,
+      0x24242424u,0x25252525u,0x26262626u,0x27272727u,0x28282828u,0x29292929u,
+      0x2A2A2A2Au,0x2B2B2B2Bu,0x2C2C2C2Cu,0x2D2D2D2Du,0x2E2E2E2Eu,0x2F2F2F2Fu,
+      0x30303030u,0x31313131u,0x32323232u,0x33333333u,0x34343434u,0x35353535u,
+      0x36363636u,0x37373737u,0x38383838u,0x39393939u,0x3A3A3A3Au,0x3B3B3B3Bu,
+      0x3C3C3C3Cu,0x3D3D3D3Du,0x3E3E3E3Eu,0x3F3F3F3Fu,0x40404040u,0x41414141u,
+      0x42424242u,0x43434343u,0x44444444u,0x45454545u,0x46464646u,0x47474747u,
+      0x48484848u,0x49494949u,0x4A4A4A4Au,0x4B4B4B4Bu,0x4C4C4C4Cu,0x4D4D4D4Du,
+      0x4E4E4E4Eu,0x4F4F4F4Fu,0x50505050u,0x51515151u,0x52525252u,0x53535353u,
+      0x54545454u,0x55555555u,0x56565656u,0x57575757u,0x58585858u,0x59595959u,
+      0x5A5A5A5Au,0x5B5B5B5Bu,0x5C5C5C5Cu,0x5D5D5D5Du,0x5E5E5E5Eu,0x5F5F5F5Fu,
+      0x60606060u,0x61616161u,0x62626262u,0x63636363u,0x64646464u,0x65656565u,
+      0x66666666u,0x67676767u,0x68686868u,0x69696969u,0x6A6A6A6Au,0x6B6B6B6Bu,
+      0x6C6C6C6Cu,0x6D6D6D6Du,0x6E6E6E6Eu,0x6F6F6F6Fu,0x70707070u,0x71717171u,
+      0x72727272u,0x73737373u,0x74747474u,0x75757575u,0x76767676u,0x77777777u,
+      0x78787878u,0x79797979u,0x7A7A7A7Au,0x7B7B7B7Bu,0x7C7C7C7Cu,0x7D7D7D7Du,
+      0x7E7E7E7Eu,0x7F7F7F7Fu,0x80808080u,0x81818181u,0x82828282u,0x83838383u,
+      0x84848484u,0x85858585u,0x86868686u,0x87878787u,0x88888888u,0x89898989u,
+      0x8A8A8A8Au,0x8B8B8B8Bu,0x8C8C8C8Cu,0x8D8D8D8Du,0x8E8E8E8Eu,0x8F8F8F8Fu,
+      0x90909090u,0x91919191u,0x92929292u,0x93939393u,0x94949494u,0x95959595u,
+      0x96969696u,0x97979797u,0x98989898u,0x99999999u,0x9A9A9A9Au,0x9B9B9B9Bu,
+      0x9C9C9C9Cu,0x9D9D9D9Du,0x9E9E9E9Eu,0x9F9F9F9Fu,0xA0A0A0A0u,0xA1A1A1A1u,
+      0xA2A2A2A2u,0xA3A3A3A3u,0xA4A4A4A4u,0xA5A5A5A5u,0xA6A6A6A6u,0xA7A7A7A7u,
+      0xA8A8A8A8u,0xA9A9A9A9u,0xAAAAAAAAu,0xABABABABu,0xACACACACu,0xADADADADu,
+      0xAEAEAEAEu,0xAFAFAFAFu,0xB0B0B0B0u,0xB1B1B1B1u,0xB2B2B2B2u,0xB3B3B3B3u,
+      0xB4B4B4B4u,0xB5B5B5B5u,0xB6B6B6B6u,0xB7B7B7B7u,0xB8B8B8B8u,0xB9B9B9B9u,
+      0xBABABABAu,0xBBBBBBBBu,0xBCBCBCBCu,0xBDBDBDBDu,0xBEBEBEBEu,0xBFBFBFBFu,
+      0xC0C0C0C0u,0xC1C1C1C1u,0xC2C2C2C2u,0xC3C3C3C3u,0xC4C4C4C4u,0xC5C5C5C5u,
+      0xC6C6C6C6u,0xC7C7C7C7u,0xC8C8C8C8u,0xC9C9C9C9u,0xCACACACAu,0xCBCBCBCBu,
+      0xCCCCCCCCu,0xCDCDCDCDu,0xCECECECEu,0xCFCFCFCFu,0xD0D0D0D0u,0xD1D1D1D1u,
+      0xD2D2D2D2u,0xD3D3D3D3u,0xD4D4D4D4u,0xD5D5D5D5u,0xD6D6D6D6u,0xD7D7D7D7u,
+      0xD8D8D8D8u,0xD9D9D9D9u,0xDADADADAu,0xDBDBDBDBu,0xDCDCDCDCu,0xDDDDDDDDu,
+      0xDEDEDEDEu,0xDFDFDFDFu,0xE0E0E0E0u,0xE1E1E1E1u,0xE2E2E2E2u,0xE3E3E3E3u,
+      0xE4E4E4E4u,0xE5E5E5E5u,0xE6E6E6E6u,0xE7E7E7E7u,0xE8E8E8E8u,0xE9E9E9E9u,
+      0xEAEAEAEAu,0xEBEBEBEBu,0xECECECECu,0xEDEDEDEDu,0xEEEEEEEEu,0xEFEFEFEFu,
+      0xF0F0F0F0u,0xF1F1F1F1u,0xF2F2F2F2u,0xF3F3F3F3u,0xF4F4F4F4u,0xF5F5F5F5u,
+      0xF6F6F6F6u,0xF7F7F7F7u,0xF8F8F8F8u,0xF9F9F9F9u,0xFAFAFAFAu,0xFBFBFBFBu,
+      0xFCFCFCFCu,0xFDFDFDFDu,0xFEFEFEFEu,0xFFFFFFFFu,
     };
     return (int)word[narrow_cast(hash)];
   }
@@ -549,7 +560,8 @@ private:
   }
 
   /* Copied from
-   * https://github.com/simd-everywhere/simde/blob/master/simde/x86/sse2.h#L3763
+   * https://github.com/simd-everywhere/simde/blob/master/simde/x86/
+   * sse2.h#L3763
    */
 
   static inline int simde_mm_movemask_epi8(uint8x16_t a)
@@ -628,7 +640,8 @@ struct group15
     BOOST_ASSERT(pos<N);
- * - size(size_index_) returns the number of groups for the given index. It is
- *   guaranteed that size(size_index(n)) >= n.
+ * - size(size_index_) returns the number of groups for the given index. It
+ *   is guaranteed that size(size_index(n)) >= n.
  * - min_size() is the minimum number of groups permissible, i.e.
  *   size(size_index(0)).
  * - position(hash,size_index_) maps hash to a position in the range
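To make the size-policy contract stated in the comment above concrete, here is a minimal, self-contained sketch of a power-of-two policy that satisfies it. It is illustrative only and not part of the patch; it is not the library's actual pow2_size_policy (whose position() computation, in particular, differs from the simple mask used here).

```cpp
#include <cstddef>

// Sketch of a power-of-two size policy honoring the documented contract.
struct pow2_size_policy_sketch
{
  static std::size_t size_index(std::size_t n)
  {
    std::size_t i=0;
    while((std::size_t(1)<<i)<n)++i;     // smallest i with 2^i >= n
    return i;
  }
  static std::size_t size(std::size_t size_index_)
  {
    return std::size_t(1)<<size_index_;  // so size(size_index(n)) >= n
  }
  static std::size_t min_size()
  {
    return size(size_index(0));          // minimum permissible number of groups
  }
  static std::size_t position(std::size_t hash,std::size_t size_index_)
  {
    return hash&(size(size_index_)-1);   // a position in [0,size(size_index_))
  }
};
```

For instance, size_index(1000) is 10 and size(10) is 1024, which satisfies size(size_index(n)) >= n.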
@@ -1003,7 +1016,9 @@ struct table_arrays
     rebind;
   using group_type_pointer_traits=boost::pointer_traits;
 
-  table_arrays(std::size_t gsi,std::size_t gsm,group_type_pointer pg,value_type_pointer pe):
+  table_arrays(
+    std::size_t gsi,std::size_t gsm,
+    group_type_pointer pg,value_type_pointer pe):
     groups_size_index{gsi},groups_size_mask{gsm},groups_{pg},elements_{pe}{}
 
   value_type* elements()const noexcept{return boost::to_address(elements_);}
@@ -1016,7 +1031,8 @@ struct table_arrays
   }
 
   static void set_arrays(
-    table_arrays& arrays,allocator_type al,std::size_t,std::false_type /* always allocate */)
+    table_arrays& arrays,allocator_type al,std::size_t,
+    std::false_type /* always allocate */)
   {
     using storage_traits=boost::allocator_traits;
     auto groups_size_index=arrays.groups_size_index;
@@ -1032,7 +1048,8 @@ struct table_arrays
       auto p=reinterpret_cast(arrays.elements()+groups_size*N-1);
       p+=(uintptr_t(sizeof(group_type))-
           reinterpret_cast(p))%sizeof(group_type);
-      arrays.groups_=group_type_pointer_traits::pointer_to(*reinterpret_cast(p));
+      arrays.groups_=
+        group_type_pointer_traits::pointer_to(*reinterpret_cast(p));
 
       initialize_groups(
         arrays.groups(),groups_size,
@@ -1049,7 +1066,8 @@ struct table_arrays
   }
 
   static void set_arrays(
-    table_arrays& arrays,allocator_type al,std::size_t n,std::true_type /* optimize for n==0*/)
+    table_arrays& arrays,allocator_type al,std::size_t n,
+    std::true_type /* optimize for n==0*/)
   {
     if(!n){
       arrays.groups_=dummy_groups();
@@ -1262,8 +1280,8 @@ alloc_make_insert_type(const Allocator& al,Args&&... args)
  * both init_type and value_type references.
  *
  * - TypePolicy::construct and TypePolicy::destroy are used for the
- *   construction and destruction of the internal types: value_type, init_type
- *   and element_type.
+ *   construction and destruction of the internal types: value_type,
+ *   init_type and element_type.
  *
  * - TypePolicy::move is used to provide move semantics for the internal
  *   types used by the container during rehashing and emplace. These types
@@ -1376,9 +1394,12 @@ public:
     table_core{x,alloc_traits::select_on_container_copy_construction(x.al())}{}
 
   template<typename ArraysFn>
-  table_core(table_core&& x,arrays_holder&& ah,ArraysFn arrays_fn):
+  table_core(
+    table_core&& x,arrays_holder&& ah,
+    ArraysFn arrays_fn):
     table_core(
-      std::move(x.h()),std::move(x.pred()),std::move(x.al()),arrays_fn,x.size_ctrl)
+      std::move(x.h()),std::move(x.pred()),std::move(x.al()),
+      arrays_fn,x.size_ctrl)
   {
     ah.release();
     x.arrays=ah.get();
@@ -1393,7 +1414,8 @@ public:
       std::is_nothrow_move_constructible::value&&
       !uses_fancy_pointers):
     table_core{
-      std::move(x),arrays_holder{x.new_arrays(0),x.al()},
+      std::move(x),arrays_holder{
+        x.new_arrays(0),x.al()},
       [&x]{return x.arrays;}}
   {}
 
@@ -2075,8 +2097,8 @@ private:
 
   void recover_slot(unsigned char* pc)
   {
-    /* If this slot potentially caused overflow, we decrease the maximum load so
-     * that average probe length won't increase unboundedly in repeated
+    /* If this slot potentially caused overflow, we decrease the maximum load
+     * so that average probe length won't increase unboundedly in repeated
      * insert/erase cycles (drift).
      */
     size_ctrl.ml-=group_type::maybe_caused_overflow(pc);
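Stepping back to the set_arrays hunk at @@ -1032 above: the expression that advances p by (sizeof(group_type) - p) % sizeof(group_type) bytes rounds the end of the element array up to the next sizeof(group_type) boundary using unsigned modular arithmetic. The standalone check below is illustrative only and not part of the patch; group_size is an assumed stand-in for sizeof(group_type), whose real value depends on the group representation.

```cpp
#include <cassert>
#include <cstdint>

int main()
{
  const std::uintptr_t group_size=16;              // hypothetical sizeof(group_type)
  for(std::uintptr_t p=0;p<4*group_size;++p){
    std::uintptr_t q=p+(group_size-p)%group_size;  // same bump as in set_arrays
    assert(q%group_size==0);                       // q lands on a group boundary
    assert(q>=p&&q-p<group_size);                  // moving forward less than one group
  }
  return 0;
}
```

The formula works for any power-of-two group size because the unsigned wraparound in (group_size - p) is congruent to -p modulo group_size.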