diff --git a/doc/unordered/changes.adoc b/doc/unordered/changes.adoc index e17fe5c1..e308c988 100644 --- a/doc/unordered/changes.adoc +++ b/doc/unordered/changes.adoc @@ -6,6 +6,11 @@ :github-pr-url: https://github.com/boostorg/unordered/pull :cpp: C++ +== Release 1.81.0 - Major update + +* Added fast containers `boost::unordered_flat_map` and `boost::unordered_flat_set` + based on open addressing. + == Release 1.80.0 - Major update * Refactor internal implementation to be dramatically faster diff --git a/doc/unordered/hash_traits.adoc b/doc/unordered/hash_traits.adoc new file mode 100644 index 00000000..900aace8 --- /dev/null +++ b/doc/unordered/hash_traits.adoc @@ -0,0 +1,46 @@ +[#hash_traits] +== Hash traits + +:idprefix: hash_traits_ + +=== Synopsis + +[listing,subs="+macros,+quotes"] +----- +// #include <boost/unordered/hash_traits.hpp> + +namespace boost { +namespace unordered { + +template<typename Hash> +struct xref:#hash_traits_hash_is_avalanching[hash_is_avalanching]; + +} // namespace unordered +} // namespace boost +----- + +--- + +=== hash_is_avalanching +```c++ +template<typename Hash> +struct hash_is_avalanching; +``` + +A hash function is said to have the _avalanching property_ if small changes in the input translate to +large changes in the returned hash code —ideally, flipping one bit in the representation of +the input value results in each bit of the hash code flipping with probability 50%. This property is +critical for the proper behavior of open-addressing hash containers. + +`hash_is_avalanching` derives from `std::true_type` if `Hash::is_avalanching` is a valid type, +and derives from `std::false_type` otherwise. +Users can then declare a hash function `Hash` as avalanching either by embedding an `is_avalanching` typedef +into the definition of `Hash`, or directly by specializing `hash_is_avalanching` to derive from +`std::true_type`. 
+ +xref:unordered_flat_set[`boost::unordered_flat_set`] and xref:unordered_flat_map[`boost::unordered_flat_map`] +use the provided hash function `Hash` as-is if `hash_is_avalanching<Hash>::value` is `true`; otherwise, they +implement a bit-mixing post-processing stage to increase the quality of hashing at the expense of +extra computational cost. + +--- diff --git a/doc/unordered/ref.adoc b/doc/unordered/ref.adoc index 9e401b40..bb4b83e9 100644 --- a/doc/unordered/ref.adoc +++ b/doc/unordered/ref.adoc @@ -5,4 +5,5 @@ include::unordered_map.adoc[] include::unordered_multimap.adoc[] include::unordered_set.adoc[] include::unordered_multiset.adoc[] +include::hash_traits.adoc[] include::unordered_flat_map.adoc[] diff --git a/doc/unordered/unordered_flat_map.adoc b/doc/unordered/unordered_flat_map.adoc index 0a6af9ab..e3a41a96 100644 --- a/doc/unordered/unordered_flat_map.adoc +++ b/doc/unordered/unordered_flat_map.adoc @@ -49,8 +49,8 @@ namespace boost { using hasher = Hash; using key_equal = Pred; using allocator_type = Allocator; - using pointer = value_type*; *TODO: missing in impl* - using const_pointer = const value_type*; *TODO: missing in impl* + using pointer = typename std::allocator_traits<Allocator>::pointer; + using const_pointer = typename std::allocator_traits<Allocator>::const_pointer; using reference = value_type&; using const_reference = const value_type&; using size_type = std::size_t; @@ -122,9 +122,9 @@ namespace boost { std::pair<iterator, bool> xref:#unordered_flat_map_move_insert[insert](value_type&& obj); std::pair<iterator, bool> xref:#unordered_flat_map_move_insert[insert](init_type&& obj); iterator xref:#unordered_flat_map_copy_insert_with_hint[insert](const_iterator hint, const value_type& obj); - iterator xref:#unordered_flat_map_copy_insert_with_hint[insert](const_iterator hint, const init_type& obj); *TODO: missing in impl* + iterator xref:#unordered_flat_map_copy_insert_with_hint[insert](const_iterator hint, const init_type& obj); iterator 
xref:#unordered_flat_map_move_insert_with_hint[insert](const_iterator hint, value_type&& obj); - iterator xref:#unordered_flat_map_copy_insert_with_hint[insert](const_iterator hint, init_type&& obj); *TODO: missing in impl* + iterator xref:#unordered_flat_map_copy_insert_with_hint[insert](const_iterator hint, init_type&& obj); template<class InputIterator> void xref:#unordered_flat_map_insert_iterator_range[insert](InputIterator first, InputIterator last); void xref:#unordered_flat_map_insert_initializer_list[insert](std::initializer_list<value_type>); @@ -178,12 +178,6 @@ namespace boost { iterator xref:#unordered_flat_map_find[find](const K& k); template<class K> const_iterator xref:#unordered_flat_map_find[find](const K& k) const; - template<class CompatibleKey, class CompatibleHash, class CompatiblePredicate> - iterator xref:#unordered_flat_map_find[find](CompatibleKey const& k, CompatibleHash const& hash, CompatiblePredicate const& eq); *TODO: missing in impl* - template<class CompatibleKey, class CompatibleHash, class CompatiblePredicate> - const_iterator xref:#unordered_flat_map_find[find](CompatibleKey const& k, CompatibleHash const& hash, CompatiblePredicate const& eq) const; *TODO: missing in impl* size_type xref:#unordered_flat_map_count[count](const key_type& k) const; template<class K> size_type xref:#unordered_flat_map_count[count](const K& k) const; @@ -274,7 +268,9 @@ The size of the bucket array can be automatically increased by a call to `insert greater than `max_load_factor()`, except possibly for small sizes where the implementation may decide to allow for higher load factors. -*TODO: Write about mixing* +If `xref:hash_traits_hash_is_avalanching[hash_is_avalanching]<Hash>::value` is `true`, the hash function +is used as-is; otherwise, a bit-mixing post-processing stage is added to increase the quality of hashing +at the expense of extra computational cost. 
--- @@ -1051,22 +1047,12 @@ iterator find(const key_type& k); const_iterator find(const key_type& k) const; template<class K> iterator find(const K& k); -template<class K> - const_iterator find(const K& k) const; -template<class CompatibleKey, class CompatibleHash, class CompatiblePredicate> - iterator find(CompatibleKey const& k, CompatibleHash const& hash, - CompatiblePredicate const& eq); -template<class CompatibleKey, class CompatibleHash, class CompatiblePredicate> - const_iterator find(CompatibleKey const& k, CompatibleHash const& hash, - CompatiblePredicate const& eq) const; ``` [horizontal] Returns:;; An iterator pointing to an element with key equivalent to `k`, or `end()` if no such element exists. -Notes:;; The templated overloads containing `CompatibleKey`, `CompatibleHash` and `CompatiblePredicate` are non-standard extensions which allow you to use a compatible hash function and equality predicate for a key of a different type in order to avoid an expensive type cast. In general, its use is not encouraged and instead the `K` member function templates should be used. + -+ -The `template <class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. +Notes:;; The `template <class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type. ---