Merge pull request #188 from boostorg/feature/cfoa

Feature/cfoa
This commit is contained in:
joaquintides
2023-06-17 17:25:56 +02:00
committed by GitHub
125 changed files with 14468 additions and 1822 deletions

View File

@ -11,6 +11,7 @@ local triggers =
local ubsan = { UBSAN: '1', UBSAN_OPTIONS: 'print_stacktrace=1' };
local asan = { ASAN: '1' };
local tsan = { TSAN: '1' };
local linux_pipeline(name, image, environment, packages = "", sources = [], arch = "amd64") =
{
@ -157,16 +158,29 @@ local windows_pipeline(name, image, environment, arch = "amd64") =
),
linux_pipeline(
"Linux 18.04 GCC 8 32/64",
"Linux 18.04 GCC 8 32/64 (03,11)",
"cppalliance/droneubuntu1804:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-8', CXXSTD: '03,11,14,17', ADDRMD: '32,64' },
{ TOOLSET: 'gcc', COMPILER: 'g++-8', CXXSTD: '03,11', ADDRMD: '32,64' },
"g++-8-multilib",
),
linux_pipeline(
"Linux 20.04 GCC 9* 32/64",
"Linux 18.04 GCC 8 32/64 (14,17)",
"cppalliance/droneubuntu1804:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-8', CXXSTD: '14,17', ADDRMD: '32,64' },
"g++-8-multilib",
),
linux_pipeline(
"Linux 20.04 GCC 9* 32/64 (03,11,14)",
"cppalliance/droneubuntu2004:1",
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '03,11,14,17,2a', ADDRMD: '32,64' },
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '03,11,14', ADDRMD: '32,64' },
),
linux_pipeline(
"Linux 20.04 GCC 9* 32/64 (17,2a)",
"cppalliance/droneubuntu2004:1",
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '17,2a', ADDRMD: '32,64' },
),
linux_pipeline(
@ -177,36 +191,77 @@ local windows_pipeline(name, image, environment, arch = "amd64") =
),
linux_pipeline(
"Linux 20.04 GCC 9* S390x",
"Linux 20.04 GCC 9* S390x (03,11,14)",
"cppalliance/droneubuntu2004:multiarch",
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '03,11,14,17,2a' },
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '03,11,14' },
arch="s390x",
),
linux_pipeline(
"Linux 20.04 GCC 10 32/64",
"Linux 20.04 GCC 9* S390x (17,2a)",
"cppalliance/droneubuntu2004:multiarch",
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '17,2a' },
arch="s390x",
),
linux_pipeline(
"Linux 20.04 GCC 10 32/64 (03,11,14)",
"cppalliance/droneubuntu2004:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-10', CXXSTD: '03,11,14,17,20', ADDRMD: '32,64' },
{ TOOLSET: 'gcc', COMPILER: 'g++-10', CXXSTD: '03,11,14', ADDRMD: '32,64' },
"g++-10-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 11* 32/64",
"Linux 20.04 GCC 10 32/64 (17,20)",
"cppalliance/droneubuntu2004:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-10', CXXSTD: '17,20', ADDRMD: '32,64' },
"g++-10-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 11* 32/64 (03,11,14)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '03,11,14,17,2a', ADDRMD: '32,64' },
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '03,11,14', ADDRMD: '32,64' },
),
linux_pipeline(
"Linux 22.04 GCC 11* 32/64 (17,2a)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++', CXXSTD: '17,2a', ADDRMD: '32,64' },
),
linux_pipeline(
"Linux 22.04 GCC 12 32 ASAN (03,11,14)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '03,11,14', ADDRMD: '32' } + asan,
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '03,11', ADDRMD: '32' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 32 ASAN (17,20,2b)",
"Linux 22.04 GCC 12 32 ASAN (14)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '17,20,2b', ADDRMD: '32' } + asan,
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '14', ADDRMD: '32' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 32 ASAN (17)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '17', ADDRMD: '32' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 32 ASAN (20)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '20', ADDRMD: '32' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 32 ASAN (2b)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '2b', ADDRMD: '32' } + asan,
"g++-12-multilib",
),
@ -218,12 +273,47 @@ local windows_pipeline(name, image, environment, arch = "amd64") =
),
linux_pipeline(
"Linux 22.04 GCC 12 64 ASAN (17,20,2b)",
"Linux 22.04 GCC 12 64 ASAN (17)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '17,20,2b', ADDRMD: '64' } + asan,
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '17', ADDRMD: '64' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 64 ASAN (20)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '20', ADDRMD: '64' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 64 ASAN (2b)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '2b', ADDRMD: '64' } + asan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 22.04 GCC 12 64 TSAN (11,14,17,20,2b)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-12', CXXSTD: '11,14,17,20,2b', ADDRMD: '64', TARGET: 'libs/unordered/test//cfoa_tests' } + tsan,
"g++-12-multilib",
),
linux_pipeline(
"Linux 23.04 GCC 13 32/64 (03,11,14)",
"cppalliance/droneubuntu2304:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-13', CXXSTD: '03,11,14', ADDRMD: '32,64' },
"g++-13 g++-13-multilib",
),
linux_pipeline(
"Linux 23.04 GCC 13 32/64 (17,20,2b)",
"cppalliance/droneubuntu2304:1",
{ TOOLSET: 'gcc', COMPILER: 'g++-13', CXXSTD: '17,20,2b', ADDRMD: '32,64' },
"g++-13 g++-13-multilib",
),
linux_pipeline(
"Linux 16.04 Clang 3.5",
"cppalliance/droneubuntu1604:1",
@ -330,19 +420,40 @@ local windows_pipeline(name, image, environment, arch = "amd64") =
),
linux_pipeline(
"Linux 22.04 Clang 14 UBSAN",
"Linux 22.04 Clang 14 UBSAN (03,11,14)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'clang', COMPILER: 'clang++-14', CXXSTD: '03,11,14,17,20' } + ubsan,
{ TOOLSET: 'clang', COMPILER: 'clang++-14', CXXSTD: '03,11,14' } + ubsan,
"clang-14",
),
linux_pipeline(
"Linux 22.04 Clang 14 ASAN",
"Linux 22.04 Clang 14 UBSAN (17,20)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'clang', COMPILER: 'clang++-14', CXXSTD: '03,11,14,17,20' } + asan,
{ TOOLSET: 'clang', COMPILER: 'clang++-14', CXXSTD: '17,20' } + ubsan,
"clang-14",
),
linux_pipeline(
"Linux 22.04 Clang 14 ASAN (03,11,14)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'clang', COMPILER: 'clang++-14', CXXSTD: '03,11,14' } + asan,
"clang-14",
),
linux_pipeline(
"Linux 22.04 Clang 14 ASAN (17,20)",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'clang', COMPILER: 'clang++-14', CXXSTD: '17,20' } + asan,
"clang-14",
),
linux_pipeline(
"Linux 22.04 Clang 14 libc++ 64 TSAN",
"cppalliance/droneubuntu2204:1",
{ TOOLSET: 'clang', COMPILER: 'clang++-14', ADDRMD: '64', TARGET: 'libs/unordered/test//cfoa_tests', CXXSTD: '11,14,17,20', STDLIB: 'libc++' } + tsan,
"clang-14 libc++-14-dev libc++abi-14-dev",
),
linux_pipeline(
"Linux 22.04 Clang 15",
"cppalliance/droneubuntu2204:1",
@ -352,8 +463,18 @@ local windows_pipeline(name, image, environment, arch = "amd64") =
),
macos_pipeline(
"MacOS 10.15 Xcode 12.2 UBSAN",
{ TOOLSET: 'clang', COMPILER: 'clang++', CXXSTD: '03,11,14,1z' } + ubsan,
"MacOS 10.15 Xcode 12.2 UBSAN (03,11)",
{ TOOLSET: 'clang', COMPILER: 'clang++', CXXSTD: '03,11' } + ubsan,
),
macos_pipeline(
"MacOS 10.15 Xcode 12.2 UBSAN (14)",
{ TOOLSET: 'clang', COMPILER: 'clang++', CXXSTD: '14' } + ubsan,
),
macos_pipeline(
"MacOS 10.15 Xcode 12.2 UBSAN (1z)",
{ TOOLSET: 'clang', COMPILER: 'clang++', CXXSTD: '1z' } + ubsan,
),
macos_pipeline(
@ -362,6 +483,12 @@ local windows_pipeline(name, image, environment, arch = "amd64") =
xcode_version = "13.4.1", osx_version = "monterey", arch = "arm64",
),
macos_pipeline(
"MacOS 12.4 Xcode 13.4.1 TSAN",
{ TOOLSET: 'clang', COMPILER: 'clang++', CXXSTD: '11,14,1z', TARGET: 'libs/unordered/test//cfoa_tests' } + tsan,
xcode_version = "13.4.1", osx_version = "monterey", arch = "arm64",
),
windows_pipeline(
"Windows VS2015 msvc-14.0",
"cppalliance/dronevs2015",

View File

@ -7,6 +7,8 @@
set -ex
export PATH=~/.local/bin:/usr/local/bin:$PATH
: ${TARGET:="libs/$LIBRARY/test"}
DRONE_BUILD_DIR=$(pwd)
BOOST_BRANCH=develop
@ -22,4 +24,4 @@ python tools/boostdep/depinst/depinst.py $LIBRARY
./b2 -d0 headers
echo "using $TOOLSET : : $COMPILER ;" > ~/user-config.jam
./b2 -j3 libs/$LIBRARY/test toolset=$TOOLSET cxxstd=$CXXSTD variant=debug,release ${ADDRMD:+address-model=$ADDRMD} ${UBSAN:+undefined-sanitizer=norecover debug-symbols=on} ${ASAN:+address-sanitizer=norecover debug-symbols=on} ${LINKFLAGS:+linkflags=$LINKFLAGS}
./b2 -j3 $TARGET toolset=$TOOLSET cxxstd=$CXXSTD variant=debug,release ${ADDRMD:+address-model=$ADDRMD} ${STDLIB:+stdlib=$STDLIB} ${UBSAN:+undefined-sanitizer=norecover debug-symbols=on} ${ASAN:+address-sanitizer=norecover debug-symbols=on} ${TSAN:+thread-sanitizer=norecover debug-symbols=on} ${LINKFLAGS:+linkflags=$LINKFLAGS}

View File

@ -52,7 +52,10 @@ jobs:
- { name: "gcc-12 w/ sanitizers (17,20,2b)", sanitize: yes,
compiler: gcc-12, cxxstd: '17,20,2b', os: ubuntu-22.04, ccache_key: "san2" }
- { name: Collect coverage, coverage: yes,
compiler: gcc-8, cxxstd: '03,11', os: ubuntu-20.04, install: 'g++-8-multilib', address-model: '32,64', ccache_key: "cov" }
compiler: gcc-12, cxxstd: '03,20', os: ubuntu-22.04, install: 'g++-12-multilib', address-model: '32,64', ccache_key: "cov" }
- { name: "cfoa tsan (gcc)", cxxstd: '11,14,17,20,2b', os: ubuntu-22.04, compiler: gcc-12,
targets: 'libs/unordered/test//cfoa_tests', thread-sanitize: yes }
# Linux, clang, libc++
- { compiler: clang-7, cxxstd: '03,11,14,17', os: ubuntu-20.04, stdlib: libc++, install: 'clang-7 libc++-7-dev libc++abi-7-dev' }
@ -65,15 +68,21 @@ jobs:
compiler: clang-12, cxxstd: '17,20,2b', os: ubuntu-20.04, stdlib: libc++, install: 'clang-12 libc++-12-dev libc++abi-12-dev', ccache_key: "san2" }
- { compiler: clang-13, cxxstd: '03,11,14,17,20,2b', os: ubuntu-22.04, stdlib: libc++, install: 'clang-13 libc++-13-dev libc++abi-13-dev' }
- { compiler: clang-14, cxxstd: '03,11,14,17,20,2b', os: ubuntu-22.04, stdlib: libc++, install: 'clang-14 libc++-14-dev libc++abi-14-dev' }
# not using libc++ because of https://github.com/llvm/llvm-project/issues/52771
- { name: "clang-14 w/ sanitizers (03,11,14)", sanitize: yes,
compiler: clang-14, cxxstd: '03,11,14', os: ubuntu-22.04, ccache_key: "san1" }
- { name: "clang-14 w/ sanitizers (17,20,2b)", sanitize: yes,
compiler: clang-14, cxxstd: '17,20,2b', os: ubuntu-22.04, ccache_key: "san2" }
- { name: "cfoa tsan (clang)", cxxstd: '11,14,17,20,2b', os: ubuntu-22.04, compiler: clang-14,
targets: 'libs/unordered/test//cfoa_tests', thread-sanitize: yes,
stdlib: libc++, install: 'clang-14 libc++-14-dev libc++abi-14-dev' }
# OSX, clang
- { compiler: clang, cxxstd: '03,11,14,17,2a', os: macos-11, }
- { compiler: clang, cxxstd: '03,11,14,17,2a', os: macos-12, sanitize: yes }
- { compiler: clang, cxxstd: '11,14,17,2a', os: macos-12, thread-sanitize: yes, targets: 'libs/unordered/test//cfoa_tests' }
timeout-minutes: 180
runs-on: ${{matrix.os}}
@ -184,6 +193,8 @@ jobs:
B2_COMPILER: ${{matrix.compiler}}
B2_CXXSTD: ${{matrix.cxxstd}}
B2_SANITIZE: ${{matrix.sanitize}}
B2_TSAN: ${{matrix.thread-sanitize}}
B2_TARGETS: ${{matrix.targets}}
B2_STDLIB: ${{matrix.stdlib}}
# More entries can be added in the same way, see the B2_ARGS assignment in ci/enforce.sh for the possible keys.
# B2_DEFINES: ${{matrix.defines}}
@ -197,7 +208,7 @@ jobs:
- name: Run tests
if: '!matrix.coverity'
run: ci/build.sh
run: B2_TARGETS=${{matrix.targets}} ci/build.sh
- name: Upload coverage
if: matrix.coverage

View File

@ -22,6 +22,7 @@ target_link_libraries(boost_unordered
Boost::mp11
Boost::predef
Boost::preprocessor
Boost::static_assert
Boost::throw_exception
Boost::tuple
Boost::type_traits

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 28 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 23 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 24 KiB

BIN
doc/diagrams/cfoa.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.2 KiB

View File

@ -13,8 +13,10 @@
include::unordered/intro.adoc[]
include::unordered/buckets.adoc[]
include::unordered/hash_equality.adoc[]
include::unordered/comparison.adoc[]
include::unordered/regular.adoc[]
include::unordered/concurrent.adoc[]
include::unordered/compliance.adoc[]
include::unordered/structures.adoc[]
include::unordered/benchmarks.adoc[]
include::unordered/rationale.adoc[]
include::unordered/ref.adoc[]

View File

@ -431,3 +431,263 @@ h|unsuccessful lookup
|===
== boost::concurrent_flat_map
All benchmarks were created using:
* `https://spec.oneapi.io/versions/latest/elements/oneTBB/source/containers/concurrent_hash_map_cls.html[oneapi::tbb::concurrent_hash_map^]<int, int>`
* `https://github.com/greg7mdp/gtl/blob/main/docs/phmap.md[gtl::parallel_flat_hash_map^]<int, int>` with 64 submaps
* `boost::concurrent_flat_map<int, int>`
The source code can be https://github.com/boostorg/boost_unordered_benchmarks/tree/boost_concurrent_flat_map[found here^].
The benchmarks exercise a number of threads _T_ (between 1 and 16) concurrently performing operations
randomly chosen among **update**, **successful lookup** and **unsuccessful lookup**. The keys used in the
operations follow a https://en.wikipedia.org/wiki/Zipf%27s_law#Formal_definition[Zipf distribution^]
with different _skew_ parameters: the higher the skew, the more concentrated the keys are in the lower values
of the covered range.
=== GCC 12, x64
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x64/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===
=== Clang 15, x64
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x64/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===
=== Visual Studio 2022, x64
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x64/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===
=== Clang 12, ARM64
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-arm64/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===
=== GCC 12, x86
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/gcc-x86/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===
=== Clang 15, x86
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/clang-x86/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===
=== Visual Studio 2022, x86
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.500k%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.500k%2C%200.01.png]
|image::benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.500k%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.500k%2C%200.5.png]
|image::benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.500k%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.500k%2C%200.99.png]
h|500k updates, 4.5M lookups +
skew=0.01
h|500k updates, 4.5M lookups +
skew=0.5
h|500k updates, 4.5M lookups +
skew=0.99
|===
[caption=]
[cols="3*^.^a", frame=all, grid=all]
|===
|image::benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.5M%2C%200.01.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.5M%2C%200.01.png]
|image::benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.5M%2C%200.5.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.5M%2C%200.5.png]
|image::benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.5M%2C%200.99.png[width=250,window=_blank,link=../diagrams/benchmarks-concurrent_map/vs-x86/Parallel%20workload.xlsx.5M%2C%200.99.png]
h|5M updates, 45M lookups +
skew=0.01
h|5M updates, 45M lookups +
skew=0.5
h|5M updates, 45M lookups +
skew=0.99
|===

View File

@ -2,9 +2,9 @@
:idprefix: buckets_
:imagesdir: ../diagrams
= The Data Structure
= Basics of Hash Tables
The containers are made up of a number of 'buckets', each of which can contain
The containers are made up of a number of _buckets_, each of which can contain
any number of elements. For example, the following diagram shows a <<unordered_set,`boost::unordered_set`>> with 7 buckets containing 5 elements, `A`,
`B`, `C`, `D` and `E` (this is just for illustration, containers will typically
have more buckets).
@ -12,8 +12,7 @@ have more buckets).
image::buckets.png[]
In order to decide which bucket to place an element in, the container applies
the hash function, `Hash`, to the element's key (for `unordered_set` and
`unordered_multiset` the key is the whole element, but is referred to as the key
the hash function, `Hash`, to the element's key (for sets the key is the whole element, but is referred to as the key
so that the same terminology can be used for sets and maps). This returns a
value of type `std::size_t`. `std::size_t` has a much greater range of values
than the number of buckets, so the container applies another transformation to
@ -53,8 +52,7 @@ h|*Method* h|*Description*
|`size_type bucket_count() const`
|The number of buckets.
2+^h| *Closed-addressing containers only* +
`boost::unordered_[multi]set`, `boost::unordered_[multi]map`
2+^h| *Closed-addressing containers only*
h|*Method* h|*Description*
|`size_type max_bucket_count() const`
@ -80,7 +78,7 @@ h|*Method* h|*Description*
|===
== Controlling the number of buckets
== Controlling the Number of Buckets
As more elements are added to an unordered associative container, the number
of collisions will increase causing performance to degrade.
@ -90,8 +88,8 @@ calling `rehash`.
The standard leaves a lot of freedom to the implementer to decide how the
number of buckets is chosen, but it does make some requirements based on the
container's 'load factor', the number of elements divided by the number of buckets.
Containers also have a 'maximum load factor' which they should try to keep the
container's _load factor_, the number of elements divided by the number of buckets.
Containers also have a _maximum load factor_ which they should try to keep the
load factor below.
You can't control the bucket count directly but there are two ways to
@ -133,9 +131,7 @@ h|*Method* h|*Description*
|`void rehash(size_type n)`
|Changes the number of buckets so that there at least `n` buckets, and so that the load factor is less than the maximum load factor.
2+^h| *Open-addressing containers only* +
`boost::unordered_flat_set`, `boost::unordered_flat_map` +
`boost::unordered_node_set`, `boost::unordered_node_map` +
2+^h| *Open-addressing and concurrent containers only*
h|*Method* h|*Description*
|`size_type max_load() const`
@ -143,7 +139,7 @@ h|*Method* h|*Description*
|===
A note on `max_load` for open-addressing containers: the maximum load will be
A note on `max_load` for open-addressing and concurrent containers: the maximum load will be
(`max_load_factor() * bucket_count()`) right after `rehash` or on container creation, but may
slightly decrease when erasing elements in high-load situations. For instance, if we
have a <<unordered_flat_map,`boost::unordered_flat_map`>> with `size()` almost
@ -151,165 +147,4 @@ at `max_load()` level and then erase 1,000 elements, `max_load()` may decrease b
few dozen elements. This is done internally by Boost.Unordered in order
to keep its performance stable, and must be taken into account when planning for rehash-free insertions.
== Iterator Invalidation
It is not specified how member functions other than `rehash` and `reserve` affect
the bucket count, although `insert` can only invalidate iterators
when the insertion causes the container's load to be greater than the maximum allowed.
For most implementations this means that `insert` will only
change the number of buckets when this happens. Iterators can be
invalidated by calls to `insert`, `rehash` and `reserve`.
As for pointers and references,
they are never invalidated for node-based containers
(`boost::unordered_[multi]set`, `boost::unordered_[multi]map`, `boost::unordered_node_set`, `boost::unordered_node_map`),
but they will when rehashing occurs for
`boost::unordered_flat_set` and `boost::unordered_flat_map`: this is because
these containers store elements directly into their holding buckets, so
when allocating a new bucket array the elements must be transferred by means of move construction.
In a similar manner to using `reserve` for ``vector``s, it can be a good idea
to call `reserve` before inserting a large number of elements. This will get
the expensive rehashing out of the way and let you store iterators, safe in
the knowledge that they won't be invalidated. If you are inserting `n`
elements into container `x`, you could first call:
```
x.reserve(n);
```
Note:: `reserve(n)` reserves space for at least `n` elements, allocating enough buckets
so as to not exceed the maximum load factor.
+
Because the maximum load factor is defined as the number of elements divided by the total
number of available buckets, this function is logically equivalent to:
+
```
x.rehash(std::ceil(n / x.max_load_factor()))
```
+
See the <<unordered_map_rehash,reference for more details>> on the `rehash` function.
== Fast Closed Addressing Implementation
++++
<style>
.imageblock > .title {
text-align: inherit;
}
</style>
++++
Boost.Unordered sports one of the fastest implementations of closed addressing, also commonly known as https://en.wikipedia.org/wiki/Hash_table#Separate_chaining[separate chaining]. An example figure representing the data structure is below:
[#img-bucket-groups,.text-center]
.A simple bucket group approach
image::bucket-groups.png[align=center]
An array of "buckets" is allocated and each bucket in turn points to its own individual linked list. This makes meeting the standard requirements of bucket iteration straight-forward. Unfortunately, iteration of the entire container is often times slow using this layout as each bucket must be examined for occupancy, yielding a time complexity of `O(bucket_count() + size())` when the standard requires complexity to be `O(size())`.
Canonical standard implementations will wind up looking like the diagram below:
[.text-center]
.The canonical standard approach
image::singly-linked.png[align=center,link=../diagrams/singly-linked.png,window=_blank]
It's worth noting that this approach is only used by pass:[libc++] and pass:[libstdc++]; the MSVC Dinkumware implementation uses a different one. A more detailed analysis of the standard containers can be found http://bannalia.blogspot.com/2013/10/implementation-of-c-unordered.html[here].
This unusually laid out data structure is chosen to make iteration of the entire container efficient by inter-connecting all of the nodes into a singly-linked list. One might also notice that buckets point to the node _before_ the start of the bucket's elements. This is done so that removing elements from the list can be done efficiently without introducing the need for a doubly-linked list. Unfortunately, this data structure introduces a guaranteed extra indirection. For example, to access the first element of a bucket, something like this must be done:
```c++
auto const idx = get_bucket_idx(hash_function(key));
node* p = buckets[idx]; // first load
node* n = p->next; // second load
if (n && is_in_bucket(n, idx)) {
value_type const& v = *n; // third load
// ...
}
```
With a simple bucket group layout, this is all that must be done:
```c++
auto const idx = get_bucket_idx(hash_function(key));
node* n = buckets[idx]; // first load
if (n) {
value_type const& v = *n; // second load
// ...
}
```
In practice, the extra indirection can have a dramatic performance impact to common operations such as `insert`, `find` and `erase`. But to keep iteration of the container fast, Boost.Unordered introduces a novel data structure, a "bucket group". A bucket group is a fixed-width view of a subsection of the buckets array. It contains a bitmask (a `std::size_t`) which it uses to track occupancy of buckets and contains two pointers so that it can form a doubly-linked list with non-empty groups. An example diagram is below:
[#img-fca-layout]
.The new layout used by Boost
image::fca.png[align=center]
Thus container-wide iteration is turned into traversing the non-empty bucket groups (an operation with constant time complexity) which reduces the time complexity back to `O(size())`. In total, a bucket group is only 4 words in size and it views `sizeof(std::size_t) * CHAR_BIT` buckets meaning that for all common implementations, there's only 4 bits of space overhead per bucket introduced by the bucket groups.
A more detailed description of Boost.Unordered's closed-addressing implementation is
given in an
https://bannalia.blogspot.com/2022/06/advancing-state-of-art-for.html[external article].
For more information on implementation rationale, read the
xref:#rationale_boostunordered_multiset_and_boostunordered_multimap[corresponding section].
== Open Addressing Implementation
The diagram shows the basic internal layout of `boost::unordered_flat_map`/`unordered_node_map` and
`boost::unordered_flat_set`/`unordered_node_set`.
[#img-foa-layout]
.Open-addressing layout used by Boost.Unordered.
image::foa.png[align=center]
As with all open-addressing containers, elements (or pointers to the element nodes in the case of
`boost::unordered_node_map` and `boost::unordered_node_set`) are stored directly in the bucket array.
This array is logically divided into 2^_n_^ _groups_ of 15 elements each.
In addition to the bucket array, there is an associated _metadata array_ with 2^_n_^
16-byte words.
[#img-foa-metadata]
.Breakdown of a metadata word.
image::foa-metadata.png[align=center]
A metadata word is divided into 15 _h_~_i_~ bytes (one for each associated
bucket), and an _overflow byte_ (_ofw_ in the diagram). The value of _h_~_i_~ is:
- 0 if the corresponding bucket is empty.
- 1 to encode a special empty bucket called a _sentinel_, which is used internally to
stop iteration when the container has been fully traversed.
- If the bucket is occupied, a _reduced hash value_ obtained from the hash value of
the element.
When looking for an element with hash value _h_, SIMD technologies such as
https://en.wikipedia.org/wiki/SSE2[SSE2] and
https://en.wikipedia.org/wiki/ARM_architecture_family#Advanced_SIMD_(Neon)[Neon] allow us
to very quickly inspect the full metadata word and look for the reduced value of _h_ among all the
15 buckets with just a handful of CPU instructions: non-matching buckets can be
readily discarded, and those whose reduced hash value matches need be inspected via full
comparison with the corresponding element. If the looked-for element is not present,
the overflow byte is inspected:
- If the bit in the position _h_ mod 8 is zero, lookup terminates (and the
element is not present).
- If the bit is set to 1 (the group has been _overflowed_), further groups are
checked using https://en.wikipedia.org/wiki/Quadratic_probing[_quadratic probing_], and
the process is repeated.
Insertion is algorithmically similar: empty buckets are located using SIMD,
and when going past a full group its corresponding overflow bit is set to 1.
In architectures without SIMD support, the logical layout stays the same, but the metadata
word is codified using a technique we call _bit interleaving_: this layout allows us
to emulate SIMD with reasonably good performance using only standard arithmetic and
logical operations.
[#img-foa-metadata-interleaving]
.Bit-interleaved metadata word.
image::foa-metadata-interleaving.png[align=center]
A more detailed description of Boost.Unordered's open-addressing implementation is
given in an
https://bannalia.blogspot.com/2022/11/inside-boostunorderedflatmap.html[external article].
For more information on implementation rationale, read the
xref:#rationale_boostunordered_flat_set_and_boostunordered_flat_map[corresponding section].

View File

@ -6,8 +6,9 @@
:github-pr-url: https://github.com/boostorg/unordered/pull
:cpp: C++
== Release 1.83.0
== Release 1.83.0 - Major update
* Added `boost::concurrent_flat_map`, a fast, thread-safe hashmap based on open addressing.
* Sped up iteration of open-addressing containers.
== Release 1.82.0 - Major update

View File

@ -5,7 +5,7 @@
:cpp: C++
== Closed-addressing containers
== Closed-addressing Containers
`unordered_[multi]set` and `unordered_[multi]map` are intended to provide a conformant
implementation of the {cpp}20 standard that will work with {cpp}98 upwards.
@ -13,7 +13,7 @@ This wide compatibility does mean some compromises have to be made.
With a compiler and library that fully support {cpp}11, the differences should
be minor.
=== Move emulation
=== Move Emulation
Support for move semantics is implemented using Boost.Move. If rvalue
references are available it will use them, but if not it uses a close,
@ -25,7 +25,7 @@ but imperfect emulation. On such compilers:
* The containers themselves are not movable.
* Argument forwarding is not perfect.
=== Use of allocators
=== Use of Allocators
{cpp}11 introduced a new allocator system. It's backwards compatible due to
the lax requirements for allocators in the old standard, but might need
@ -58,7 +58,7 @@ Due to imperfect move emulation, some assignments might check
`propagate_on_container_copy_assignment` on some compilers and
`propagate_on_container_move_assignment` on others.
=== Construction/Destruction using allocators
=== Construction/Destruction Using Allocators
The following support is required for full use of {cpp}11 style
construction/destruction:
@ -117,7 +117,7 @@ Variadic constructor arguments for `emplace` are only used when both
rvalue references and variadic template parameters are available.
Otherwise `emplace` can only take up to 10 constructor arguments.
== Open-addressing containers
== Open-addressing Containers
The C++ standard does not currently provide any open-addressing container
specification to adhere to, so `boost::unordered_flat_set`/`unordered_node_set` and
@ -144,4 +144,61 @@ The main differences with C++ unordered associative containers are:
** Pointer stability is not kept under rehashing.
** There is no API for node extraction/insertion.
== Concurrent Containers
There is currently no specification in the C++ standard for this or any other concurrent
data structure. `boost::concurrent_flat_map` takes the same template parameters as `std::unordered_map`
and all the maps provided by Boost.Unordered, and its API is modelled after that of
`boost::unordered_flat_map` with the crucial difference that iterators are not provided
due to their inherent problems in concurrent scenarios (high contention, prone to deadlocking):
so, `boost::concurrent_flat_map` is technically not a
https://en.cppreference.com/w/cpp/named_req/Container[Container^], although
it meets all the requirements of https://en.cppreference.com/w/cpp/named_req/AllocatorAwareContainer[AllocatorAware^]
containers except those implying iterators.
In a non-concurrent unordered container, iterators serve two main purposes:
* Access to an element previously located via lookup.
* Container traversal.
In place of iterators, `boost::concurrent_flat_map` uses _internal visitation_
facilities as a thread-safe substitute. Classical operations returning an iterator to an
element already existing in the container, like for instance:
[source,c++]
----
iterator find(const key_type& k);
std::pair<iterator, bool> insert(const value_type& obj);
----
are transformed to accept a _visitation function_ that is passed such element:
[source,c++]
----
template<class F> size_t visit(const key_type& k, F f);
template<class F> bool insert_or_visit(const value_type& obj, F f);
----
(In the second case `f` is only invoked if there's an equivalent element
to `obj` in the table, not if insertion is successful). Container traversal
is served by:
[source,c++]
----
template<class F> size_t visit_all(F f);
----
of which there are parallelized versions in C++17 compilers with parallel
algorithm support. In general, the interface of `boost::concurrent_flat_map`
is derived from that of `boost::unordered_flat_map` by a fairly straightforward
process of replacing iterators with visitation where applicable. If
`iterator` and `const_iterator` provide mutable and const access to elements,
respectively, here visitation is granted mutable or const access depending on
the constness of the member function used (there are also `*cvisit` overloads for
explicit const visitation).
The one notable operation not provided is `operator[]`/`at`, which can be
replaced, if in a more convoluted manner, by
xref:#concurrent_flat_map_try_emplace_or_cvisit[`try_emplace_or_visit`].
//-

View File

@ -0,0 +1,182 @@
[#concurrent]
= Concurrent Containers
:idprefix: concurrent_
Boost.Unordered currently provides just one concurrent container named `boost::concurrent_flat_map`.
`boost::concurrent_flat_map` is a hash table that allows concurrent write/read access from
different threads without having to implement any synchronization mechanism on the user's side.
[source,c++]
----
std::vector<int> input;
boost::concurrent_flat_map<int,int> m;
...
// process input in parallel
const int num_threads = 8;
std::vector<std::jthread> threads;
std::size_t chunk = input.size() / num_threads; // how many elements per thread
for (int i = 0; i < num_threads; ++i) {
threads.emplace_back([&,i] {
// calculate the portion of input this thread takes care of
std::size_t start = i * chunk;
std::size_t end = (i == num_threads - 1)? input.size(): (i + 1) * chunk;
for (std::size_t n = start; n < end; ++n) {
m.emplace(input[n], calculation(input[n]));
}
});
}
----
In the example above, threads access `m` without synchronization, just as we'd do in a
single-threaded scenario. In an ideal setting, if a given workload is distributed among
_N_ threads, execution is _N_ times faster than with one thread —this limit is
never attained in practice due to synchronization overheads and _contention_ (one thread
waiting for another to leave a locked portion of the map), but `boost::concurrent_flat_map`
is designed to perform with very little overhead and typically achieves _linear scaling_
(that is, performance is proportional to the number of threads up to the number of
logical cores in the CPU).
== Visitation-based API
The first thing a new user of `boost::concurrent_flat_map` will notice is that this
class _does not provide iterators_ (which makes it technically
not a https://en.cppreference.com/w/cpp/named_req/Container[Container^]
in the C++ standard sense). The reason for this is that iterators are inherently
thread-unsafe. Consider this hypothetical code:
[source,c++]
----
auto it = m.find(k); // A: get an iterator pointing to the element with key k
if (it != m.end() ) {
some_function(*it); // B: use the value of the element
}
----
In a multithreaded scenario, the iterator `it` may be invalid at point B if some other
thread issues an `m.erase(k)` operation between A and B. There are designs that
can remedy this by making iterators lock the element they point to, but this
approach lends itself to high contention and can easily produce deadlocks in a program.
`operator[]` has similar concurrency issues, and is not provided by
`boost::concurrent_flat_map` either. Instead, element access is done through
so-called _visitation functions_:
[source,c++]
----
m.visit(k, [](const auto& x) { // x is the element with key k (if it exists)
some_function(x); // use it
});
----
The visitation function passed by the user (in this case, a lambda function)
is executed internally by `boost::concurrent_flat_map` in
a thread-safe manner, so it can access the element without worrying about other
threads interfering in the process.
On the other hand, a visitation function can _not_ access the container itself:
[source,c++]
----
m.visit(k, [&](const auto& x) {
some_function(x, m.size()); // forbidden: m can't be accessed inside visitation
});
----
Access to a different container is allowed, though:
[source,c++]
----
m.visit(k, [&](const auto& x) {
if (some_function(x)) {
m2.insert(x); // OK, m2 is a different boost::concurrent_flat_map
}
});
----
But, in general, visitation functions should be as lightweight as possible to
reduce contention and increase parallelization. In some cases, moving heavy work
outside of visitation may be beneficial:
[source,c++]
----
std::optional<value_type> o;
bool found = m.visit(k, [&](const auto& x) {
o = x;
});
if (found) {
some_heavy_duty_function(*o);
}
----
Visitation is prominent in the API provided by `boost::concurrent_flat_map`, and
many classical operations have visitation-enabled variations:
[source,c++]
----
m.insert_or_visit(x, [](auto& y) {
// if insertion failed because of an equivalent element y,
// do something with it, for instance:
++y.second; // increment the mapped part of the element
});
----
Note that in this last example the visitation function could actually _modify_
the element: as a general rule, operations on a `boost::concurrent_flat_map` `m`
will grant visitation functions const/non-const access to the element depending on whether
`m` is const/non-const. Const access can always be explicitly requested
by using `cvisit` overloads (for instance, `insert_or_cvisit`) and may result
in higher parallelization. Consult the xref:#concurrent_flat_map[reference]
for a complete list of available operations.
== Whole-Table Visitation
In the absence of iterators, `boost::concurrent_flat_map` provides `visit_all`
as an alternative way to process all the elements in the map:
[source,c++]
----
m.visit_all([](auto& x) {
x.second = 0; // reset the mapped part of the element
});
----
In C++17 compilers implementing standard parallel algorithms, whole-table
visitation can be parallelized:
[source,c++]
----
m.visit_all(std::execution::par, [](auto& x) { // run in parallel
x.second = 0; // reset the mapped part of the element
});
----
There is another whole-table visitation operation, `erase_if`:
[source,c++]
----
m.erase_if([](auto& x) {
return x.second == 0; // erase the elements whose mapped value is zero
});
----
`erase_if` can also be parallelized. Note that, in order to increase efficiency,
these operations do not block the table during execution: this implies that elements
may be inserted, modified or erased by other threads during visitation. It is
advisable not to assume too much about the exact global state of a `boost::concurrent_flat_map`
at any point in your program.
== Blocking Operations
``boost::concurrent_flat_map``s can be copied, assigned, cleared and merged just like any
Boost.Unordered container. Unlike most other operations, these are _blocking_,
that is, all other threads are prevented from accessing the tables involved while a copy, assignment,
clear or merge operation is in progress. Blocking is taken care of automatically by the library
and the user need not take any special precaution, but overall performance may be affected.
Another blocking operation is _rehashing_, which happens explicitly via `rehash`/`reserve`
or during insertion when the table's load hits `max_load()`. As with non-concurrent containers,
reserving space in advance of bulk insertions will generally speed up the process.

File diff suppressed because it is too large Load Diff

View File

@ -20,14 +20,14 @@ class unordered_map;
The hash function comes first as you might want to change the hash function
but not the equality predicate. For example, if you wanted to use the
http://www.isthe.com/chongo/tech/comp/fnv/[FNV-1 hash^] you could write:
https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function#FNV-1a_hash[FNV-1a hash^] you could write:
```
boost::unordered_map<std::string, int, hash::fnv_1>
boost::unordered_map<std::string, int, hash::fnv_1a>
dictionary;
```
There is an link:../../examples/fnv1.hpp[implementation of FNV-1^] in the examples directory.
There is an link:../../examples/fnv1.hpp[implementation of FNV-1a^] in the examples directory.
If you wish to use a different equality function, you will also need to use a matching hash function. For example, to implement a case insensitive dictionary you need to define a case insensitive equality predicate and hash function:

View File

@ -4,26 +4,65 @@
:idprefix: intro_
:cpp: C++
For accessing data based on key lookup, the {cpp} standard library offers `std::set`,
`std::map`, `std::multiset` and `std::multimap`. These are generally
implemented using balanced binary trees so that lookup time has
logarithmic complexity. That is generally okay, but in many cases a
link:https://en.wikipedia.org/wiki/Hash_table[hash table^] can perform better, as accessing data has constant complexity,
on average. The worst case complexity is linear, but that occurs rarely and
with some care, can be avoided.
link:https://en.wikipedia.org/wiki/Hash_table[Hash tables^] are extremely popular
computer data structures and can be found under one form or another in virtually any programming
language. Whereas other associative structures such as rb-trees (used in {cpp} by `std::set` and `std::map`)
have logarithmic-time complexity for insertion and lookup, hash tables, if configured properly,
perform these operations in constant time on average, and are generally much faster.
Also, the existing containers require a 'less than' comparison object
to order their elements. For some data types this is impossible to implement
or isn't practical. In contrast, a hash table only needs an equality function
and a hash function for the key.
{cpp} introduced __unordered associative containers__ `std::unordered_set`, `std::unordered_map`,
`std::unordered_multiset` and `std::unordered_multimap` in {cpp}11, but research on hash tables
hasn't stopped since: advances in CPU architectures such as
more powerful caches, link:https://en.wikipedia.org/wiki/Single_instruction,_multiple_data[SIMD] operations
and increasingly available link:https://en.wikipedia.org/wiki/Multi-core_processor[multicore processors]
open up possibilities for improved hash-based data structures and new use cases that
are simply beyond reach of unordered associative containers as specified in 2011.
With this in mind, unordered associative containers were added to the {cpp}
standard. Boost.Unordered provides an implementation of the containers described in {cpp}11,
with some <<compliance,deviations from the standard>> in
order to work with non-{cpp}11 compilers and libraries.
Boost.Unordered offers a catalog of hash containers with different standards compliance levels,
performances and intended usage scenarios:
[caption=, title='Table {counter:table-counter}. Boost.Unordered containers']
[cols="1,1,.^1", frame=all, grid=all]
|===
^h|
^h|*Node-based*
^h|*Flat*
^.^h|*Closed addressing*
^m|
boost::unordered_set +
boost::unordered_map +
boost::unordered_multiset +
boost::unordered_multimap
^|
^.^h|*Open addressing*
^m| boost::unordered_node_set +
boost::unordered_node_map
^m| boost::unordered_flat_set +
boost::unordered_flat_map
^.^h|*Concurrent*
^|
^| `boost::concurrent_flat_map`
|===
* **Closed-addressing containers** are fully compliant with the C++ specification
for unordered associative containers and feature one of the fastest implementations
in the market within the technical constraints imposed by the required standard interface.
* **Open-addressing containers** rely on much faster data structures and algorithms
(more than 2 times faster in typical scenarios) while slightly diverging from the standard
interface to accommodate the implementation.
There are two variants: **flat** (the fastest) and **node-based**, which
provide pointer stability under rehashing at the expense of being slower.
* Finally, `boost::concurrent_flat_map` (the only **concurrent container** provided
at present) is a hashmap designed and implemented to be used in high-performance
multithreaded scenarios. Its interface is radically different from that of regular C++ containers.
All sets and maps in Boost.Unordered are instantiated similarly as
`std::unordered_set` and `std::unordered_map`, respectively:
`unordered_set` and `unordered_multiset` are defined in the header
`<boost/unordered/unordered_set.hpp>`
[source,c++]
----
namespace boost {
@ -32,178 +71,21 @@ namespace boost {
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<Key> >
class unordered_set;
class unordered_set;
// same for unordered_multiset, unordered_flat_set, unordered_node_set
template<
class Key,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<Key> >
class unordered_multiset;
}
----
`unordered_map` and `unordered_multimap` are defined in the header
`<boost/unordered/unordered_map.hpp>`
[source,c++]
----
namespace boost {
template <
class Key, class Mapped,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<std::pair<Key const, Mapped> > >
class unordered_map;
template<
class Key, class Mapped,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<std::pair<Key const, Mapped> > >
class unordered_multimap;
// same for unordered_multimap, unordered_flat_map, unordered_node_map
// and concurrent_flat_map
}
----
These containers, and all other implementations of standard unordered associative
containers, use an approach to its internal data structure design called
*closed addressing*. Starting in Boost 1.81, Boost.Unordered also provides containers
`boost::unordered_flat_set` and `boost::unordered_flat_map`, which use a
different data structure strategy commonly known as *open addressing* and depart in
a small number of ways from the standard so as to offer much better performance
in exchange (more than 2 times faster in typical scenarios):
[source,c++]
----
// #include <boost/unordered/unordered_flat_set.hpp>
//
// Note: no multiset version
namespace boost {
template <
class Key,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<Key> >
class unordered_flat_set;
}
----
[source,c++]
----
// #include <boost/unordered/unordered_flat_map.hpp>
//
// Note: no multimap version
namespace boost {
template <
class Key, class Mapped,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<std::pair<Key const, Mapped> > >
class unordered_flat_map;
}
----
Starting in Boost 1.82, the containers `boost::unordered_node_set` and `boost::unordered_node_map`
are introduced: they use open addressing like `boost::unordered_flat_set` and `boost::unordered_flat_map`,
but internally store element _nodes_, like `boost::unordered_set` and `boost::unordered_map`,
which provide stability of pointers and references to the elements:
[source,c++]
----
// #include <boost/unordered/unordered_node_set.hpp>
//
// Note: no multiset version
namespace boost {
template <
class Key,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<Key> >
class unordered_node_set;
}
----
[source,c++]
----
// #include <boost/unordered/unordered_node_map.hpp>
//
// Note: no multimap version
namespace boost {
template <
class Key, class Mapped,
class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Alloc = std::allocator<std::pair<Key const, Mapped> > >
class unordered_node_map;
}
----
These are all the containers provided by Boost.Unordered:
[caption=, title='Table {counter:table-counter}. Boost.Unordered containers']
[cols="1,1,.^1", frame=all, grid=rows]
|===
^h|
^h|*Node-based*
^h|*Flat*
^.^h|*Closed addressing*
^| `boost::unordered_set` +
`boost::unordered_map` +
`boost::unordered_multiset` +
`boost::unordered_multimap`
^|
^.^h|*Open addressing*
^| `boost::unordered_node_set` +
`boost::unordered_node_map`
^| `boost::unordered_flat_set` +
`boost::unordered_flat_map`
|===
Closed-addressing containers are pass:[C++]98-compatible. Open-addressing containers require a
reasonably compliant pass:[C++]11 compiler.
Boost.Unordered containers are used in a similar manner to the normal associative
containers:
[source,cpp]
----
typedef boost::unordered_map<std::string, int> map;
map x;
x["one"] = 1;
x["two"] = 2;
x["three"] = 3;
assert(x.at("one") == 1);
assert(x.find("missing") == x.end());
----
But since the elements aren't ordered, the output of:
[source,c++]
----
for(const map::value_type& i: x) {
std::cout<<i.first<<","<<i.second<<"\n";
}
----
can be in any order. For example, it might be:
[source]
----
two,2
one,1
three,3
----
To store an object in an unordered associative container requires both a
Storing an object in an unordered associative container requires both a
key equality function and a hash function. The default function objects in
the standard containers support a few basic types including integer types,
floating point types, pointer types, and the standard strings. Since
@ -213,6 +95,3 @@ you have to extend Boost.Hash to support the type or use
your own custom equality predicates and hash functions. See the
<<hash_equality,Equality Predicates and Hash Functions>> section
for more details.
There are other differences, which are listed in the
<<comparison,Comparison with Associative Containers>> section.

View File

@ -4,7 +4,7 @@
= Implementation Rationale
== Closed-addressing containers
== Closed-addressing Containers
`boost::unordered_[multi]set` and `boost::unordered_[multi]map`
adhere to the standard requirements for unordered associative
@ -74,7 +74,7 @@ Since release 1.80.0, prime numbers are chosen for the number of buckets in
tandem with sophisticated modulo arithmetic. This removes the need for "mixing"
the result of the user's hash function as was used for release 1.79.0.
== Open-addresing containers
== Open-addressing Containers
The C++ standard specification of unordered associative containers impose
severe limitations on permissible implementations, the most important being
@ -86,7 +86,7 @@ The design of `boost::unordered_flat_set`/`unordered_node_set` and `boost::unord
guided by Peter Dimov's https://pdimov.github.io/articles/unordered_dev_plan.html[Development Plan for Boost.Unordered^].
We discuss here the most relevant principles.
=== Hash function
=== Hash Function
Given its rich functionality and cross-platform interoperability,
`boost::hash` remains the default hash function of open-addressing containers.
@ -105,10 +105,10 @@ whereas in 32 bits _C_ = 0xE817FB2Du has been obtained from https://arxiv.org/ab
When using a hash function directly suitable for open addressing, post-mixing can be opted out via a dedicated <<hash_traits_hash_is_avalanching,`hash_is_avalanching`>> trait.
`boost::hash` specializations for string types are marked as avalanching.
=== Platform interoperability
=== Platform Interoperability
The observable behavior of `boost::unordered_flat_set`/`unordered_node_set` and `boost::unordered_flat_map`/`unordered_node_map` is deterministically
identical across different compilers as long as their ``std::size_type``s are the same size and the user-provided
identical across different compilers as long as their ``std::size_t``s are the same size and the user-provided
hash function and equality predicate are also interoperable
&#8212;this includes elements being ordered in exactly the same way for the same sequence of
operations.
@ -117,3 +117,25 @@ Although the implementation internally uses SIMD technologies, such as https://e
and https://en.wikipedia.org/wiki/ARM_architecture_family#Advanced_SIMD_(NEON)[Neon^], when available,
this does not affect interoperability. For instance, the behavior is the same
for Visual Studio on an x64-mode Intel CPU with SSE2 and for GCC on an IBM s390x without any supported SIMD technology.
== Concurrent Containers
The same data structure used by Boost.Unordered open-addressing containers has been chosen
also as the foundation of `boost::concurrent_flat_map`:
* Open-addressing is faster than closed-addressing alternatives, both in non-concurrent and
concurrent scenarios.
* Open-addressing layouts are eminently suitable for concurrent access and modification
with minimal locking. In particular, the metadata array can be used for implementations of
lookup that are lock-free up to the last step of actual element comparison.
* Layout compatibility with Boost.Unordered flat containers allows for fast transfer
of all elements between `boost::concurrent_flat_map` and `boost::unordered_flat_map`.
(This feature has not been implemented yet.)
=== Hash Function and Platform Interoperability
`boost::concurrent_flat_map` makes the same decisions and provides the same guarantees
as Boost.Unordered open-addressing containers with regards to
xref:#rationale_hash_function[hash function defaults] and
xref:#rationale_platform_interoperability[platform interoperability].

View File

@ -10,3 +10,4 @@ include::unordered_flat_map.adoc[]
include::unordered_flat_set.adoc[]
include::unordered_node_map.adoc[]
include::unordered_node_set.adoc[]
include::concurrent_flat_map.adoc[]

View File

@ -1,8 +1,99 @@
[#regular]
= Regular Containers
:idprefix: regular_
Boost.Unordered closed-addressing containers (`boost::unordered_set`, `boost::unordered_map`,
`boost::unordered_multiset` and `boost::unordered_multimap`) are fully conformant with the
C++ specification for unordered associative containers, so for those who know how to use
`std::unordered_set`, `std::unordered_map`, etc., their homonyms in Boost.Unordered are
drop-in replacements. The interface of open-addressing containers (`boost::unordered_node_set`,
`boost::unordered_node_map`, `boost::unordered_flat_set` and `boost::unordered_flat_map`)
is very similar, but they present some minor differences listed in the dedicated
xref:#compliance_open_addressing_containers[standard compliance section].
For readers without previous experience with hash containers but familiar
with normal associative containers (`std::set`, `std::map`,
`std::multiset` and `std::multimap`), Boost.Unordered containers are used in a similar manner:
[source,cpp]
----
typedef boost::unordered_map<std::string, int> map;
map x;
x["one"] = 1;
x["two"] = 2;
x["three"] = 3;
assert(x.at("one") == 1);
assert(x.find("missing") == x.end());
----
But since the elements aren't ordered, the output of:
[source,c++]
----
for(const map::value_type& i: x) {
std::cout<<i.first<<","<<i.second<<"\n";
}
----
can be in any order. For example, it might be:
[source]
----
two,2
one,1
three,3
----
There are other differences, which are listed in the
<<comparison,Comparison with Associative Containers>> section.
== Iterator Invalidation
It is not specified how member functions other than `rehash` and `reserve` affect
the bucket count, although `insert` can only invalidate iterators
when the insertion causes the container's load to be greater than the maximum allowed.
For most implementations this means that `insert` will only
change the number of buckets when this happens. Iterators can be
invalidated by calls to `insert`, `rehash` and `reserve`.
As for pointers and references,
they are never invalidated for node-based containers
(`boost::unordered_[multi]set`, `boost::unordered_[multi]map`, `boost::unordered_node_set`, `boost::unordered_node_map`),
but they will be when rehashing occurs for
`boost::unordered_flat_set` and `boost::unordered_flat_map`: this is because
these containers store elements directly into their holding buckets, so
when allocating a new bucket array the elements must be transferred by means of move construction.
In a similar manner to using `reserve` for ``vector``s, it can be a good idea
to call `reserve` before inserting a large number of elements. This will get
the expensive rehashing out of the way and let you store iterators, safe in
the knowledge that they won't be invalidated. If you are inserting `n`
elements into container `x`, you could first call:
```
x.reserve(n);
```
Note:: `reserve(n)` reserves space for at least `n` elements, allocating enough buckets
so as to not exceed the maximum load factor.
+
Because the maximum load factor is defined as the number of elements divided by the total
number of available buckets, this function is logically equivalent to:
+
```
x.rehash(std::ceil(n / x.max_load_factor()))
```
+
See the <<unordered_map_rehash,reference for more details>> on the `rehash` function.
[#comparison]
:idprefix: comparison_
= Comparison with Associative Containers
== Comparison with Associative Containers
[caption=, title='Table {counter:table-counter} Interface differences']
[cols="1,1", frame=all, grid=rows]
@ -32,7 +123,7 @@
|`iterator`, `const_iterator` are of at least the forward category.
|Iterators, pointers and references to the container's elements are never invalidated.
|<<buckets_iterator_invalidation,Iterators can be invalidated by calls to insert or rehash>>. +
|<<regular_iterator_invalidation,Iterators can be invalidated by calls to insert or rehash>>. +
**Node-based containers:** Pointers and references to the container's elements are never invalidated. +
**Flat containers:** Pointers and references to the container's elements are invalidated when rehashing occurs.

View File

@ -0,0 +1,179 @@
[#structures]
= Data Structures
:idprefix: structures_
== Closed-addressing Containers
++++
<style>
.imageblock > .title {
text-align: inherit;
}
</style>
++++
Boost.Unordered sports one of the fastest implementations of closed addressing, also commonly known as https://en.wikipedia.org/wiki/Hash_table#Separate_chaining[separate chaining]. An example figure representing the data structure is below:
[#img-bucket-groups,.text-center]
.A simple bucket group approach
image::bucket-groups.png[align=center]
An array of "buckets" is allocated and each bucket in turn points to its own individual linked list. This makes meeting the standard requirements of bucket iteration straightforward. Unfortunately, iteration of the entire container is often slow using this layout as each bucket must be examined for occupancy, yielding a time complexity of `O(bucket_count() + size())` when the standard requires complexity to be `O(size())`.
Canonical standard implementations will wind up looking like the diagram below:
[.text-center]
.The canonical standard approach
image::singly-linked.png[align=center,link=../diagrams/singly-linked.png,window=_blank]
It's worth noting that this approach is only used by pass:[libc++] and pass:[libstdc++]; the MSVC Dinkumware implementation uses a different one. A more detailed analysis of the standard containers can be found http://bannalia.blogspot.com/2013/10/implementation-of-c-unordered.html[here].
This unusually laid out data structure is chosen to make iteration of the entire container efficient by inter-connecting all of the nodes into a singly-linked list. One might also notice that buckets point to the node _before_ the start of the bucket's elements. This is done so that removing elements from the list can be done efficiently without introducing the need for a doubly-linked list. Unfortunately, this data structure introduces a guaranteed extra indirection. For example, to access the first element of a bucket, something like this must be done:
```c++
auto const idx = get_bucket_idx(hash_function(key));
node* p = buckets[idx]; // first load
node* n = p->next; // second load
if (n && is_in_bucket(n, idx)) {
value_type const& v = *n; // third load
// ...
}
```
With a simple bucket group layout, this is all that must be done:
```c++
auto const idx = get_bucket_idx(hash_function(key));
node* n = buckets[idx]; // first load
if (n) {
value_type const& v = *n; // second load
// ...
}
```
In practice, the extra indirection can have a dramatic performance impact to common operations such as `insert`, `find` and `erase`. But to keep iteration of the container fast, Boost.Unordered introduces a novel data structure, a "bucket group". A bucket group is a fixed-width view of a subsection of the buckets array. It contains a bitmask (a `std::size_t`) which it uses to track occupancy of buckets and contains two pointers so that it can form a doubly-linked list with non-empty groups. An example diagram is below:
[#img-fca-layout]
.The new layout used by Boost
image::fca.png[align=center]
Thus container-wide iteration is turned into traversing the non-empty bucket groups (an operation with constant time complexity) which reduces the time complexity back to `O(size())`. In total, a bucket group is only 4 words in size and it views `sizeof(std::size_t) * CHAR_BIT` buckets meaning that for all common implementations, there are only 4 bits of space overhead per bucket introduced by the bucket groups.
A more detailed description of Boost.Unordered's closed-addressing implementation is
given in an
https://bannalia.blogspot.com/2022/06/advancing-state-of-art-for.html[external article].
For more information on implementation rationale, read the
xref:#rationale_closed_addressing_containers[corresponding section].
== Open-addressing Containers
The diagram shows the basic internal layout of `boost::unordered_flat_map`/`unordered_node_map` and
`boost::unordered_flat_set`/`unordered_node_set`.
[#img-foa-layout]
.Open-addressing layout used by Boost.Unordered.
image::foa.png[align=center]
As with all open-addressing containers, elements (or pointers to the element nodes in the case of
`boost::unordered_node_map` and `boost::unordered_node_set`) are stored directly in the bucket array.
This array is logically divided into 2^_n_^ _groups_ of 15 elements each.
In addition to the bucket array, there is an associated _metadata array_ with 2^_n_^
16-byte words.
[#img-foa-metadata]
.Breakdown of a metadata word.
image::foa-metadata.png[align=center]
A metadata word is divided into 15 _h_~_i_~ bytes (one for each associated
bucket), and an _overflow byte_ (_ofw_ in the diagram). The value of _h_~_i_~ is:
- 0 if the corresponding bucket is empty.
- 1 to encode a special empty bucket called a _sentinel_, which is used internally to
stop iteration when the container has been fully traversed.
- If the bucket is occupied, a _reduced hash value_ obtained from the hash value of
the element.
When looking for an element with hash value _h_, SIMD technologies such as
https://en.wikipedia.org/wiki/SSE2[SSE2] and
https://en.wikipedia.org/wiki/ARM_architecture_family#Advanced_SIMD_(Neon)[Neon] allow us
to very quickly inspect the full metadata word and look for the reduced value of _h_ among all the
15 buckets with just a handful of CPU instructions: non-matching buckets can be
readily discarded, and those whose reduced hash value matches need be inspected via full
comparison with the corresponding element. If the looked-for element is not present,
the overflow byte is inspected:
- If the bit in the position _h_ mod 8 is zero, lookup terminates (and the
element is not present).
- If the bit is set to 1 (the group has been _overflowed_), further groups are
checked using https://en.wikipedia.org/wiki/Quadratic_probing[_quadratic probing_], and
the process is repeated.
Insertion is algorithmically similar: empty buckets are located using SIMD,
and when going past a full group its corresponding overflow bit is set to 1.
In architectures without SIMD support, the logical layout stays the same, but the metadata
word is codified using a technique we call _bit interleaving_: this layout allows us
to emulate SIMD with reasonably good performance using only standard arithmetic and
logical operations.
[#img-foa-metadata-interleaving]
.Bit-interleaved metadata word.
image::foa-metadata-interleaving.png[align=center]
A more detailed description of Boost.Unordered's open-addressing implementation is
given in an
https://bannalia.blogspot.com/2022/11/inside-boostunorderedflatmap.html[external article].
For more information on implementation rationale, read the
xref:#rationale_open_addresing_containers[corresponding section].
== Concurrent Containers
`boost::concurrent_flat_map` uses the basic
xref:#structures_open_addressing_containers[open-addressing layout] described above
augmented with synchronization mechanisms.
[#img-cfoa-layout]
.Concurrent open-addressing layout used by Boost.Unordered.
image::cfoa.png[align=center]
Two levels of synchronization are used:
* Container level: A read-write mutex is used to control access from any operation
to the container. Typically, such access is in read mode (that is, concurrent) even
for modifying operations, so for most practical purposes there is no thread
contention at this level. Access is only in write mode (blocking) when rehashing or
performing container-wide operations such as swapping or assignment.
* Group level: Each 15-slot group is equipped with an 8-byte word containing:
** A read-write spinlock for synchronized access to any element in the group.
** An atomic _insertion counter_ used for optimistic insertion as described
below.
By using atomic operations to access the group metadata, lookup is (group-level)
lock-free up to the point where an actual comparison needs to be done with an element
that has been previously SIMD-matched: only then is the group's spinlock used.
Insertion uses the following _optimistic algorithm_:
* The value of the insertion counter for the initial group in the probe
sequence is locally recorded (let's call this value `c0`).
* Lookup is as described above. If lookup finds no equivalent element,
search for an available slot for insertion successively locks/unlocks
each group in the probing sequence.
* When an available slot is located, it is preemptively occupied (its
reduced hash value is set) and the insertion counter is atomically
incremented: if no other thread has incremented the counter during the
whole operation (which is checked by comparing with `c0`), then we're
good to go and complete the insertion, otherwise we roll back and start
over.
This algorithm has very low contention both at the lookup and actual
insertion phases in exchange for the possibility that computations have
to be started over if some other thread interferes in the process by
performing a successful insertion beginning at the same group. In
practice, the start-over frequency is extremely small, measured in the range
of parts per million for some of our benchmarks.
For more information on implementation rationale, read the
xref:#rationale_concurrent_containers[corresponding section].

View File

@ -1,5 +1,5 @@
[#unordered_flat_map]
== Class template unordered_flat_map
== Class Template unordered_flat_map
:idprefix: unordered_flat_map_
@ -280,6 +280,7 @@ namespace boost {
unordered_flat_map<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class T, class H, class P, class A, class Predicate>
typename unordered_flat_map<K, T, H, P, A>::size_type
xref:#unordered_flat_map_erase_if[erase_if](unordered_flat_map<K, T, H, P, A>& c, Predicate pred);
@ -859,7 +860,7 @@ void insert(std::initializer_list<value_type>);
Inserts a range of elements into the container. Elements are inserted if and only if there is no element in the container with an equivalent key.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into the container from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, pointers and references, but only if the insert causes the load to be greater than the maximum load.
@ -875,7 +876,7 @@ template<class K, class... Args>
std::pair<iterator, bool> try_emplace(K&& k, Args&&... args);
```
Inserts a new node into the container if there is no existing element with key `k` contained within it.
Inserts a new element into the container if there is no existing element with key `k` contained within it.
If there is an existing element with key `k` this function does nothing.
@ -904,7 +905,7 @@ unlike xref:#unordered_flat_map_emplace[emplace], which simply forwards all argu
Can invalidate iterators pointers and references, but only if the insert causes the load to be greater than the maximum load.
The `template <class K, class... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K, class\... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
--
@ -920,7 +921,7 @@ template<class K, class... Args>
iterator try_emplace(const_iterator hint, K&& k, Args&&... args);
```
Inserts a new node into the container if there is no existing element with key `k` contained within it.
Inserts a new element into the container if there is no existing element with key `k` contained within it.
If there is an existing element with key `k` this function does nothing.
@ -949,7 +950,7 @@ unlike xref:#unordered_flat_map_emplace_hint[emplace_hint], which simply forward
Can invalidate iterators pointers and references, but only if the insert causes the load to be greater than the maximum load.
The `template <class K, class... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K, class\... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
--
@ -1160,7 +1161,7 @@ template<class K>
[horizontal]
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `end()` if no such element exists.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1173,7 +1174,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1186,7 +1187,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1202,7 +1203,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_flat_set]
== Class template unordered_flat_set
== Class Template unordered_flat_set
:idprefix: unordered_flat_set_
@ -234,6 +234,7 @@ namespace boost {
unordered_flat_set<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class T, class H, class P, class A, class Predicate>
typename unordered_flat_set<K, T, H, P, A>::size_type
xref:#unordered_flat_set_erase_if[erase_if](unordered_flat_set<K, T, H, P, A>& c, Predicate pred);
@ -837,7 +838,7 @@ void insert(std::initializer_list<value_type>);
Inserts a range of elements into the container. Elements are inserted if and only if there is no element in the container with an equivalent key.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into the container from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, pointers and references, but only if the insert causes the load to be greater than the maximum load.
@ -971,7 +972,7 @@ template<class K>
[horizontal]
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `end()` if no such element exists.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -984,7 +985,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -997,7 +998,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1013,7 +1014,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_map]
== Class template unordered_map
== Class Template unordered_map
:idprefix: unordered_map_
@ -286,6 +286,7 @@ namespace boost {
unordered_map<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class T, class H, class P, class A, class Predicate>
typename unordered_map<K, T, H, P, A>::size_type
xref:#unordered_map_erase_if[erase_if](unordered_map<K, T, H, P, A>& c, Predicate pred);
@ -995,7 +996,7 @@ void insert(std::initializer_list<value_type>);
Inserts a range of elements into the container. Elements are inserted if and only if there is no element in the container with an equivalent key.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into `X` from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, but only if the insert causes the load factor to be greater to or equal to the maximum load factor. +
+
@ -1009,11 +1010,11 @@ template<class... Args>
std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args);
template<class... Args>
std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args);
template <class K, class... Args>
template<class K, class... Args>
std::pair<iterator, bool> try_emplace(K&& k, Args&&... args)
```
Inserts a new node into the container if there is no existing element with key `k` contained within it.
Inserts a new element into the container if there is no existing element with key `k` contained within it.
If there is an existing element with key `k` this function does nothing.
@ -1043,7 +1044,7 @@ Can invalidate iterators, but only if the insert causes the load factor to be gr
Pointers and references to elements are never invalidated.
The `template <class K, class... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K, class\... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
If the compiler doesn't support variadic template arguments or rvalue references, this is emulated for up to `10` arguments, with no support for rvalue references or move semantics.
@ -1062,7 +1063,7 @@ template<class K, class... Args>
iterator try_emplace(const_iterator hint, K&& k, Args&&... args);
```
Inserts a new node into the container if there is no existing element with key `k` contained within it.
Inserts a new element into the container if there is no existing element with key `k` contained within it.
If there is an existing element with key `k` this function does nothing.
@ -1094,7 +1095,7 @@ Can invalidate iterators, but only if the insert causes the load factor to be gr
Pointers and references to elements are never invalidated.
The `template <class K, class... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K, class\... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
If the compiler doesn't support variadic template arguments or rvalue references, this is emulated for up to `10` arguments, with no support for rvalue references or move semantics.
@ -1466,7 +1467,7 @@ template<typename CompatibleKey, typename CompatibleHash, typename CompatiblePre
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `b.end()` if no such element exists.
Notes:;; The templated overloads containing `CompatibleKey`, `CompatibleHash` and `CompatiblePredicate` are non-standard extensions which allow you to use a compatible hash function and equality predicate for a key of a different type in order to avoid an expensive type cast. In general, its use is not encouraged and instead the `K` member function templates should be used. +
+
The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1479,7 +1480,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1492,7 +1493,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1508,7 +1509,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_multimap]
== Class template unordered_multimap
== Class Template unordered_multimap
:idprefix: unordered_multimap_
@ -253,6 +253,7 @@ namespace boost {
unordered_multimap<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class T, class H, class P, class A, class Predicate>
typename unordered_multimap<K, T, H, P, A>::size_type
xref:#unordered_multimap_erase_if[erase_if](unordered_multimap<K, T, H, P, A>& c, Predicate pred);
@ -941,7 +942,7 @@ void insert(std::initializer_list<value_type> il);
Inserts a range of elements into the container.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into `X` from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, but only if the insert causes the load factor to be greater than or equal to the maximum load factor. +
+
@ -1223,7 +1224,7 @@ template<typename CompatibleKey, typename CompatibleHash, typename CompatiblePre
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `b.end()` if no such element exists.
Notes:;; The templated overloads containing `CompatibleKey`, `CompatibleHash` and `CompatiblePredicate` are non-standard extensions which allow you to use a compatible hash function and equality predicate for a key of a different type in order to avoid an expensive type cast. In general, its use is not encouraged and instead the `K` member function templates should be used. +
+
The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1236,7 +1237,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1249,7 +1250,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1265,7 +1266,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_multiset]
== Class template unordered_multiset
== Class Template unordered_multiset
:idprefix: unordered_multiset_
@ -244,6 +244,7 @@ namespace boost {
unordered_multiset<Key, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class H, class P, class A, class Predicate>
typename unordered_multiset<K, H, P, A>::size_type
xref:#unordered_multiset_erase_if[erase_if](unordered_multiset<K, H, P, A>& c, Predicate pred);
@ -899,7 +900,7 @@ void insert(std::initializer_list<value_type> il);
Inserts a range of elements into the container.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into `X` from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, but only if the insert causes the load factor to be greater than or equal to the maximum load factor. +
+
@ -1181,7 +1182,7 @@ template<typename CompatibleKey, typename CompatibleHash, typename CompatiblePre
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `b.end()` if no such element exists.
Notes:;; The templated overloads containing `CompatibleKey`, `CompatibleHash` and `CompatiblePredicate` are non-standard extensions which allow you to use a compatible hash function and equality predicate for a key of a different type in order to avoid an expensive type cast. In general, its use is not encouraged and instead the `K` member function templates should be used. +
+
The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1194,7 +1195,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1207,7 +1208,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1223,7 +1224,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_node_map]
== Class template unordered_node_map
== Class Template unordered_node_map
:idprefix: unordered_node_map_
@ -284,6 +284,7 @@ namespace boost {
unordered_node_map<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class T, class H, class P, class A, class Predicate>
typename unordered_node_map<K, T, H, P, A>::size_type
xref:#unordered_node_map_erase_if[erase_if](unordered_node_map<K, T, H, P, A>& c, Predicate pred);
@ -893,7 +894,7 @@ void insert(std::initializer_list<value_type>);
Inserts a range of elements into the container. Elements are inserted if and only if there is no element in the container with an equivalent key.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into the container from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, but only if the insert causes the load to be greater than the maximum load.
@ -945,7 +946,7 @@ template<class K, class... Args>
std::pair<iterator, bool> try_emplace(K&& k, Args&&... args);
```
Inserts a new node into the container if there is no existing element with key `k` contained within it.
Inserts a new element into the container if there is no existing element with key `k` contained within it.
If there is an existing element with key `k` this function does nothing.
@ -974,7 +975,7 @@ unlike xref:#unordered_node_map_emplace[emplace], which simply forwards all argu
Can invalidate iterators, but only if the insert causes the load to be greater than the maximum load.
The `template <class K, class... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K, class\... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
--
@ -990,7 +991,7 @@ template<class K, class... Args>
iterator try_emplace(const_iterator hint, K&& k, Args&&... args);
```
Inserts a new node into the container if there is no existing element with key `k` contained within it.
Inserts a new element into the container if there is no existing element with key `k` contained within it.
If there is an existing element with key `k` this function does nothing.
@ -1019,7 +1020,7 @@ unlike xref:#unordered_node_map_emplace_hint[emplace_hint], which simply forward
Can invalidate iterators, but only if the insert causes the load to be greater than the maximum load.
The `template <class K, class... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K, class\... Args>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs and neither `iterator` nor `const_iterator` are implicitly convertible from `K`. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
--
@ -1258,7 +1259,7 @@ template<class K>
[horizontal]
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `end()` if no such element exists.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1271,7 +1272,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1284,7 +1285,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1300,7 +1301,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_node_set]
== Class template unordered_node_set
== Class Template unordered_node_set
:idprefix: unordered_node_set_
@ -238,6 +238,7 @@ namespace boost {
unordered_node_set<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class T, class H, class P, class A, class Predicate>
typename unordered_node_set<K, T, H, P, A>::size_type
xref:#unordered_node_set_erase_if[erase_if](unordered_node_set<K, T, H, P, A>& c, Predicate pred);
@ -874,7 +875,7 @@ void insert(std::initializer_list<value_type>);
Inserts a range of elements into the container. Elements are inserted if and only if there is no element in the container with an equivalent key.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into the container from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, but only if the insert causes the load to be greater than the maximum load.
@ -1072,7 +1073,7 @@ template<class K>
[horizontal]
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `end()` if no such element exists.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1085,7 +1086,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1098,7 +1099,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1114,7 +1115,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -1,5 +1,5 @@
[#unordered_set]
== Class template unordered_set
== Class Template unordered_set
:idprefix: unordered_set_
@ -245,6 +245,7 @@ namespace boost {
unordered_set<Key, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
// Erasure
template<class K, class H, class P, class A, class Predicate>
typename unordered_set<K, H, P, A>::size_type
xref:#unordered_set_erase_if[erase_if](unordered_set<K, H, P, A>& c, Predicate pred);
@ -959,7 +960,7 @@ void insert(std::initializer_list<value_type>);
Inserts a range of elements into the container. Elements are inserted if and only if there is no element in the container with an equivalent key.
[horizontal]
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/EmplaceConstructible[EmplaceConstructible^] into `X` from `*first`.
Requires:;; `value_type` is https://en.cppreference.com/w/cpp/named_req/CopyInsertable[CopyInsertable^] into the container.
Throws:;; When inserting a single element, if an exception is thrown by an operation other than a call to `hasher` the function has no effect.
Notes:;; Can invalidate iterators, but only if the insert causes the load factor to be greater than or equal to the maximum load factor. +
+
@ -1248,7 +1249,7 @@ template<typename CompatibleKey, typename CompatibleHash, typename CompatiblePre
Returns:;; An iterator pointing to an element with key equivalent to `k`, or `b.end()` if no such element exists.
Notes:;; The templated overloads containing `CompatibleKey`, `CompatibleHash` and `CompatiblePredicate` are non-standard extensions which allow you to use a compatible hash function and equality predicate for a key of a different type in order to avoid an expensive type cast. In general, its use is not encouraged and instead the `K` member function templates should be used. +
+
The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1261,7 +1262,7 @@ template<class K>
[horizontal]
Returns:;; The number of elements with key equivalent to `k`.
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1274,7 +1275,7 @@ template<class K>
[horizontal]
Returns:;; A boolean indicating whether or not there is an element with key equal to `key` in the container
Notes:;; The `template <typename K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overload only participates in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---
@ -1290,7 +1291,7 @@ template<class K>
[horizontal]
Returns:;; A range containing all elements with key equivalent to `k`. If the container doesn't contain any such elements, returns `std::make_pair(b.end(), b.end())`.
Notes:;; The `template <typename K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
Notes:;; The `template<class K>` overloads only participate in overload resolution if `Hash::is_transparent` and `Pred::is_transparent` are valid member typedefs. The library assumes that `Hash` is callable with both `K` and `Key` and that `Pred` is transparent. This enables heterogeneous lookup which avoids the cost of instantiating an instance of the `Key` type.
---

View File

@ -0,0 +1,818 @@
/* Fast open-addressing concurrent hash table.
*
* Copyright 2023 Christian Mazakas.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
#ifndef BOOST_UNORDERED_CONCURRENT_FLAT_MAP_HPP
#define BOOST_UNORDERED_CONCURRENT_FLAT_MAP_HPP
#include <boost/unordered/concurrent_flat_map_fwd.hpp>
#include <boost/unordered/detail/foa/concurrent_table.hpp>
#include <boost/unordered/detail/foa/flat_map_types.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/container_hash/hash.hpp>
#include <boost/core/allocator_access.hpp>
#include <boost/mp11/algorithm.hpp>
#include <boost/mp11/list.hpp>
#include <boost/type_traits/type_identity.hpp>
#include <functional>
#include <type_traits>
#include <utility>
// Compile-time checks that a user-supplied visitation callback F can be
// called with a mutable reference to value_type. Used by the non-const
// visit/insert_or_visit family below.
#define BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F) \
static_assert(boost::unordered::detail::is_invocable<F, value_type&>::value, \
"The provided Callable must be invocable with value_type&");
// Same check, but for callbacks that receive value_type const& (the
// cvisit/const-visitation family).
#define BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F) \
static_assert( \
boost::unordered::detail::is_invocable<F, value_type const&>::value, \
"The provided Callable must be invocable with value_type const&");
// Rejects unsequenced execution policies: visitation callbacks may take
// locks, which is not allowed under unsequenced execution.
// std::execution::unsequenced_policy only exists from C++20 on, hence the
// version split; note the macro argument P is unused — the assertion names
// the enclosing template parameter ExecPolicy directly.
#if BOOST_CXX_VERSION >= 202002L
#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P) \
static_assert(!std::is_base_of<std::execution::parallel_unsequenced_policy, \
ExecPolicy>::value, \
"ExecPolicy must be sequenced."); \
static_assert( \
!std::is_base_of<std::execution::unsequenced_policy, ExecPolicy>::value, \
"ExecPolicy must be sequenced.");
#else
#define BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(P) \
static_assert(!std::is_base_of<std::execution::parallel_unsequenced_policy, \
ExecPolicy>::value, \
"ExecPolicy must be sequenced.");
#endif
// Lets a comma survive as a single macro argument in the helpers below.
#define BOOST_UNORDERED_COMMA ,
// Extracts the last argument of (Arg, Args...) — used to locate the
// trailing callback of emplace_or_visit-style calls.
#define BOOST_UNORDERED_LAST_ARG(Arg, Args) \
mp11::mp_back<mp11::mp_list<Arg BOOST_UNORDERED_COMMA Args> >
// Asserts that the trailing argument of a variadic call is a suitable
// mutable-visitation callback.
#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args) \
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(BOOST_UNORDERED_LAST_ARG(Arg, Args))
// Asserts that the trailing argument of a variadic call is a suitable
// const-visitation callback.
#define BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args) \
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE( \
BOOST_UNORDERED_LAST_ARG(Arg, Args))
namespace boost {
namespace unordered {
namespace detail {
// Pre-C++17 emulation of std::is_invocable<F, Args...>: F is invocable
// with Args... exactly when a std::function<void(Args...)> can be
// constructed from a reference_wrapper around F (reference_wrapper avoids
// requiring F to be copyable). The return value is deliberately discarded
// (void), so only the call expression is checked.
template <class F, class... Args>
struct is_invocable
: std::is_constructible<std::function<void(Args...)>,
std::reference_wrapper<typename std::remove_reference<F>::type> >
{
};
} // namespace detail
// Concurrent hash map with open addressing. There are no iterators: all
// element access goes through visitation member functions, which lets the
// implementation hold internal locks only for the duration of each visit.
template <class Key, class T, class Hash, class Pred, class Allocator>
class concurrent_flat_map
{
private:
// All specializations befriend each other so merge() can reach the
// internal table of a map with different Hash/Pred parameters.
template <class Key2, class T2, class Hash2, class Pred2,
class Allocator2>
friend class concurrent_flat_map;
using type_policy = detail::foa::flat_map_types<Key, T>;
// The entire implementation lives in this internal concurrent table.
detail::foa::concurrent_table<type_policy, Hash, Pred, Allocator> table_;
// Non-member operator== and erase_if need direct access to table_.
template <class K, class V, class H, class KE, class A>
bool friend operator==(concurrent_flat_map<K, V, H, KE, A> const& lhs,
concurrent_flat_map<K, V, H, KE, A> const& rhs);
template <class K, class V, class H, class KE, class A, class Predicate>
friend typename concurrent_flat_map<K, V, H, KE, A>::size_type erase_if(
concurrent_flat_map<K, V, H, KE, A>& set, Predicate pred);
public:
using key_type = Key;
using mapped_type = T;
// value_type is std::pair<Key const, T>; init_type is the same pair
// without the const, used for efficient insertion.
using value_type = typename type_policy::value_type;
using init_type = typename type_policy::init_type;
using size_type = std::size_t;
using difference_type = std::ptrdiff_t;
// type_identity blocks deduction of these parameters in constructors.
using hasher = typename boost::type_identity<Hash>::type;
using key_equal = typename boost::type_identity<Pred>::type;
using allocator_type = typename boost::type_identity<Allocator>::type;
using reference = value_type&;
using const_reference = value_type const&;
using pointer = typename boost::allocator_pointer<allocator_type>::type;
using const_pointer =
typename boost::allocator_const_pointer<allocator_type>::type;
// Default constructor: delegates to the sizing constructor with the
// library's default bucket count.
concurrent_flat_map()
: concurrent_flat_map(detail::foa::default_bucket_count)
{
}
// Primary constructor: size hint plus hash, equality and allocator.
explicit concurrent_flat_map(size_type n, const hasher& hf = hasher(),
const key_equal& eql = key_equal(),
const allocator_type& a = allocator_type())
: table_(n, hf, eql, a)
{
}
// Range constructor: builds an empty table, then inserts [f, l).
template <class InputIterator>
concurrent_flat_map(InputIterator f, InputIterator l,
size_type n = detail::foa::default_bucket_count,
const hasher& hf = hasher(), const key_equal& eql = key_equal(),
const allocator_type& a = allocator_type())
: table_(n, hf, eql, a)
{
this->insert(f, l);
}
// Copy constructor: allocator obtained via
// select_on_container_copy_construction, per the allocator protocol.
concurrent_flat_map(concurrent_flat_map const& rhs)
: table_(rhs.table_,
boost::allocator_select_on_container_copy_construction(
rhs.get_allocator()))
{
}
concurrent_flat_map(concurrent_flat_map&& rhs)
: table_(std::move(rhs.table_))
{
}
// Range + allocator: n == 0 lets the table pick its minimum size.
template <class InputIterator>
concurrent_flat_map(
InputIterator f, InputIterator l, allocator_type const& a)
: concurrent_flat_map(f, l, 0, hasher(), key_equal(), a)
{
}
explicit concurrent_flat_map(allocator_type const& a)
: table_(detail::foa::default_bucket_count, hasher(), key_equal(), a)
{
}
// Extended copy/move constructors with an explicit allocator.
concurrent_flat_map(
concurrent_flat_map const& rhs, allocator_type const& a)
: table_(rhs.table_, a)
{
}
concurrent_flat_map(concurrent_flat_map&& rhs, allocator_type const& a)
: table_(std::move(rhs.table_), a)
{
}
// Initializer-list constructor: sizes the table, then inserts.
concurrent_flat_map(std::initializer_list<value_type> il,
size_type n = detail::foa::default_bucket_count,
const hasher& hf = hasher(), const key_equal& eql = key_equal(),
const allocator_type& a = allocator_type())
: concurrent_flat_map(n, hf, eql, a)
{
this->insert(il.begin(), il.end());
}
// The remaining constructors mirror the unordered-container overload set
// (size/hasher/allocator combinations), all delegating to the primary
// constructors above.
concurrent_flat_map(size_type n, const allocator_type& a)
: concurrent_flat_map(n, hasher(), key_equal(), a)
{
}
concurrent_flat_map(
size_type n, const hasher& hf, const allocator_type& a)
: concurrent_flat_map(n, hf, key_equal(), a)
{
}
template <typename InputIterator>
concurrent_flat_map(
InputIterator f, InputIterator l, size_type n, const allocator_type& a)
: concurrent_flat_map(f, l, n, hasher(), key_equal(), a)
{
}
template <typename InputIterator>
concurrent_flat_map(InputIterator f, InputIterator l, size_type n,
const hasher& hf, const allocator_type& a)
: concurrent_flat_map(f, l, n, hf, key_equal(), a)
{
}
concurrent_flat_map(
std::initializer_list<value_type> il, const allocator_type& a)
: concurrent_flat_map(
il, detail::foa::default_bucket_count, hasher(), key_equal(), a)
{
}
concurrent_flat_map(std::initializer_list<value_type> il, size_type n,
const allocator_type& a)
: concurrent_flat_map(il, n, hasher(), key_equal(), a)
{
}
concurrent_flat_map(std::initializer_list<value_type> il, size_type n,
const hasher& hf, const allocator_type& a)
: concurrent_flat_map(il, n, hf, key_equal(), a)
{
}
~concurrent_flat_map() = default;
// Assignment forwards to the internal table, which implements the
// allocator-propagation rules.
concurrent_flat_map& operator=(concurrent_flat_map const& rhs)
{
table_ = rhs.table_;
return *this;
}
concurrent_flat_map& operator=(concurrent_flat_map&& rhs)
noexcept(boost::allocator_is_always_equal<Allocator>::type::value ||
boost::allocator_propagate_on_container_move_assignment<
Allocator>::type::value)
{
table_ = std::move(rhs.table_);
return *this;
}
concurrent_flat_map& operator=(std::initializer_list<value_type> ilist)
{
table_ = ilist;
return *this;
}
/// Capacity
///
// Number of elements currently stored in the table.
size_type size() const noexcept { return table_.size(); }
// Largest element count the table can theoretically represent.
size_type max_size() const noexcept { return table_.max_size(); }
// True exactly when the table holds no elements.
BOOST_ATTRIBUTE_NODISCARD bool empty() const noexcept
{
return 0 == size();
}
// Invokes f on the element with key k (if any) while holding the
// appropriate internal lock; returns the number of elements visited
// (0 or 1). Non-const overload: f receives value_type&.
template <class F>
BOOST_FORCEINLINE size_type visit(key_type const& k, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
return table_.visit(k, f);
}
// Const overload: f receives value_type const&.
template <class F>
BOOST_FORCEINLINE size_type visit(key_type const& k, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit(k, f);
}
// Forces const visitation even on a non-const map.
template <class F>
BOOST_FORCEINLINE size_type cvisit(key_type const& k, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit(k, f);
}
// Transparent-key variants: enabled only when both hasher and key_equal
// declare is_transparent, so lookup avoids constructing a key_type.
template <class K, class F>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, size_type>::type
visit(K&& k, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
return table_.visit(std::forward<K>(k), f);
}
template <class K, class F>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, size_type>::type
visit(K&& k, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit(std::forward<K>(k), f);
}
template <class K, class F>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, size_type>::type
cvisit(K&& k, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit(std::forward<K>(k), f);
}
// Visits every element in the table; returns the number visited.
template <class F> size_type visit_all(F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
return table_.visit_all(f);
}
template <class F> size_type visit_all(F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.visit_all(f);
}
template <class F> size_type cvisit_all(F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.cvisit_all(f);
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel whole-table visitation. Only sequenced execution policies are
// accepted (see BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY); these
// overloads return void, unlike the serial ones above.
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
void>::type
visit_all(ExecPolicy&& p, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
table_.visit_all(p, f);
}
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
void>::type
visit_all(ExecPolicy&& p, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
table_.visit_all(p, f);
}
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
void>::type
cvisit_all(ExecPolicy&& p, F f) const
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
table_.cvisit_all(p, f);
}
#endif
/// Modifiers
///
// Inserts a value; returns true if an element was inserted, false if an
// equivalent key was already present. The decltype return SFINAEs this
// overload out for argument types the table cannot insert.
template <class Ty>
BOOST_FORCEINLINE auto insert(Ty&& value)
-> decltype(table_.insert(std::forward<Ty>(value)))
{
return table_.insert(std::forward<Ty>(value));
}
BOOST_FORCEINLINE bool insert(init_type&& obj)
{
return table_.insert(std::move(obj));
}
// Inserts every element of [begin, end).
template <class InputIterator>
void insert(InputIterator begin, InputIterator end)
{
for (auto pos = begin; pos != end; ++pos) {
table_.emplace(*pos);
}
}
void insert(std::initializer_list<value_type> ilist)
{
this->insert(ilist.begin(), ilist.end());
}
// Inserts (k, obj) or, if k already exists, assigns obj to the mapped
// value of the existing element; returns true on insertion.
template <class M>
BOOST_FORCEINLINE bool insert_or_assign(key_type const& k, M&& obj)
{
return table_.try_emplace_or_visit(k, std::forward<M>(obj),
[&](value_type& m) { m.second = std::forward<M>(obj); });
}
template <class M>
BOOST_FORCEINLINE bool insert_or_assign(key_type&& k, M&& obj)
{
return table_.try_emplace_or_visit(std::move(k), std::forward<M>(obj),
[&](value_type& m) { m.second = std::forward<M>(obj); });
}
template <class K, class M>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, bool>::type
insert_or_assign(K&& k, M&& obj)
{
return table_.try_emplace_or_visit(std::forward<K>(k),
std::forward<M>(obj),
[&](value_type& m) { m.second = std::forward<M>(obj); });
}
// Inserts value or, if its key already exists, invokes f on the existing
// element under lock; returns true on insertion.
// NOTE(review): unlike the insert_or_cvisit counterpart below, this
// overload performs no INVOCABLE static assert on F — confirm whether
// that is intentional.
template <class Ty, class F>
BOOST_FORCEINLINE auto insert_or_visit(Ty&& value, F f)
-> decltype(table_.insert_or_visit(std::forward<Ty>(value), f))
{
return table_.insert_or_visit(std::forward<Ty>(value), f);
}
template <class F>
BOOST_FORCEINLINE bool insert_or_visit(init_type&& obj, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
return table_.insert_or_visit(std::move(obj), f);
}
template <class InputIterator, class F>
void insert_or_visit(InputIterator first, InputIterator last, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
for (; first != last; ++first) {
table_.emplace_or_visit(*first, f);
}
}
template <class F>
void insert_or_visit(std::initializer_list<value_type> ilist, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE(F)
this->insert_or_visit(ilist.begin(), ilist.end(), f);
}
// insert_or_cvisit: same as insert_or_visit but f receives
// value_type const&.
template <class Ty, class F>
BOOST_FORCEINLINE auto insert_or_cvisit(Ty&& value, F f)
-> decltype(table_.insert_or_cvisit(std::forward<Ty>(value), f))
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.insert_or_cvisit(std::forward<Ty>(value), f);
}
template <class F>
BOOST_FORCEINLINE bool insert_or_cvisit(init_type&& obj, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
return table_.insert_or_cvisit(std::move(obj), f);
}
template <class InputIterator, class F>
void insert_or_cvisit(InputIterator first, InputIterator last, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
for (; first != last; ++first) {
table_.emplace_or_cvisit(*first, f);
}
}
// Inserts every element of ilist; for keys already present, invokes f on
// the existing element as value_type const&.
template <class F>
void insert_or_cvisit(std::initializer_list<value_type> ilist, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE(F)
// Bug fix: this previously delegated to insert_or_visit, which routes
// collisions through the mutable (exclusive-lock) visitation path even
// though this function promises const visitation. Delegate to the
// iterator-pair insert_or_cvisit overload instead.
this->insert_or_cvisit(ilist.begin(), ilist.end(), f);
}
// Constructs an element in place from args; returns true on insertion.
template <class... Args> BOOST_FORCEINLINE bool emplace(Args&&... args)
{
return table_.emplace(std::forward<Args>(args)...);
}
// emplace, with the trailing argument being a visitation callback that is
// invoked on the existing element if the key is already present. The
// Arg/Args split guarantees at least one argument so the callback can be
// statically checked via BOOST_UNORDERED_LAST_ARG.
template <class Arg, class... Args>
BOOST_FORCEINLINE bool emplace_or_visit(Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args...)
return table_.emplace_or_visit(
std::forward<Arg>(arg), std::forward<Args>(args)...);
}
template <class Arg, class... Args>
BOOST_FORCEINLINE bool emplace_or_cvisit(Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...)
return table_.emplace_or_cvisit(
std::forward<Arg>(arg), std::forward<Args>(args)...);
}
// try_emplace: constructs the mapped value from args only if k is not
// already present; returns true on insertion.
template <class... Args>
BOOST_FORCEINLINE bool try_emplace(key_type const& k, Args&&... args)
{
return table_.try_emplace(k, std::forward<Args>(args)...);
}
template <class... Args>
BOOST_FORCEINLINE bool try_emplace(key_type&& k, Args&&... args)
{
return table_.try_emplace(std::move(k), std::forward<Args>(args)...);
}
template <class K, class... Args>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, bool>::type
try_emplace(K&& k, Args&&... args)
{
return table_.try_emplace(
std::forward<K>(k), std::forward<Args>(args)...);
}
// try_emplace + visitation of the existing element on key collision.
template <class Arg, class... Args>
BOOST_FORCEINLINE bool try_emplace_or_visit(
key_type const& k, Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args...)
return table_.try_emplace_or_visit(
k, std::forward<Arg>(arg), std::forward<Args>(args)...);
}
template <class Arg, class... Args>
BOOST_FORCEINLINE bool try_emplace_or_cvisit(
key_type const& k, Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...)
return table_.try_emplace_or_cvisit(
k, std::forward<Arg>(arg), std::forward<Args>(args)...);
}
template <class Arg, class... Args>
BOOST_FORCEINLINE bool try_emplace_or_visit(
key_type&& k, Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args...)
return table_.try_emplace_or_visit(
std::move(k), std::forward<Arg>(arg), std::forward<Args>(args)...);
}
template <class Arg, class... Args>
BOOST_FORCEINLINE bool try_emplace_or_cvisit(
key_type&& k, Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...)
return table_.try_emplace_or_cvisit(
std::move(k), std::forward<Arg>(arg), std::forward<Args>(args)...);
}
template <class K, class Arg, class... Args>
BOOST_FORCEINLINE bool try_emplace_or_visit(
K&& k, Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE(Arg, Args...)
return table_.try_emplace_or_visit(std::forward<K>(k),
std::forward<Arg>(arg), std::forward<Args>(args)...);
}
template <class K, class Arg, class... Args>
BOOST_FORCEINLINE bool try_emplace_or_cvisit(
K&& k, Arg&& arg, Args&&... args)
{
BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE(Arg, Args...)
return table_.try_emplace_or_cvisit(std::forward<K>(k),
std::forward<Arg>(arg), std::forward<Args>(args)...);
}
// Erases the element with key k (if any); returns the number erased.
BOOST_FORCEINLINE size_type erase(key_type const& k)
{
return table_.erase(k);
}
template <class K>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, size_type>::type
erase(K&& k)
{
return table_.erase(std::forward<K>(k));
}
// Erases the element with key k only if f(element) returns true.
template <class F>
BOOST_FORCEINLINE size_type erase_if(key_type const& k, F f)
{
return table_.erase_if(k, f);
}
// The !is_execution_policy guard keeps this overload from colliding with
// the parallel erase_if(ExecPolicy, F) overload below.
template <class K, class F>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value &&
!detail::is_execution_policy<K>::value,
size_type>::type
erase_if(K&& k, F f)
{
return table_.erase_if(std::forward<K>(k), f);
}
#if defined(BOOST_UNORDERED_PARALLEL_ALGORITHMS)
// Parallel whole-table erase_if; returns void, unlike the serial forms.
template <class ExecPolicy, class F>
typename std::enable_if<detail::is_execution_policy<ExecPolicy>::value,
void>::type
erase_if(ExecPolicy&& p, F f)
{
BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY(ExecPolicy)
table_.erase_if(p, f);
}
#endif
// Erases every element for which f returns true; returns the count.
template <class F> size_type erase_if(F f) { return table_.erase_if(f); }
void swap(concurrent_flat_map& other) noexcept(
boost::allocator_is_always_equal<Allocator>::type::value ||
boost::allocator_propagate_on_container_swap<Allocator>::type::value)
{
// Returning a void expression from a void function is well-formed.
return table_.swap(other.table_);
}
void clear() noexcept { table_.clear(); }
// Moves elements of x whose keys are not present here into *this;
// returns the number of elements transferred. Allocators must compare
// equal (asserted, not checked in release builds).
template <typename H2, typename P2>
size_type merge(concurrent_flat_map<Key, T, H2, P2, Allocator>& x)
{
BOOST_ASSERT(get_allocator() == x.get_allocator());
return table_.merge(x.table_);
}
template <typename H2, typename P2>
size_type merge(concurrent_flat_map<Key, T, H2, P2, Allocator>&& x)
{
return merge(x);
}
// Returns the number of elements with key k (0 or 1 — keys are unique).
BOOST_FORCEINLINE size_type count(key_type const& k) const
{
return table_.count(k);
}
// Transparent-key overload.
// Fix: declared const, consistent with the non-transparent count() and
// with both contains() overloads; without const this lookup was not
// callable on a const map. Adding const is backward compatible for all
// existing callers.
template <class K>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, size_type>::type
count(K const& k) const
{
return table_.count(k);
}
// True iff an element with key k is present.
BOOST_FORCEINLINE bool contains(key_type const& k) const
{
return table_.contains(k);
}
template <class K>
BOOST_FORCEINLINE typename std::enable_if<
detail::are_transparent<K, hasher, key_equal>::value, bool>::type
contains(K const& k) const
{
return table_.contains(k);
}
/// Hash Policy
///
// Note: reports the table's capacity; open addressing has no buckets in
// the chained-container sense.
size_type bucket_count() const noexcept { return table_.capacity(); }
float load_factor() const noexcept { return table_.load_factor(); }
// Fix: removed a stray trailing semicolon after the function body (it
// formed an empty declaration, warned about under -Wextra-semi).
float max_load_factor() const noexcept
{
return table_.max_load_factor();
}
// Intentionally a no-op: the maximum load factor is not user-tunable.
void max_load_factor(float) {}
size_type max_load() const noexcept { return table_.max_load(); }
void rehash(size_type n) { table_.rehash(n); }
void reserve(size_type n) { table_.reserve(n); }
/// Observers
///
allocator_type get_allocator() const noexcept
{
return table_.get_allocator();
}
hasher hash_function() const { return table_.hash_function(); }
key_equal key_eq() const { return table_.key_eq(); }
};
// Element-wise equality of two maps (friend of concurrent_flat_map, so it
// can compare the internal tables directly).
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
bool operator==(
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs)
{
return lhs.table_ == rhs.table_;
}
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
bool operator!=(
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs)
{
return !(lhs == rhs);
}
// Non-member swap, forwarding to the member swap.
template <class Key, class T, class Hash, class Pred, class Alloc>
void swap(concurrent_flat_map<Key, T, Hash, Pred, Alloc>& x,
concurrent_flat_map<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)))
{
x.swap(y);
}
// Erases every element of c for which pred returns true; returns the
// number erased (friend of concurrent_flat_map).
template <class K, class T, class H, class P, class A, class Predicate>
typename concurrent_flat_map<K, T, H, P, A>::size_type erase_if(
concurrent_flat_map<K, T, H, P, A>& c, Predicate pred)
{
return c.table_.erase_if(pred);
}
#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES
// Class template argument deduction guides, mirroring those of
// std::unordered_map. The enable_if constraints (is_input_iterator_v,
// is_hash_v, is_pred_v, is_allocator_v) disambiguate the overloads where
// an argument could otherwise be read as either a hasher, a predicate or
// an allocator.
// Iterator range, with optional size/hash/pred/allocator.
template <class InputIterator,
class Hash =
boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,
class Pred =
std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
class Allocator = std::allocator<
boost::unordered::detail::iter_to_alloc_t<InputIterator> >,
class = boost::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
class = boost::enable_if_t<detail::is_hash_v<Hash> >,
class = boost::enable_if_t<detail::is_pred_v<Pred> >,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(InputIterator, InputIterator,
std::size_t = boost::unordered::detail::foa::default_bucket_count,
Hash = Hash(), Pred = Pred(), Allocator = Allocator())
-> concurrent_flat_map<
boost::unordered::detail::iter_key_t<InputIterator>,
boost::unordered::detail::iter_val_t<InputIterator>, Hash, Pred,
Allocator>;
// Initializer list, with optional size/hash/pred/allocator.
template <class Key, class T,
class Hash = boost::hash<boost::remove_const_t<Key> >,
class Pred = std::equal_to<boost::remove_const_t<Key> >,
class Allocator = std::allocator<std::pair<const Key, T> >,
class = boost::enable_if_t<detail::is_hash_v<Hash> >,
class = boost::enable_if_t<detail::is_pred_v<Pred> >,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(std::initializer_list<std::pair<Key, T> >,
std::size_t = boost::unordered::detail::foa::default_bucket_count,
Hash = Hash(), Pred = Pred(), Allocator = Allocator())
-> concurrent_flat_map<boost::remove_const_t<Key>, T, Hash, Pred,
Allocator>;
// Iterator range + size + allocator.
template <class InputIterator, class Allocator,
class = boost::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(InputIterator, InputIterator, std::size_t, Allocator)
-> concurrent_flat_map<
boost::unordered::detail::iter_key_t<InputIterator>,
boost::unordered::detail::iter_val_t<InputIterator>,
boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,
std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
Allocator>;
// Iterator range + allocator.
template <class InputIterator, class Allocator,
class = boost::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(InputIterator, InputIterator, Allocator)
-> concurrent_flat_map<
boost::unordered::detail::iter_key_t<InputIterator>,
boost::unordered::detail::iter_val_t<InputIterator>,
boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,
std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
Allocator>;
// Iterator range + size + hash + allocator.
template <class InputIterator, class Hash, class Allocator,
class = boost::enable_if_t<detail::is_hash_v<Hash> >,
class = boost::enable_if_t<detail::is_input_iterator_v<InputIterator> >,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(
InputIterator, InputIterator, std::size_t, Hash, Allocator)
-> concurrent_flat_map<
boost::unordered::detail::iter_key_t<InputIterator>,
boost::unordered::detail::iter_val_t<InputIterator>, Hash,
std::equal_to<boost::unordered::detail::iter_key_t<InputIterator> >,
Allocator>;
// Initializer list + size + allocator.
template <class Key, class T, class Allocator,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(std::initializer_list<std::pair<Key, T> >, std::size_t,
Allocator) -> concurrent_flat_map<boost::remove_const_t<Key>, T,
boost::hash<boost::remove_const_t<Key> >,
std::equal_to<boost::remove_const_t<Key> >, Allocator>;
// Initializer list + allocator.
template <class Key, class T, class Allocator,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(std::initializer_list<std::pair<Key, T> >, Allocator)
-> concurrent_flat_map<boost::remove_const_t<Key>, T,
boost::hash<boost::remove_const_t<Key> >,
std::equal_to<boost::remove_const_t<Key> >, Allocator>;
// Initializer list + size + hash + allocator.
template <class Key, class T, class Hash, class Allocator,
class = boost::enable_if_t<detail::is_hash_v<Hash> >,
class = boost::enable_if_t<detail::is_allocator_v<Allocator> > >
concurrent_flat_map(std::initializer_list<std::pair<Key, T> >, std::size_t,
Hash, Allocator) -> concurrent_flat_map<boost::remove_const_t<Key>, T,
Hash, std::equal_to<boost::remove_const_t<Key> >, Allocator>;
#endif
} // namespace unordered
using unordered::concurrent_flat_map;
} // namespace boost
#undef BOOST_UNORDERED_STATIC_ASSERT_INVOCABLE
#undef BOOST_UNORDERED_STATIC_ASSERT_CONST_INVOCABLE
#undef BOOST_UNORDERED_STATIC_ASSERT_EXEC_POLICY
#undef BOOST_UNORDERED_COMMA
#undef BOOST_UNORDERED_LAST_ARG
#undef BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_INVOCABLE
#undef BOOST_UNORDERED_STATIC_ASSERT_LAST_ARG_CONST_INVOCABLE
#endif // BOOST_UNORDERED_CONCURRENT_FLAT_MAP_HPP

View File

@ -0,0 +1,54 @@
/* Fast open-addressing concurrent hash table.
*
* Copyright 2023 Christian Mazakas.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
#ifndef BOOST_UNORDERED_CONCURRENT_FLAT_MAP_FWD_HPP
#define BOOST_UNORDERED_CONCURRENT_FLAT_MAP_FWD_HPP
#include <boost/container_hash/hash_fwd.hpp>
#include <functional>
#include <memory>
namespace boost {
namespace unordered {
// Forward declaration of concurrent_flat_map with its default template
// arguments; the full definition lives in
// <boost/unordered/concurrent_flat_map.hpp>.
template <class Key, class T, class Hash = boost::hash<Key>,
class Pred = std::equal_to<Key>,
class Allocator = std::allocator<std::pair<Key const, T> > >
class concurrent_flat_map;
// Forward declarations of the associated non-member functions.
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
bool operator==(
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs);
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
bool operator!=(
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
concurrent_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs);
template <class Key, class T, class Hash, class Pred, class Alloc>
void swap(concurrent_flat_map<Key, T, Hash, Pred, Alloc>& x,
concurrent_flat_map<Key, T, Hash, Pred, Alloc>& y)
noexcept(noexcept(x.swap(y)));
template <class K, class T, class H, class P, class A, class Predicate>
typename concurrent_flat_map<K, T, H, P, A>::size_type erase_if(
concurrent_flat_map<K, T, H, P, A>& c, Predicate pred);
} // namespace unordered
// Hoist the names into namespace boost.
using boost::unordered::concurrent_flat_map;
using boost::unordered::swap;
using boost::unordered::operator==;
using boost::unordered::operator!=;
} // namespace boost
#endif // BOOST_UNORDERED_CONCURRENT_FLAT_MAP_FWD_HPP

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,73 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_UNORDERED_DETAIL_FOA_FLAT_MAP_TYPES_HPP
#define BOOST_UNORDERED_DETAIL_FOA_FLAT_MAP_TYPES_HPP
#include <boost/core/allocator_access.hpp>
namespace boost {
namespace unordered {
namespace detail {
namespace foa {
// Type policy describing how a flat (inline-storage) map stores, moves,
// constructs and destroys its elements; consumed by foa::table_core and
// foa::concurrent_table.
// NOTE(review): this header uses std::pair, std::remove_const and
// std::move but only includes <boost/core/allocator_access.hpp>; it
// appears to rely on transitive includes for <utility>/<type_traits> —
// confirm.
template <class Key, class T> struct flat_map_types
{
using key_type = Key;
using raw_key_type = typename std::remove_const<Key>::type;
using raw_mapped_type = typename std::remove_const<T>::type;
// init_type: the non-const pair used for efficient construction/moves.
using init_type = std::pair<raw_key_type, raw_mapped_type>;
// moved_type: a pair of rvalue references used to relocate an element.
using moved_type = std::pair<raw_key_type&&, raw_mapped_type&&>;
using value_type = std::pair<Key const, T>;
// Flat storage: elements are stored in place, so element == value.
using element_type = value_type;
static value_type& value_from(element_type& x) { return x; }
// Key extraction works for any pair-like (K, V) combination.
template <class K, class V>
static raw_key_type const& extract(std::pair<K, V> const& kv)
{
return kv.first;
}
static moved_type move(init_type& x)
{
return {std::move(x.first), std::move(x.second)};
}
// const_cast strips the const from value_type's key so the element can
// be moved out during relocation/rehash.
static moved_type move(element_type& x)
{
// TODO: we probably need to launder here
return {std::move(const_cast<raw_key_type&>(x.first)),
std::move(const_cast<raw_mapped_type&>(x.second))};
}
// Construction/destruction route through the allocator protocol.
template <class A, class... Args>
static void construct(A& al, init_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
template <class A, class... Args>
static void construct(A& al, value_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
template <class A> static void destroy(A& al, init_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
template <class A> static void destroy(A& al, value_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
};
} // namespace foa
} // namespace detail
} // namespace unordered
} // namespace boost
#endif // BOOST_UNORDERED_DETAIL_FOA_FLAT_MAP_TYPES_HPP

View File

@ -0,0 +1,44 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_UNORDERED_DETAIL_FOA_FLAT_SET_TYPES_HPP
#define BOOST_UNORDERED_DETAIL_FOA_FLAT_SET_TYPES_HPP
#include <boost/core/allocator_access.hpp>
namespace boost {
namespace unordered {
namespace detail {
namespace foa {
// Type policy for a flat (inline-storage) set: the key is the whole
// element, so all the pair machinery of flat_map_types collapses to
// identity operations.
// NOTE(review): uses std::move but the header only includes
// <boost/core/allocator_access.hpp>; presumably <utility> arrives
// transitively — confirm.
template <class Key> struct flat_set_types
{
using key_type = Key;
using init_type = Key;
using value_type = Key;
// The key of an element is the element itself.
static Key const& extract(value_type const& key) { return key; }
using element_type = value_type;
static Key& value_from(element_type& x) { return x; }
// Relocation is a plain move of the stored key.
static element_type&& move(element_type& x) { return std::move(x); }
template <class A, class... Args>
static void construct(A& al, value_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
template <class A> static void destroy(A& al, value_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
};
} // namespace foa
} // namespace detail
} // namespace unordered
} // namespace boost
#endif // BOOST_UNORDERED_DETAIL_FOA_FLAT_SET_TYPES_HPP

View File

@ -0,0 +1,35 @@
/* Copyright 2023 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
// Toggle header (deliberately without an include guard): the first
// inclusion pushes the GCC diagnostic state and disables -Wshadow; a
// second inclusion with BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW defined
// pops it back. NOTE(review): the macro is presumably defined by the
// including translation unit right before the restoring include —
// confirm against the callers.
#include <boost/config.hpp>
#if defined(BOOST_GCC)
#if !defined(BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW)
/* GCC's -Wshadow triggers at scenarios like this:
 *
 * struct foo{};
 * template<typename Base>
 * struct derived:Base
 * {
 * void f(){int foo;}
 * };
 *
 * derived<foo>x;
 * x.f(); // declaration of "foo" in derived::f shadows base type "foo"
 *
 * This makes shadowing warnings unavoidable in general when a class template
 * derives from user-provided classes, as is the case with foa::table_core
 * deriving from empty_value.
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#else
#pragma GCC diagnostic pop
#endif
#endif

View File

@ -0,0 +1,131 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP
#define BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP
#include <boost/core/allocator_access.hpp>
#include <boost/core/pointer_traits.hpp>
namespace boost {
namespace unordered {
namespace detail {
namespace foa {
// Type policy for node-based maps. Table slots do not store the pair
// directly: they store element_type handles, which wrap a pointer `p` to a
// heap-allocated std::pair<Key const, T> (see the x.p / kv.p accesses
// below; foa::element_type itself is defined elsewhere — confirm there).
// The construct/destroy overloads manage that node's lifetime.
template <class Key, class T> struct node_map_types
{
  using key_type = Key;
  using mapped_type = T;
  // const-stripped variants, used where the pair must be mutated/moved
  using raw_key_type = typename std::remove_const<Key>::type;
  using raw_mapped_type = typename std::remove_const<T>::type;

  // mutable pair used for in-flight construction
  using init_type = std::pair<raw_key_type, raw_mapped_type>;
  // the user-visible pair type (key is const)
  using value_type = std::pair<Key const, T>;
  // pair of rvalue refs: lets a pair be moved out memberwise
  using moved_type = std::pair<raw_key_type&&, raw_mapped_type&&>;

  using element_type = foa::element_type<value_type>;

  static value_type& value_from(element_type const& x)
  {
    return *(x.p);
  }

  // key extraction from any pair-like argument
  template <class K, class V>
  static raw_key_type const& extract(std::pair<K, V> const& kv)
  {
    return kv.first;
  }
  // key extraction from a node handle (dereferences the stored pointer)
  static raw_key_type const& extract(element_type const& kv)
  {
    return kv.p->first;
  }

  // moving a handle transfers the node pointer itself, not the pair
  static element_type&& move(element_type& x) { return std::move(x); }
  static moved_type move(init_type& x)
  {
    return {std::move(x.first), std::move(x.second)};
  }
  static moved_type move(value_type& x)
  {
    // const_cast so the const key of value_type can be moved from
    return {std::move(const_cast<raw_key_type&>(x.first)),
      std::move(const_cast<raw_mapped_type&>(x.second))};
  }

  // move-construct a handle: steal the node pointer, leave source empty
  template <class A>
  static void construct(A&, element_type* p, element_type&& x) noexcept
  {
    p->p = x.p;
    x.p = nullptr;
  }

  // copy-construct a handle: deep-copies the pointed-to pair via the
  // variadic overload below (allocates a fresh node)
  template <class A>
  static void construct(
    A& al, element_type* p, element_type const& copy)
  {
    construct(al, p, *copy.p);
  }

  template <class A, class... Args>
  static void construct(A& al, init_type* p, Args&&... args)
  {
    boost::allocator_construct(al, p, std::forward<Args>(args)...);
  }

  template <class A, class... Args>
  static void construct(A& al, value_type* p, Args&&... args)
  {
    boost::allocator_construct(al, p, std::forward<Args>(args)...);
  }

  // allocate a node and construct the pair inside it; if construction
  // throws, the node is returned to the allocator before rethrowing
  // (assumes A allocates value_type nodes — confirm at call sites)
  template <class A, class... Args>
  static void construct(A& al, element_type* p, Args&&... args)
  {
    p->p = boost::to_address(boost::allocator_allocate(al, 1));
    BOOST_TRY
    {
      boost::allocator_construct(al, p->p, std::forward<Args>(args)...);
    }
    BOOST_CATCH(...)
    {
      // recover the allocator's fancy pointer from the raw pointer
      using pointer_type = typename boost::allocator_pointer<A>::type;
      using pointer_traits = boost::pointer_traits<pointer_type>;
      boost::allocator_deallocate(
        al, pointer_traits::pointer_to(*(p->p)), 1);
      BOOST_RETHROW
    }
    BOOST_CATCH_END
  }

  template <class A> static void destroy(A& al, value_type* p) noexcept
  {
    boost::allocator_destroy(al, p);
  }

  template <class A> static void destroy(A& al, init_type* p) noexcept
  {
    boost::allocator_destroy(al, p);
  }

  // destroy the pair and release its node; empty handles (moved-from,
  // p->p == nullptr) are a no-op
  template <class A>
  static void destroy(A& al, element_type* p) noexcept
  {
    if (p->p) {
      using pointer_type = typename boost::allocator_pointer<A>::type;
      using pointer_traits = boost::pointer_traits<pointer_type>;
      destroy(al, p->p);
      boost::allocator_deallocate(
        al, pointer_traits::pointer_to(*(p->p)), 1);
    }
  }
};
} // namespace foa
} // namespace detail
} // namespace unordered
} // namespace boost
#endif // BOOST_UNORDERED_DETAIL_FOA_NODE_MAP_TYPES_HPP

View File

@ -0,0 +1,94 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP
#define BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP
#include <boost/core/allocator_access.hpp>
#include <boost/core/pointer_traits.hpp>
namespace boost {
namespace unordered {
namespace detail {
namespace foa {
template <class Key> struct node_set_types
{
using key_type = Key;
using init_type = Key;
using value_type = Key;
static Key const& extract(value_type const& key) { return key; }
using element_type = foa::element_type<value_type>;
static value_type& value_from(element_type const& x) { return *x.p; }
static Key const& extract(element_type const& k) { return *k.p; }
static element_type&& move(element_type& x) { return std::move(x); }
static value_type&& move(value_type& x) { return std::move(x); }
template <class A>
static void construct(
A& al, element_type* p, element_type const& copy)
{
construct(al, p, *copy.p);
}
template <typename Allocator>
static void construct(
Allocator&, element_type* p, element_type&& x) noexcept
{
p->p = x.p;
x.p = nullptr;
}
template <class A, class... Args>
static void construct(A& al, value_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
template <class A, class... Args>
static void construct(A& al, element_type* p, Args&&... args)
{
p->p = boost::to_address(boost::allocator_allocate(al, 1));
BOOST_TRY
{
boost::allocator_construct(al, p->p, std::forward<Args>(args)...);
}
BOOST_CATCH(...)
{
boost::allocator_deallocate(al,
boost::pointer_traits<typename boost::allocator_pointer<
A>::type>::pointer_to(*p->p),
1);
BOOST_RETHROW
}
BOOST_CATCH_END
}
template <class A> static void destroy(A& al, value_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
template <class A>
static void destroy(A& al, element_type* p) noexcept
{
if (p->p) {
destroy(al, p->p);
boost::allocator_deallocate(al,
boost::pointer_traits<typename boost::allocator_pointer<
A>::type>::pointer_to(*(p->p)),
1);
}
}
};
} // namespace foa
} // namespace detail
} // namespace unordered
} // namespace boost
#endif // BOOST_UNORDERED_DETAIL_FOA_NODE_SET_TYPES_HPP

View File

@ -0,0 +1,11 @@
/* Copyright 2023 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
/* Re-include ignore_wshadow.hpp with the RESTORE flag defined so that it
 * takes its #pragma GCC diagnostic pop branch, undoing the -Wshadow
 * suppression pushed by the first inclusion. The macro is undefined
 * immediately so the pair of headers can be used push/pop style again.
 */
#define BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW
#include <boost/unordered/detail/foa/ignore_wshadow.hpp>
#undef BOOST_UNORDERED_DETAIL_RESTORE_WSHADOW

View File

@ -0,0 +1,187 @@
#ifndef BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
#define BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED
// Copyright 2023 Peter Dimov
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt
#include <boost/core/yield_primitives.hpp>
#include <atomic>
#include <cstdint>
namespace boost{
namespace unordered{
namespace detail{
namespace foa{
// Readers-writer spinlock packed into a single 32-bit atomic word.
// Readers increment the low 30 bits; a writer takes bit 31; bit 30 is a
// hint that a writer is waiting, which blocks new readers (see
// try_lock_shared's st >= reader_lock_count_mask check) so writers are
// not starved. Threads spin `spin_count` times, then yield via
// boost::core::sp_thread_sleep() and retry.
class rw_spinlock
{
private:

    // bit 31: locked exclusive
    // bit 30: writer pending
    // bit 29..0: reader lock count

    static constexpr std::uint32_t locked_exclusive_mask = 1u << 31; // 0x8000'0000
    static constexpr std::uint32_t writer_pending_mask = 1u << 30; // 0x4000'0000
    static constexpr std::uint32_t reader_lock_count_mask = writer_pending_mask - 1; // 0x3FFF'FFFF

    std::atomic<std::uint32_t> state_ = {};

private:

    // number of times to spin before sleeping
    static constexpr int spin_count = 24576;

public:

    // Single CAS attempt at a reader lock; no retry loop.
    bool try_lock_shared() noexcept
    {
        std::uint32_t st = state_.load( std::memory_order_relaxed );

        if( st >= reader_lock_count_mask )
        {
            // either bit 31 set, bit 30 set, or reader count is max
            return false;
        }

        std::uint32_t newst = st + 1;
        return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
    }

    // Acquire a reader lock, spinning then sleeping until no writer holds
    // or is pending on the lock.
    void lock_shared() noexcept
    {
        for( ;; )
        {
            for( int k = 0; k < spin_count; ++k )
            {
                std::uint32_t st = state_.load( std::memory_order_relaxed );

                if( st < reader_lock_count_mask )
                {
                    std::uint32_t newst = st + 1;
                    if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
                }

                boost::core::sp_thread_pause();
            }

            boost::core::sp_thread_sleep();
        }
    }

    void unlock_shared() noexcept
    {
        // pre: locked shared, not locked exclusive

        state_.fetch_sub( 1, std::memory_order_release );

        // if the writer pending bit is set, there's a writer waiting
        // let it acquire the lock; it will clear the bit on unlock
    }

    // Single CAS attempt at the exclusive lock. Note that on success newst
    // also clears a set writer-pending bit; a sleeping writer re-derives
    // its state when it retries (see the loops in lock()).
    bool try_lock() noexcept
    {
        std::uint32_t st = state_.load( std::memory_order_relaxed );

        if( st & locked_exclusive_mask )
        {
            // locked exclusive
            return false;
        }

        if( st & reader_lock_count_mask )
        {
            // locked shared
            return false;
        }

        std::uint32_t newst = locked_exclusive_mask;
        return state_.compare_exchange_strong( st, newst, std::memory_order_acquire, std::memory_order_relaxed );
    }

    // Acquire the exclusive lock. While spinning over readers, sets the
    // writer-pending bit to stop new readers from entering; the bit is
    // cleared again before sleeping so readers aren't blocked while no
    // writer is actively spinning.
    void lock() noexcept
    {
        for( ;; )
        {
            for( int k = 0; k < spin_count; ++k )
            {
                std::uint32_t st = state_.load( std::memory_order_relaxed );

                if( st & locked_exclusive_mask )
                {
                    // locked exclusive, spin
                }
                else if( ( st & reader_lock_count_mask ) == 0 )
                {
                    // not locked exclusive, not locked shared, try to lock
                    std::uint32_t newst = locked_exclusive_mask;
                    if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
                }
                else if( st & writer_pending_mask )
                {
                    // writer pending bit already set, nothing to do
                }
                else
                {
                    // locked shared, set writer pending bit
                    std::uint32_t newst = st | writer_pending_mask;
                    state_.compare_exchange_weak( st, newst, std::memory_order_relaxed, std::memory_order_relaxed );
                }

                boost::core::sp_thread_pause();
            }

            // clear writer pending bit before going to sleep
            {
                std::uint32_t st = state_.load( std::memory_order_relaxed );

                for( ;; )
                {
                    if( st & locked_exclusive_mask )
                    {
                        // locked exclusive, nothing to do
                        break;
                    }
                    else if( ( st & reader_lock_count_mask ) == 0 )
                    {
                        // lock free, try to take it
                        std::uint32_t newst = locked_exclusive_mask;
                        if( state_.compare_exchange_weak( st, newst, std::memory_order_acquire, std::memory_order_relaxed ) ) return;
                    }
                    else if( ( st & writer_pending_mask ) == 0 )
                    {
                        // writer pending bit already clear, nothing to do
                        break;
                    }
                    else
                    {
                        // clear writer pending bit
                        std::uint32_t newst = st & ~writer_pending_mask;
                        if( state_.compare_exchange_weak( st, newst, std::memory_order_relaxed, std::memory_order_relaxed ) ) break;
                    }
                }
            }

            boost::core::sp_thread_sleep();
        }
    }

    void unlock() noexcept
    {
        // pre: locked exclusive, not locked shared
        // storing 0 also clears any writer-pending bit
        state_.store( 0, std::memory_order_release );
    }
};
} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */
#endif // BOOST_UNORDERED_DETAIL_FOA_RW_SPINLOCK_HPP_INCLUDED

View File

@ -0,0 +1,513 @@
/* Fast open-addressing hash table.
*
* Copyright 2022-2023 Joaquin M Lopez Munoz.
* Copyright 2023 Christian Mazakas.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
#ifndef BOOST_UNORDERED_DETAIL_FOA_TABLE_HPP
#define BOOST_UNORDERED_DETAIL_FOA_TABLE_HPP
#include <boost/assert.hpp>
#include <boost/config.hpp>
#include <boost/config/workaround.hpp>
#include <boost/unordered/detail/foa/core.hpp>
#include <cstddef>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
namespace boost{
namespace unordered{
namespace detail{
namespace foa{
/* use plain integrals for group metadata storage */
// Thin wrapper holding a plain (non-atomic) integral while exposing the
// assignment/|=/&= interface the group-metadata code uses, so the same
// group code can be parameterized over plain or other storage.
template<typename Integral>
struct plain_integral
{
  operator Integral()const{return n;}
  void operator=(Integral m){n=m;}

#if BOOST_WORKAROUND(BOOST_GCC,>=50000 && BOOST_GCC<60000)
  // NOTE(review): explicit casts presumably silence GCC 5's warnings
  // about integer promotion in compound bit ops — confirm original
  // motivation before touching.
  void operator|=(Integral m){n=static_cast<Integral>(n|m);}
  void operator&=(Integral m){n=static_cast<Integral>(n&m);}
#else
  void operator|=(Integral m){n|=m;}
  void operator&=(Integral m){n&=m;}
#endif

  Integral n;
};
// Non-atomic size bookkeeping for the table.
struct plain_size_control
{
  std::size_t ml;   // max-load threshold: emplace rehashes once size >= ml
  std::size_t size; // current number of elements
};
template<typename,typename,typename,typename>
class table;
/* table_iterator keeps two pointers:
*
* - A pointer p to the element slot.
* - A pointer pc to the n-th byte of the associated group metadata, where n
* is the position of the element in the group.
*
* A simpler solution would have been to keep a pointer p to the element, a
* pointer pg to the group, and the position n, but that would increase
* sizeof(table_iterator) by 4/8 bytes. In order to make this compact
* representation feasible, it is required that group objects are aligned
* to their size, so that we can recover pg and n as
*
* - n = pc%sizeof(group)
* - pg = pc-n
*
* (for explanatory purposes pg and pc are treated above as if they were memory
* addresses rather than pointers).
*
* p = nullptr is conventionally used to mark end() iterators.
*/
/* internal conversion from const_iterator to iterator */
struct const_iterator_cast_tag{};
// Forward iterator over the table (see the representation notes above:
// pc points into the group metadata byte for this element, p to the
// element slot; p == nullptr marks end()).
template<typename TypePolicy,typename Group,bool Const>
class table_iterator
{
  using type_policy=TypePolicy;
  using table_element_type=typename type_policy::element_type;
  using group_type=Group;
  static constexpr auto N=group_type::N;
  static constexpr auto regular_layout=group_type::regular_layout;

public:
  using difference_type=std::ptrdiff_t;
  using value_type=typename type_policy::value_type;
  using pointer=
    typename std::conditional<Const,value_type const*,value_type*>::type;
  using reference=
    typename std::conditional<Const,value_type const&,value_type&>::type;
  using iterator_category=std::forward_iterator_tag;
  using element_type=
    typename std::conditional<Const,value_type const,value_type>::type;

  table_iterator()=default;
  /* iterator -> const_iterator conversion (enabled only when source is
   * the non-const flavor) */
  template<bool Const2,typename std::enable_if<!Const2>::type* =nullptr>
  table_iterator(const table_iterator<TypePolicy,Group,Const2>& x):
    pc{x.pc},p{x.p}{}
  /* tagged const_iterator -> iterator cast, internal use only */
  table_iterator(
    const_iterator_cast_tag, const table_iterator<TypePolicy,Group,true>& x):
    pc{x.pc},p{x.p}{}

  inline reference operator*()const noexcept
    {return type_policy::value_from(*p);}
  inline pointer operator->()const noexcept
    {return std::addressof(type_policy::value_from(*p));}
  inline table_iterator& operator++()noexcept{increment();return *this;}
  inline table_iterator operator++(int)noexcept
    {auto x=*this;increment();return x;}
  /* comparing p alone suffices: all end() iterators have p == nullptr */
  friend inline bool operator==(
    const table_iterator& x,const table_iterator& y)
    {return x.p==y.p;}
  friend inline bool operator!=(
    const table_iterator& x,const table_iterator& y)
    {return !(x==y);}

private:
  template<typename,typename,bool> friend class table_iterator;
  template<typename,typename,typename,typename> friend class table;

  table_iterator(Group* pg,std::size_t n,const table_element_type* p_):
    pc{reinterpret_cast<unsigned char*>(const_cast<group_type*>(pg))+n},
    p{const_cast<table_element_type*>(p_)}
  {}

  /* dispatch on the group's metadata layout at compile time */
  inline void increment()noexcept
  {
    BOOST_ASSERT(p!=nullptr);
    increment(std::integral_constant<bool,regular_layout>{});
  }

  /* regular layout: one metadata byte per slot, scanned byte by byte
   * within the current group, then group by group via match_occupied() */
  inline void increment(std::true_type /* regular layout */)noexcept
  {
    for(;;){
      ++p;
      /* pc%sizeof(group) recovers the in-group position n */
      if(reinterpret_cast<uintptr_t>(pc)%sizeof(group_type)==N-1){
        pc+=sizeof(group_type)-(N-1);
        break;
      }
      ++pc;
      if(!group_type::is_occupied(pc))continue;
      /* sentinel byte marks the end of the whole array */
      if(BOOST_UNLIKELY(group_type::is_sentinel(pc)))p=nullptr;
      return;
    }

    /* scan whole groups until one has an occupied slot */
    for(;;){
      int mask=reinterpret_cast<group_type*>(pc)->match_occupied();
      if(mask!=0){
        auto n=unchecked_countr_zero(mask);
        if(BOOST_UNLIKELY(reinterpret_cast<group_type*>(pc)->is_sentinel(n))){
          p=nullptr;
        }
        else{
          pc+=n;
          p+=n;
        }
        return;
      }
      pc+=sizeof(group_type);
      p+=N;
    }
  }

  /* interleaved layout: occupancy is only available per group, so mask
   * out positions <= current one and scan group-wise */
  inline void increment(std::false_type /* interleaved */)noexcept
  {
    std::size_t n0=reinterpret_cast<uintptr_t>(pc)%sizeof(group_type);
    pc-=n0;

    int mask=(
      reinterpret_cast<group_type*>(pc)->match_occupied()>>(n0+1))<<(n0+1);
    if(!mask){
      do{
        pc+=sizeof(group_type);
        p+=N;
      }
      while((mask=reinterpret_cast<group_type*>(pc)->match_occupied())==0);
    }

    auto n=unchecked_countr_zero(mask);
    if(BOOST_UNLIKELY(reinterpret_cast<group_type*>(pc)->is_sentinel(n))){
      p=nullptr;
    }
    else{
      pc+=n;
      p-=n0;
      p+=n;
    }
  }

  unsigned char *pc=nullptr;
  table_element_type *p=nullptr;
};
/* foa::table interface departs in a number of ways from that of C++ unordered
* associative containers because it's not for end-user consumption
* (boost::unordered_(flat|node)_(map|set) wrappers complete it as
* appropriate).
*
* The table supports two main modes of operation: flat and node-based. In the
* flat case, buckets directly store elements. For node-based, buckets store
* pointers to individually heap-allocated elements.
*
* For both flat and node-based:
*
* - begin() is not O(1).
* - No bucket API.
* - Load factor is fixed and can't be set by the user.
*
* For flat only:
*
* - value_type must be moveable.
* - Pointer stability is not kept under rehashing.
* - No extract API.
*
* try_emplace, erase and find support heterogeneous lookup by default,
* that is, without checking for any ::is_transparent typedefs --the
* checking is done by boost::unordered_(flat|node)_(map|set).
*/
/* table_core instantiated with plain (non-atomic) group metadata and size
 * control — the single-threaded configuration of the core. */
template <typename TypePolicy,typename Hash,typename Pred,typename Allocator>
using table_core_impl=
  table_core<TypePolicy,group15<plain_integral>,table_arrays,
    plain_size_control,Hash,Pred,Allocator>;
#include <boost/unordered/detail/foa/ignore_wshadow.hpp>
#if defined(BOOST_MSVC)
#pragma warning(push)
#pragma warning(disable:4714) /* marked as __forceinline not inlined */
#endif
/* Single-threaded table façade over table_core (see interface notes
 * above). Lookup/storage is inherited from super; this layer adds the
 * iterator-based API. */
template<typename TypePolicy,typename Hash,typename Pred,typename Allocator>
class table:table_core_impl<TypePolicy,Hash,Pred,Allocator>
{
  using super=table_core_impl<TypePolicy,Hash,Pred,Allocator>;
  using type_policy=typename super::type_policy;
  using group_type=typename super::group_type;
  using super::N;
  using prober=typename super::prober;
  using locator=typename super::locator;

public:
  using key_type=typename super::key_type;
  using init_type=typename super::init_type;
  using value_type=typename super::value_type;
  using element_type=typename super::element_type;

private:
  /* sets (key_type == value_type) only expose const iterators */
  static constexpr bool has_mutable_iterator=
    !std::is_same<key_type,value_type>::value;

public:
  using hasher=typename super::hasher;
  using key_equal=typename super::key_equal;
  using allocator_type=typename super::allocator_type;
  using pointer=typename super::pointer;
  using const_pointer=typename super::const_pointer;
  using reference=typename super::reference;
  using const_reference=typename super::const_reference;
  using size_type=typename super::size_type;
  using difference_type=typename super::difference_type;
  using const_iterator=table_iterator<type_policy,group_type,true>;
  using iterator=typename std::conditional<
    has_mutable_iterator,
    table_iterator<type_policy,group_type,false>,
    const_iterator>::type;

  table(
    std::size_t n=default_bucket_count,const Hash& h_=Hash(),
    const Pred& pred_=Pred(),const Allocator& al_=Allocator()):
    super{n,h_,pred_,al_}
  {}

  table(const table& x)=default;
  table(table&& x)=default;
  table(const table& x,const Allocator& al_):super{x,al_}{}
  table(table&& x,const Allocator& al_):super{std::move(x),al_}{}
  ~table()=default;

  table& operator=(const table& x)=default;
  table& operator=(table&& x)=default;
  using super::get_allocator;

  /* not O(1): starts at slot 0 and advances once if that slot is empty
   * (bit 0 of the first group's occupancy mask), letting the iterator's
   * increment logic find the first occupied slot */
  iterator begin()noexcept
  {
    iterator it{this->arrays.groups,0,this->arrays.elements};
    if(this->arrays.elements&&
       !(this->arrays.groups[0].match_occupied()&0x1))++it;
    return it;
  }

  const_iterator begin()const noexcept
    {return const_cast<table*>(this)->begin();}
  iterator end()noexcept{return {};}
  const_iterator end()const noexcept{return const_cast<table*>(this)->end();}
  const_iterator cbegin()const noexcept{return begin();}
  const_iterator cend()const noexcept{return end();}

  using super::empty;
  using super::size;
  using super::max_size;

  /* builds the element out-of-line first, then inserts its moved form;
   * returns {iterator, inserted?} */
  template<typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> emplace(Args&&... args)
  {
    auto x=alloc_make_insert_type<type_policy>(
      this->al(),std::forward<Args>(args)...);
    return emplace_impl(type_policy::move(x.value()));
  }

  template<typename Key,typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> try_emplace(
    Key&& x,Args&&... args)
  {
    return emplace_impl(
      try_emplace_args_t{},std::forward<Key>(x),std::forward<Args>(args)...);
  }

  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(const init_type& x){return emplace_impl(x);}

  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(init_type&& x){return emplace_impl(std::move(x));}

  /* template<typename=void> tilts call ambiguities in favor of init_type */

  template<typename=void>
  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(const value_type& x){return emplace_impl(x);}

  template<typename=void>
  BOOST_FORCEINLINE std::pair<iterator,bool>
  insert(value_type&& x){return emplace_impl(std::move(x));}

  /* node reinsertion; only participates when element_type is a distinct
   * (node handle) type */
  template<typename T=element_type>
  BOOST_FORCEINLINE
  typename std::enable_if<
    !std::is_same<T,value_type>::value,
    std::pair<iterator,bool>
  >::type
  insert(element_type&& x){return emplace_impl(std::move(x));}

  /* only available when iterator and const_iterator differ */
  template<
    bool dependent_value=false,
    typename std::enable_if<
      has_mutable_iterator||dependent_value>::type* =nullptr
  >
  void erase(iterator pos)noexcept{return erase(const_iterator(pos));}

  BOOST_FORCEINLINE
  void erase(const_iterator pos)noexcept
  {
    super::erase(pos.pc,pos.p);
  }

  /* erase-by-key; SFINAEd out for iterator-convertible arguments so the
   * overloads above win. Returns the number of elements erased (0 or 1). */
  template<typename Key>
  BOOST_FORCEINLINE
  auto erase(Key&& x) -> typename std::enable_if<
    !std::is_convertible<Key,iterator>::value&&
    !std::is_convertible<Key,const_iterator>::value, std::size_t>::type
  {
    auto it=find(x);
    if(it!=end()){
      erase(it);
      return 1;
    }
    else return 0;
  }

  void swap(table& x)
    noexcept(noexcept(std::declval<super&>().swap(std::declval<super&>())))
  {
    super::swap(x);
  }

  using super::clear;

  /* moves the element out, then erase_on_exit removes the now
   * moved-from slot on scope exit */
  element_type extract(const_iterator pos)
  {
    BOOST_ASSERT(pos!=end());
    erase_on_exit e{*this,pos};
    (void)e;
    return std::move(*pos.p);
  }

  // TODO: should we accept different allocator too?
  /* transfers every element of x whose key is not already present; on
   * failed insertion erase_on_exit::rollback keeps the element in x */
  template<typename Hash2,typename Pred2>
  void merge(table<TypePolicy,Hash2,Pred2,Allocator>& x)
  {
    x.for_all_elements([&,this](group_type* pg,unsigned int n,element_type* p){
      erase_on_exit e{x,{pg,n,p}};
      if(!emplace_impl(type_policy::move(*p)).second)e.rollback();
    });
  }

  template<typename Hash2,typename Pred2>
  void merge(table<TypePolicy,Hash2,Pred2,Allocator>&& x){merge(x);}

  using super::hash_function;
  using super::key_eq;

  /* heterogeneous lookup: no ::is_transparent check at this level (the
   * public wrappers do that, per the notes above) */
  template<typename Key>
  BOOST_FORCEINLINE iterator find(const Key& x)
  {
    return make_iterator(super::find(x));
  }

  template<typename Key>
  BOOST_FORCEINLINE const_iterator find(const Key& x)const
  {
    return const_cast<table*>(this)->find(x);
  }

  using super::capacity;
  using super::load_factor;
  using super::max_load_factor;
  using super::max_load;

  using super::rehash;
  using super::reserve;

  /* erases every element satisfying pr; returns the number erased.
   * Sets pass const references to pr, maps mutable ones. */
  template<typename Predicate>
  friend std::size_t erase_if(table& x,Predicate& pr)
  {
    using value_reference=typename std::conditional<
      std::is_same<key_type,value_type>::value,
      const_reference,
      reference
    >::type;

    std::size_t s=x.size();
    x.for_all_elements(
      [&](group_type* pg,unsigned int n,element_type* p){
        if(pr(const_cast<value_reference>(type_policy::value_from(*p)))){
          x.super::erase(pg,n,p);
        }
      });
    return std::size_t(s-x.size());
  }

  friend bool operator==(const table& x,const table& y)
  {
    return static_cast<const super&>(x)==static_cast<const super&>(y);
  }

  friend bool operator!=(const table& x,const table& y){return !(x==y);}

private:
  /* RAII helper: erases `it` from `x` on destruction unless rollback()
   * was called (used by extract and merge) */
  struct erase_on_exit
  {
    erase_on_exit(table& x_,const_iterator it_):x{x_},it{it_}{}
    ~erase_on_exit(){if(!rollback_)x.erase(it);}

    void rollback(){rollback_=true;}

    table&         x;
    const_iterator it;
    bool           rollback_=false;
  };

  static inline iterator make_iterator(const locator& l)noexcept
  {
    return {l.pg,l.n,l.p};
  }

  /* common insertion path: find first; if absent, fast-path emplace when
   * under the max-load threshold, otherwise rehash-and-emplace */
  template<typename... Args>
  BOOST_FORCEINLINE std::pair<iterator,bool> emplace_impl(Args&&... args)
  {
    const auto& k=this->key_from(std::forward<Args>(args)...);
    auto hash=this->hash_for(k);
    auto pos0=this->position_for(hash);
    auto loc=super::find(k,pos0,hash);

    if(loc){
      return {make_iterator(loc),false};
    }
    if(BOOST_LIKELY(this->size_ctrl.size<this->size_ctrl.ml)){
      return {
        make_iterator(
          this->unchecked_emplace_at(pos0,hash,std::forward<Args>(args)...)),
        true
      };
    }
    else{
      return {
        make_iterator(
          this->unchecked_emplace_with_rehash(
            hash,std::forward<Args>(args)...)),
        true
      };
    }
  }
};
#if defined(BOOST_MSVC)
#pragma warning(pop) /* C4714 */
#endif
#include <boost/unordered/detail/foa/restore_wshadow.hpp>
} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */
#endif

View File

@ -0,0 +1,52 @@
/* Copyright 2023 Joaquin M Lopez Munoz.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*
* See https://www.boost.org/libs/unordered for library home page.
*/
#ifndef BOOST_UNORDERED_DETAIL_FOA_TUPLE_ROTATE_RIGHT_HPP
#define BOOST_UNORDERED_DETAIL_FOA_TUPLE_ROTATE_RIGHT_HPP
#include <boost/mp11/algorithm.hpp>
#include <boost/mp11/integer_sequence.hpp>
#include <tuple>
#include <utility>
namespace boost{
namespace unordered{
namespace detail{
namespace foa{
/* result type of tuple_rotate_right: the (cv/ref-stripped) tuple type
 * with its member list rotated right by one position */
template<typename Tuple>
using tuple_rotate_right_return_type=mp11::mp_rotate_right_c<
  typename std::remove_cv<typename std::remove_reference<Tuple>::type>::type,
  1
>;
/* implementation helper: element i of the result is taken from position
 * (i+N-1)%N of x, i.e. the last element moves to the front */
template<std::size_t... Is,typename Tuple>
tuple_rotate_right_return_type<Tuple>
tuple_rotate_right_aux(mp11::index_sequence<Is...>,Tuple&& x)
{
  return tuple_rotate_right_return_type<Tuple>{
    std::get<(Is+sizeof...(Is)-1)%sizeof...(Is)>(std::forward<Tuple>(x))...};
}
/* Returns a copy of x with its elements rotated right by one position
 * (last element first). Elements are copied from lvalue tuples and moved
 * from rvalue ones, via std::get on the forwarded argument. */
template<typename Tuple>
tuple_rotate_right_return_type<Tuple> tuple_rotate_right(Tuple&& x)
{
  using raw_tuple_type=typename std::remove_cv<
    typename std::remove_reference<Tuple>::type>::type;
  using indices=
    mp11::make_index_sequence<std::tuple_size<raw_tuple_type>::value>;

  return tuple_rotate_right_aux(indices{},std::forward<Tuple>(x));
}
} /* namespace foa */
} /* namespace detail */
} /* namespace unordered */
} /* namespace boost */
#endif

View File

@ -61,4 +61,90 @@ namespace boost {
}
}
// BOOST_UNORDERED_EMPLACE_LIMIT = The maximum number of parameters in
// emplace (not including things like hints). Don't set it to a lower value, as
// that might break something.
#if !defined BOOST_UNORDERED_EMPLACE_LIMIT
#define BOOST_UNORDERED_EMPLACE_LIMIT 10
#endif
////////////////////////////////////////////////////////////////////////////////
// Configuration
//
// Unless documented elsewhere these configuration macros should be considered
// an implementation detail, I'll try not to break them, but you never know.
// Use Sun C++ workarounds
// I'm not sure which versions of the compiler require these workarounds, so
// I'm just using them for everything older than the current test compilers
// (as of May 2017).
#if !defined(BOOST_UNORDERED_SUN_WORKAROUNDS1)
#if BOOST_COMP_SUNPRO && BOOST_COMP_SUNPRO < BOOST_VERSION_NUMBER(5, 20, 0)
#define BOOST_UNORDERED_SUN_WORKAROUNDS1 1
#else
#define BOOST_UNORDERED_SUN_WORKAROUNDS1 0
#endif
#endif
// BOOST_UNORDERED_TUPLE_ARGS
//
// Maximum number of std::tuple members to support, or 0 if std::tuple
// isn't available. More are supported when full C++11 is used.
// Already defined, so do nothing
#if defined(BOOST_UNORDERED_TUPLE_ARGS)
// Assume if we have C++11 tuple it's properly variadic,
// and just use a max number of 10 arguments.
#elif !defined(BOOST_NO_CXX11_HDR_TUPLE)
#define BOOST_UNORDERED_TUPLE_ARGS 10
// Visual C++ has a decent enough tuple for piecewise construction,
// so use that if available, using _VARIADIC_MAX for the maximum
// number of parameters. Note that this comes after the check
// for a full C++11 tuple.
#elif defined(BOOST_MSVC)
#if !BOOST_UNORDERED_HAVE_PIECEWISE_CONSTRUCT
#define BOOST_UNORDERED_TUPLE_ARGS 0
#elif defined(_VARIADIC_MAX)
#define BOOST_UNORDERED_TUPLE_ARGS _VARIADIC_MAX
#else
#define BOOST_UNORDERED_TUPLE_ARGS 5
#endif
// Assume that we don't have std::tuple
#else
#define BOOST_UNORDERED_TUPLE_ARGS 0
#endif
#if BOOST_UNORDERED_TUPLE_ARGS
#include <tuple>
#endif
// BOOST_UNORDERED_CXX11_CONSTRUCTION
//
// Use C++11 construction, requires variadic arguments, good construct support
// in allocator_traits and piecewise construction of std::pair
// Otherwise allocators aren't used for construction/destruction
#if BOOST_UNORDERED_HAVE_PIECEWISE_CONSTRUCT && \
!defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && BOOST_UNORDERED_TUPLE_ARGS
#if BOOST_COMP_SUNPRO && BOOST_LIB_STD_GNU
// Sun C++ std::pair piecewise construction doesn't seem to be exception safe.
// (At least for Sun C++ 12.5 using libstdc++).
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 0
#elif BOOST_COMP_GNUC && BOOST_COMP_GNUC < BOOST_VERSION_NUMBER(4, 7, 0)
// Piecewise construction in GCC 4.6 doesn't work for uncopyable types.
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 0
#elif !defined(BOOST_NO_CXX11_ALLOCATOR)
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 1
#endif
#endif
#if !defined(BOOST_UNORDERED_CXX11_CONSTRUCTION)
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 0
#endif
#endif

View File

@ -60,92 +60,6 @@
#include <type_traits>
#endif
////////////////////////////////////////////////////////////////////////////////
// Configuration
//
// Unless documented elsewhere these configuration macros should be considered
// an implementation detail, I'll try not to break them, but you never know.
// Use Sun C++ workarounds
// I'm not sure which versions of the compiler require these workarounds, so
// I'm just using them for everything older than the current test compilers
// (as of May 2017).
#if !defined(BOOST_UNORDERED_SUN_WORKAROUNDS1)
#if BOOST_COMP_SUNPRO && BOOST_COMP_SUNPRO < BOOST_VERSION_NUMBER(5, 20, 0)
#define BOOST_UNORDERED_SUN_WORKAROUNDS1 1
#else
#define BOOST_UNORDERED_SUN_WORKAROUNDS1 0
#endif
#endif
// BOOST_UNORDERED_EMPLACE_LIMIT = The maximum number of parameters in
// emplace (not including things like hints). Don't set it to a lower value, as
// that might break something.
#if !defined BOOST_UNORDERED_EMPLACE_LIMIT
#define BOOST_UNORDERED_EMPLACE_LIMIT 10
#endif
// BOOST_UNORDERED_TUPLE_ARGS
//
// Maximum number of std::tuple members to support, or 0 if std::tuple
// isn't available. More are supported when full C++11 is used.
// Already defined, so do nothing
#if defined(BOOST_UNORDERED_TUPLE_ARGS)
// Assume if we have C++11 tuple it's properly variadic,
// and just use a max number of 10 arguments.
#elif !defined(BOOST_NO_CXX11_HDR_TUPLE)
#define BOOST_UNORDERED_TUPLE_ARGS 10
// Visual C++ has a decent enough tuple for piecewise construction,
// so use that if available, using _VARIADIC_MAX for the maximum
// number of parameters. Note that this comes after the check
// for a full C++11 tuple.
#elif defined(BOOST_MSVC)
#if !BOOST_UNORDERED_HAVE_PIECEWISE_CONSTRUCT
#define BOOST_UNORDERED_TUPLE_ARGS 0
#elif defined(_VARIADIC_MAX)
#define BOOST_UNORDERED_TUPLE_ARGS _VARIADIC_MAX
#else
#define BOOST_UNORDERED_TUPLE_ARGS 5
#endif
// Assume that we don't have std::tuple
#else
#define BOOST_UNORDERED_TUPLE_ARGS 0
#endif
#if BOOST_UNORDERED_TUPLE_ARGS
#include <tuple>
#endif
// BOOST_UNORDERED_CXX11_CONSTRUCTION
//
// Use C++11 construction, requires variadic arguments, good construct support
// in allocator_traits and piecewise construction of std::pair
// Otherwise allocators aren't used for construction/destruction
#if BOOST_UNORDERED_HAVE_PIECEWISE_CONSTRUCT && \
!defined(BOOST_NO_CXX11_VARIADIC_TEMPLATES) && BOOST_UNORDERED_TUPLE_ARGS
#if BOOST_COMP_SUNPRO && BOOST_LIB_STD_GNU
// Sun C++ std::pair piecewise construction doesn't seem to be exception safe.
// (At least for Sun C++ 12.5 using libstdc++).
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 0
#elif BOOST_COMP_GNUC && BOOST_COMP_GNUC < BOOST_VERSION_NUMBER(4, 7, 0)
// Piecewise construction in GCC 4.6 doesn't work for uncopyable types.
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 0
#elif !defined(BOOST_NO_CXX11_ALLOCATOR)
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 1
#endif
#endif
#if !defined(BOOST_UNORDERED_CXX11_CONSTRUCTION)
#define BOOST_UNORDERED_CXX11_CONSTRUCTION 0
#endif
#if BOOST_UNORDERED_CXX11_CONSTRUCTION
#include <boost/mp11/list.hpp>
#include <boost/mp11/algorithm.hpp>

View File

@ -20,6 +20,9 @@
#include <boost/type_traits/enable_if.hpp>
#include <boost/type_traits/is_integral.hpp>
#include <boost/type_traits/remove_const.hpp>
#include <iterator>
#include <utility>
#endif
// BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES
@ -101,6 +104,16 @@ namespace boost {
!boost::is_integral<H>::value && !is_allocator_v<H>;
template <class P> constexpr bool const is_pred_v = !is_allocator_v<P>;
/* helpers for iterator-pair deduction (NOTE(review): presumably consumed
 * by the deduction guides gated above — confirm): */
/* key type of a pair-valued iterator */
template <typename T>
using iter_key_t =
  typename std::iterator_traits<T>::value_type::first_type;
/* mapped type of a pair-valued iterator */
template <typename T>
using iter_val_t =
  typename std::iterator_traits<T>::value_type::second_type;
/* the map's allocator value_type: pair with const key */
template <typename T>
using iter_to_alloc_t =
  typename std::pair<iter_key_t<T> const, iter_val_t<T> >;
#endif
} // namespace detail
} // namespace unordered

View File

@ -10,7 +10,8 @@
#pragma once
#endif
#include <boost/unordered/detail/foa.hpp>
#include <boost/unordered/detail/foa/flat_map_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_flat_map_fwd.hpp>
@ -32,67 +33,10 @@ namespace boost {
#pragma warning(disable : 4714) /* marked as __forceinline not inlined */
#endif
namespace detail {
// Type policy for flat maps: elements (std::pair<Key const, T>) are
// stored directly in the table slots (element_type == value_type), so
// construct/destroy need no separate node allocation.
template <class Key, class T> struct flat_map_types
{
  using key_type = Key;
  // const-stripped variants, used where the pair must be mutated/moved
  using raw_key_type = typename std::remove_const<Key>::type;
  using raw_mapped_type = typename std::remove_const<T>::type;

  // mutable pair used for in-flight construction
  using init_type = std::pair<raw_key_type, raw_mapped_type>;
  // pair of rvalue refs: lets a pair be moved out memberwise
  using moved_type = std::pair<raw_key_type&&, raw_mapped_type&&>;
  // the user-visible pair type (key is const)
  using value_type = std::pair<Key const, T>;

  using element_type = value_type;

  static value_type& value_from(element_type& x) { return x; }

  // key extraction from any pair-like argument
  template <class K, class V>
  static raw_key_type const& extract(std::pair<K, V> const& kv)
  {
    return kv.first;
  }

  static moved_type move(init_type& x)
  {
    return {std::move(x.first), std::move(x.second)};
  }

  static moved_type move(element_type& x)
  {
    // TODO: we probably need to launder here
    // const_cast so the const key of value_type can be moved from
    return {std::move(const_cast<raw_key_type&>(x.first)),
      std::move(const_cast<raw_mapped_type&>(x.second))};
  }

  template <class A, class... Args>
  static void construct(A& al, init_type* p, Args&&... args)
  {
    boost::allocator_construct(al, p, std::forward<Args>(args)...);
  }

  template <class A, class... Args>
  static void construct(A& al, value_type* p, Args&&... args)
  {
    boost::allocator_construct(al, p, std::forward<Args>(args)...);
  }

  template <class A> static void destroy(A& al, init_type* p) noexcept
  {
    boost::allocator_destroy(al, p);
  }

  template <class A> static void destroy(A& al, value_type* p) noexcept
  {
    boost::allocator_destroy(al, p);
  }
};
} // namespace detail
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
class unordered_flat_map
{
using map_types = detail::flat_map_types<Key, T>;
using map_types = detail::foa::flat_map_types<Key, T>;
using table_type = detail::foa::table<map_types, Hash, KeyEqual,
typename boost::allocator_rebind<Allocator,
@ -100,6 +44,10 @@ namespace boost {
table_type table_;
template <class K, class V, class H, class KE, class A>
bool friend operator==(unordered_flat_map<K, V, H, KE, A> const& lhs,
unordered_flat_map<K, V, H, KE, A> const& rhs);
template <class K, class V, class H, class KE, class A, class Pred>
typename unordered_flat_map<K, V, H, KE, A>::size_type friend erase_if(
unordered_flat_map<K, V, H, KE, A>& set, Pred pred);
@ -702,19 +650,7 @@ namespace boost {
unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
unordered_flat_map<Key, T, Hash, KeyEqual, Allocator> const& rhs)
{
if (&lhs == &rhs) {
return true;
}
return (lhs.size() == rhs.size()) && ([&] {
for (auto const& kvp : lhs) {
auto pos = rhs.find(kvp.first);
if ((pos == rhs.end()) || (*pos != kvp)) {
return false;
}
}
return true;
})();
return lhs.table_ == rhs.table_;
}
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
@ -748,18 +684,6 @@ namespace boost {
#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES
namespace detail {
template <typename T>
using iter_key_t =
typename std::iterator_traits<T>::value_type::first_type;
template <typename T>
using iter_val_t =
typename std::iterator_traits<T>::value_type::second_type;
template <typename T>
using iter_to_alloc_t =
typename std::pair<iter_key_t<T> const, iter_val_t<T> >;
} // namespace detail
template <class InputIterator,
class Hash =
boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,

View File

@ -12,7 +12,6 @@
#endif
#include <boost/functional/hash_fwd.hpp>
#include <boost/unordered/detail/fwd.hpp>
#include <functional>
#include <memory>

View File

@ -10,7 +10,8 @@
#pragma once
#endif
#include <boost/unordered/detail/foa.hpp>
#include <boost/unordered/detail/foa/flat_set_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_flat_set_fwd.hpp>
@ -30,38 +31,10 @@ namespace boost {
#pragma warning(disable : 4714) /* marked as __forceinline not inlined */
#endif
namespace detail {
// Type policy for the open-addressing flat set: key, init and value
// types all coincide, and elements are stored in-place in the table.
template <class Key> struct flat_set_types
{
using key_type = Key;
using init_type = Key;
using value_type = Key;
// The element is its own key.
static Key const& extract(value_type const& key) { return key; }
// Flat storage: no indirection between element and value.
using element_type = value_type;
static Key& value_from(element_type& x) { return x; }
static element_type&& move(element_type& x) { return std::move(x); }
// Construct/destroy via the allocator so allocator-aware types work.
template <class A, class... Args>
static void construct(A& al, value_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
template <class A> static void destroy(A& al, value_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
};
} // namespace detail
template <class Key, class Hash, class KeyEqual, class Allocator>
class unordered_flat_set
{
using set_types = detail::flat_set_types<Key>;
using set_types = detail::foa::flat_set_types<Key>;
using table_type = detail::foa::table<set_types, Hash, KeyEqual,
typename boost::allocator_rebind<Allocator,
@ -69,6 +42,10 @@ namespace boost {
table_type table_;
template <class K, class H, class KE, class A>
bool friend operator==(unordered_flat_set<K, H, KE, A> const& lhs,
unordered_flat_set<K, H, KE, A> const& rhs);
template <class K, class H, class KE, class A, class Pred>
typename unordered_flat_set<K, H, KE, A>::size_type friend erase_if(
unordered_flat_set<K, H, KE, A>& set, Pred pred);
@ -499,19 +476,7 @@ namespace boost {
unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& lhs,
unordered_flat_set<Key, Hash, KeyEqual, Allocator> const& rhs)
{
if (&lhs == &rhs) {
return true;
}
return (lhs.size() == rhs.size()) && ([&] {
for (auto const& key : lhs) {
auto pos = rhs.find(key);
if ((pos == rhs.end()) || (key != *pos)) {
return false;
}
}
return true;
})();
return lhs.table_ == rhs.table_;
}
template <class Key, class Hash, class KeyEqual, class Allocator>

View File

@ -12,7 +12,6 @@
#endif
#include <boost/functional/hash_fwd.hpp>
#include <boost/unordered/detail/fwd.hpp>
#include <functional>
#include <memory>

View File

@ -1061,18 +1061,6 @@ namespace boost {
#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES
namespace detail {
template <typename T>
using iter_key_t =
typename std::iterator_traits<T>::value_type::first_type;
template <typename T>
using iter_val_t =
typename std::iterator_traits<T>::value_type::second_type;
template <typename T>
using iter_to_alloc_t =
typename std::pair<iter_key_t<T> const, iter_val_t<T> >;
} // namespace detail
template <class InputIterator,
class Hash =
boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,

View File

@ -13,7 +13,6 @@
#endif
#include <boost/functional/hash_fwd.hpp>
#include <boost/unordered/detail/fwd.hpp>
#include <functional>
#include <memory>

View File

@ -10,9 +10,10 @@
#pragma once
#endif
#include <boost/unordered/detail/foa.hpp>
#include <boost/unordered/detail/foa/element_type.hpp>
#include <boost/unordered/detail/foa/node_handle.hpp>
#include <boost/unordered/detail/foa/node_map_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_node_map_fwd.hpp>
@ -35,112 +36,6 @@ namespace boost {
#endif
namespace detail {
// Type policy for the node-based map: unlike the flat variant, each
// element lives in a separately allocated node and the table slot holds
// foa::element_type, a wrapper around a pointer (member `p`) to the
// value. This indirection is what enables stable node handles.
template <class Key, class T> struct node_map_types
{
using key_type = Key;
using mapped_type = T;
// const-stripped variants used for internal storage/relocation.
using raw_key_type = typename std::remove_const<Key>::type;
using raw_mapped_type = typename std::remove_const<T>::type;
// Mutable pair used while an element is being built/inserted.
using init_type = std::pair<raw_key_type, raw_mapped_type>;
// What users see: the key is const once stored.
using value_type = std::pair<Key const, T>;
// Pair of rvalue references used to relocate a value without copying.
using moved_type = std::pair<raw_key_type&&, raw_mapped_type&&>;
// Slot type: pointer wrapper around the heap-allocated value.
using element_type=foa::element_type<value_type>;
static value_type& value_from(element_type const& x) { return *(x.p); }
// Key extraction from a pair-like object or from a node slot.
template <class K, class V>
static raw_key_type const& extract(std::pair<K, V> const& kv)
{
return kv.first;
}
static raw_key_type const& extract(element_type const& kv)
{
return kv.p->first;
}
// Moving a node slot just moves the wrapper (pointer), not the value.
static element_type&& move(element_type& x) { return std::move(x); }
static moved_type move(init_type& x)
{
return {std::move(x.first), std::move(x.second)};
}
static moved_type move(value_type& x)
{
// The stored key is const; cast it away so it can be moved from.
return {std::move(const_cast<raw_key_type&>(x.first)),
std::move(const_cast<raw_mapped_type&>(x.second))};
}
// Move-construct a slot from another slot: steal the node pointer and
// leave the source empty. No allocation, hence noexcept.
template <class A>
static void construct(A&, element_type* p, element_type&& x) noexcept
{
p->p = x.p;
x.p = nullptr;
}
// Copy-construct a slot from another slot: deep-copies the pointee by
// delegating to the allocating overload below.
template <class A>
static void construct(A& al, element_type* p, element_type const& copy)
{
construct(al, p, *copy.p);
}
template <class A, class... Args>
static void construct(A& al, init_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
template <class A, class... Args>
static void construct(A& al, value_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
// Allocate a node and construct the value in it. If construction
// throws, the node is deallocated before rethrowing (strong guarantee
// for the slot).
template <class A, class... Args>
static void construct(A& al, element_type* p, Args&&... args)
{
p->p = boost::to_address(boost::allocator_allocate(al, 1));
BOOST_TRY
{
boost::allocator_construct(al, p->p, std::forward<Args>(args)...);
}
BOOST_CATCH(...)
{
using pointer_type = typename boost::allocator_pointer<A>::type;
using pointer_traits = boost::pointer_traits<pointer_type>;
boost::allocator_deallocate(
al, pointer_traits::pointer_to(*(p->p)), 1);
BOOST_RETHROW
}
BOOST_CATCH_END
}
template <class A> static void destroy(A& al, value_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
template <class A> static void destroy(A& al, init_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
// Destroy a slot: if it still owns a node (p->p non-null, i.e. not
// moved-from or extracted), destroy the value and free the node.
template <class A> static void destroy(A& al, element_type* p) noexcept
{
if (p->p) {
using pointer_type = typename boost::allocator_pointer<A>::type;
using pointer_traits = boost::pointer_traits<pointer_type>;
destroy(al, p->p);
boost::allocator_deallocate(
al, pointer_traits::pointer_to(*(p->p)), 1);
}
}
};
template <class TypePolicy, class Allocator>
struct node_map_handle
: public detail::foa::node_handle_base<TypePolicy, Allocator>
@ -179,7 +74,7 @@ namespace boost {
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
class unordered_node_map
{
using map_types = detail::node_map_types<Key, T>;
using map_types = detail::foa::node_map_types<Key, T>;
using table_type = detail::foa::table<map_types, Hash, KeyEqual,
typename boost::allocator_rebind<Allocator,
@ -187,6 +82,10 @@ namespace boost {
table_type table_;
template <class K, class V, class H, class KE, class A>
bool friend operator==(unordered_node_map<K, V, H, KE, A> const& lhs,
unordered_node_map<K, V, H, KE, A> const& rhs);
template <class K, class V, class H, class KE, class A, class Pred>
typename unordered_node_map<K, V, H, KE, A>::size_type friend erase_if(
unordered_node_map<K, V, H, KE, A>& set, Pred pred);
@ -854,19 +753,7 @@ namespace boost {
unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& lhs,
unordered_node_map<Key, T, Hash, KeyEqual, Allocator> const& rhs)
{
if (&lhs == &rhs) {
return true;
}
return (lhs.size() == rhs.size()) && ([&] {
for (auto const& kvp : lhs) {
auto pos = rhs.find(kvp.first);
if ((pos == rhs.end()) || (*pos != kvp)) {
return false;
}
}
return true;
})();
return lhs.table_ == rhs.table_;
}
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
@ -900,18 +787,6 @@ namespace boost {
#if BOOST_UNORDERED_TEMPLATE_DEDUCTION_GUIDES
namespace detail {
template <typename T>
using iter_key_t =
typename std::iterator_traits<T>::value_type::first_type;
template <typename T>
using iter_val_t =
typename std::iterator_traits<T>::value_type::second_type;
template <typename T>
using iter_to_alloc_t =
typename std::pair<iter_key_t<T> const, iter_val_t<T> >;
} // namespace detail
template <class InputIterator,
class Hash =
boost::hash<boost::unordered::detail::iter_key_t<InputIterator> >,

View File

@ -12,7 +12,6 @@
#endif
#include <boost/functional/hash_fwd.hpp>
#include <boost/unordered/detail/fwd.hpp>
#include <functional>
#include <memory>

View File

@ -10,9 +10,10 @@
#pragma once
#endif
#include <boost/unordered/detail/foa.hpp>
#include <boost/unordered/detail/foa/element_type.hpp>
#include <boost/unordered/detail/foa/node_handle.hpp>
#include <boost/unordered/detail/foa/node_set_types.hpp>
#include <boost/unordered/detail/foa/table.hpp>
#include <boost/unordered/detail/type_traits.hpp>
#include <boost/unordered/unordered_node_set_fwd.hpp>
@ -34,77 +35,6 @@ namespace boost {
#endif
namespace detail {
// Type policy for the node-based set: each key lives in a separately
// allocated node; the table slot is foa::element_type, a wrapper around
// a pointer (member `p`) to the key. The indirection enables stable
// node handles.
template <class Key> struct node_set_types
{
using key_type = Key;
using init_type = Key;
using value_type = Key;
// The element is its own key.
static Key const& extract(value_type const& key) { return key; }
// Slot type: pointer wrapper around the heap-allocated key.
using element_type=foa::element_type<value_type>;
static value_type& value_from(element_type const& x) { return *x.p; }
static Key const& extract(element_type const& k) { return *k.p; }
// Moving a slot just moves the wrapper (pointer), not the key.
static element_type&& move(element_type& x) { return std::move(x); }
static value_type&& move(value_type& x) { return std::move(x); }
// Copy-construct a slot from another slot: deep-copies the pointee by
// delegating to the allocating overload below.
template <class A>
static void construct(A& al, element_type* p, element_type const& copy)
{
construct(al, p, *copy.p);
}
// Move-construct a slot from another slot: steal the node pointer and
// leave the source empty. No allocation, hence noexcept.
template <typename Allocator>
static void construct(
Allocator&, element_type* p, element_type&& x) noexcept
{
p->p = x.p;
x.p = nullptr;
}
template <class A, class... Args>
static void construct(A& al, value_type* p, Args&&... args)
{
boost::allocator_construct(al, p, std::forward<Args>(args)...);
}
// Allocate a node and construct the key in it. If construction throws,
// the node is deallocated before rethrowing.
template <class A, class... Args>
static void construct(A& al, element_type* p, Args&&... args)
{
p->p = boost::to_address(boost::allocator_allocate(al, 1));
BOOST_TRY
{
boost::allocator_construct(al, p->p, std::forward<Args>(args)...);
}
BOOST_CATCH(...)
{
boost::allocator_deallocate(al,
boost::pointer_traits<
typename boost::allocator_pointer<A>::type>::pointer_to(*p->p),
1);
BOOST_RETHROW
}
BOOST_CATCH_END
}
template <class A> static void destroy(A& al, value_type* p) noexcept
{
boost::allocator_destroy(al, p);
}
// Destroy a slot: if it still owns a node (not moved-from/extracted),
// destroy the key and free the node.
template <class A> static void destroy(A& al, element_type* p) noexcept
{
if (p->p) {
destroy(al, p->p);
boost::allocator_deallocate(al,
boost::pointer_traits<typename boost::allocator_pointer<
A>::type>::pointer_to(*(p->p)),
1);
}
}
};
template <class TypePolicy, class Allocator>
struct node_set_handle
: public detail::foa::node_handle_base<TypePolicy, Allocator>
@ -135,7 +65,7 @@ namespace boost {
template <class Key, class Hash, class KeyEqual, class Allocator>
class unordered_node_set
{
using set_types = detail::node_set_types<Key>;
using set_types = detail::foa::node_set_types<Key>;
using table_type = detail::foa::table<set_types, Hash, KeyEqual,
typename boost::allocator_rebind<Allocator,
@ -143,6 +73,10 @@ namespace boost {
table_type table_;
template <class K, class H, class KE, class A>
bool friend operator==(unordered_node_set<K, H, KE, A> const& lhs,
unordered_node_set<K, H, KE, A> const& rhs);
template <class K, class H, class KE, class A, class Pred>
typename unordered_node_set<K, H, KE, A>::size_type friend erase_if(
unordered_node_set<K, H, KE, A>& set, Pred pred);
@ -638,19 +572,7 @@ namespace boost {
unordered_node_set<Key, Hash, KeyEqual, Allocator> const& lhs,
unordered_node_set<Key, Hash, KeyEqual, Allocator> const& rhs)
{
if (&lhs == &rhs) {
return true;
}
return (lhs.size() == rhs.size()) && ([&] {
for (auto const& key : lhs) {
auto pos = rhs.find(key);
if ((pos == rhs.end()) || (key != *pos)) {
return false;
}
}
return true;
})();
return lhs.table_ == rhs.table_;
}
template <class Key, class Hash, class KeyEqual, class Allocator>

View File

@ -12,7 +12,6 @@
#endif
#include <boost/functional/hash_fwd.hpp>
#include <boost/unordered/detail/fwd.hpp>
#include <functional>
#include <memory>

View File

@ -13,7 +13,6 @@
#endif
#include <boost/functional/hash_fwd.hpp>
#include <boost/unordered/detail/fwd.hpp>
#include <functional>
#include <memory>

View File

@ -6,12 +6,158 @@ include(BoostTestJamfile OPTIONAL RESULT_VARIABLE HAVE_BOOST_TEST)
if(HAVE_BOOST_TEST)
boost_test_jamfile(
FILE Jamfile.v2
LINK_LIBRARIES
Boost::unordered
Boost::core
Boost::concept_check
)
# The concurrent (CFOA) tests need a threading library; prefer -pthread.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
# Libraries linked into every boost_test() target declared below.
set(BOOST_TEST_LINK_LIBRARIES Boost::unordered Boost::core Boost::concept_check)
# Register a closed-addressing (FCA) test; forwards all arguments.
function(fca_tests)
boost_test(PREFIX boost_unordered ${ARGN})
endfunction()
# Register an open-addressing (FOA) test: same sources, compiled with
# BOOST_UNORDERED_FOA_TESTS so the shared test code targets the FOA containers.
function(foa_tests)
boost_test(PREFIX boost_unordered_foa COMPILE_DEFINITIONS BOOST_UNORDERED_FOA_TESTS ${ARGN})
endfunction()
# Register a concurrent (CFOA) test; links against Threads::Threads.
function(cfoa_tests)
boost_test(PREFIX boost_unordered_cfoa LINK_LIBRARIES Threads::Threads ${ARGN})
endfunction()
# FCA tests
fca_tests(SOURCES unordered/prime_fmod_tests.cpp)
fca_tests(SOURCES unordered/fwd_set_test.cpp)
fca_tests(SOURCES unordered/fwd_map_test.cpp)
fca_tests(SOURCES unordered/allocator_traits.cpp)
fca_tests(SOURCES unordered/minimal_allocator.cpp)
fca_tests(SOURCES unordered/compile_set.cpp)
fca_tests(SOURCES unordered/compile_map.cpp)
fca_tests(SOURCES unordered/noexcept_tests.cpp)
fca_tests(SOURCES unordered/link_test_1.cpp unordered/link_test_2.cpp)
fca_tests(SOURCES unordered/incomplete_test.cpp)
fca_tests(SOURCES unordered/simple_tests.cpp)
fca_tests(SOURCES unordered/equivalent_keys_tests.cpp)
fca_tests(SOURCES unordered/constructor_tests.cpp)
fca_tests(SOURCES unordered/copy_tests.cpp)
fca_tests(SOURCES unordered/move_tests.cpp)
fca_tests(SOURCES unordered/post_move_tests.cpp)
fca_tests(SOURCES unordered/assign_tests.cpp)
fca_tests(SOURCES unordered/insert_tests.cpp)
fca_tests(SOURCES unordered/insert_stable_tests.cpp)
fca_tests(SOURCES unordered/insert_hint_tests.cpp)
fca_tests(SOURCES unordered/emplace_tests.cpp)
fca_tests(SOURCES unordered/unnecessary_copy_tests.cpp)
fca_tests(SOURCES unordered/erase_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_SUPPRESS_DEPRECATED)
fca_tests(SOURCES unordered/erase_equiv_tests.cpp)
fca_tests(SOURCES unordered/extract_tests.cpp)
fca_tests(SOURCES unordered/node_handle_tests.cpp)
fca_tests(SOURCES unordered/merge_tests.cpp)
fca_tests(SOURCES unordered/find_tests.cpp)
fca_tests(SOURCES unordered/at_tests.cpp)
fca_tests(SOURCES unordered/bucket_tests.cpp)
fca_tests(SOURCES unordered/load_factor_tests.cpp)
fca_tests(SOURCES unordered/rehash_tests.cpp)
fca_tests(SOURCES unordered/equality_tests.cpp)
fca_tests(SOURCES unordered/swap_tests.cpp)
fca_tests(SOURCES unordered/deduction_tests.cpp)
fca_tests(SOURCES unordered/scoped_allocator.cpp)
fca_tests(SOURCES unordered/transparent_tests.cpp)
fca_tests(SOURCES unordered/reserve_tests.cpp)
fca_tests(SOURCES unordered/contains_tests.cpp)
fca_tests(SOURCES unordered/erase_if.cpp)
fca_tests(SOURCES unordered/scary_tests.cpp)
fca_tests(SOURCES exception/constructor_exception_tests.cpp)
fca_tests(SOURCES exception/copy_exception_tests.cpp)
fca_tests(SOURCES exception/assign_exception_tests.cpp)
fca_tests(SOURCES exception/move_assign_exception_tests.cpp)
fca_tests(SOURCES exception/insert_exception_tests.cpp)
fca_tests(SOURCES exception/erase_exception_tests.cpp)
fca_tests(SOURCES exception/rehash_exception_tests.cpp)
fca_tests(SOURCES exception/swap_exception_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_SWAP_METHOD=2)
fca_tests(SOURCES exception/merge_exception_tests.cpp)
fca_tests(SOURCES exception/less_tests.cpp)
fca_tests(SOURCES unordered/narrow_cast_tests.cpp)
fca_tests(SOURCES unordered/compile_set.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_USE_MOVE NAME bmove_compile_set)
fca_tests(SOURCES unordered/compile_map.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_USE_MOVE NAME bmove_compile_map)
fca_tests(SOURCES unordered/copy_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_USE_MOVE NAME bmove_copy)
fca_tests(SOURCES unordered/move_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_USE_MOVE NAME bmove_move)
fca_tests(SOURCES unordered/assign_tests.cpp COMPILE_DEFINITIONS BOOST_UNORDERED_USE_MOVE NAME bmove_assign)
fca_tests(SOURCES quick.cpp)
fca_tests(TYPE compile-fail NAME insert_node_type_fail_map COMPILE_DEFINITIONS UNORDERED_TEST_MAP SOURCES unordered/insert_node_type_fail.cpp)
fca_tests(TYPE compile-fail NAME insert_node_type_fail_multimap COMPILE_DEFINITIONS UNORDERED_TEST_MULTIMAP SOURCES unordered/insert_node_type_fail.cpp)
fca_tests(TYPE compile-fail NAME insert_node_type_fail_set COMPILE_DEFINITIONS UNORDERED_TEST_SET SOURCES unordered/insert_node_type_fail.cpp)
fca_tests(TYPE compile-fail NAME insert_node_type_fail_multiset COMPILE_DEFINITIONS UNORDERED_TEST_MULTISET SOURCES unordered/insert_node_type_fail.cpp)
# FOA tests
foa_tests(SOURCES unordered/fwd_set_test.cpp)
foa_tests(SOURCES unordered/fwd_map_test.cpp)
foa_tests(SOURCES unordered/compile_set.cpp)
foa_tests(SOURCES unordered/compile_map.cpp)
foa_tests(SOURCES unordered/noexcept_tests.cpp)
foa_tests(SOURCES unordered/incomplete_test.cpp)
foa_tests(SOURCES unordered/simple_tests.cpp)
foa_tests(SOURCES unordered/equivalent_keys_tests.cpp)
foa_tests(SOURCES unordered/constructor_tests.cpp)
foa_tests(SOURCES unordered/copy_tests.cpp)
foa_tests(SOURCES unordered/move_tests.cpp)
foa_tests(SOURCES unordered/post_move_tests.cpp)
foa_tests(SOURCES unordered/assign_tests.cpp)
foa_tests(SOURCES unordered/insert_tests.cpp)
foa_tests(SOURCES unordered/insert_hint_tests.cpp)
foa_tests(SOURCES unordered/emplace_tests.cpp)
foa_tests(SOURCES unordered/erase_tests.cpp)
foa_tests(SOURCES unordered/merge_tests.cpp)
foa_tests(SOURCES unordered/find_tests.cpp)
foa_tests(SOURCES unordered/at_tests.cpp)
foa_tests(SOURCES unordered/load_factor_tests.cpp)
foa_tests(SOURCES unordered/rehash_tests.cpp)
foa_tests(SOURCES unordered/equality_tests.cpp)
foa_tests(SOURCES unordered/swap_tests.cpp)
foa_tests(SOURCES unordered/transparent_tests.cpp)
foa_tests(SOURCES unordered/reserve_tests.cpp)
foa_tests(SOURCES unordered/contains_tests.cpp)
foa_tests(SOURCES unordered/erase_if.cpp)
foa_tests(SOURCES unordered/scary_tests.cpp)
foa_tests(SOURCES unordered/init_type_insert_tests.cpp)
foa_tests(SOURCES unordered/max_load_tests.cpp)
foa_tests(SOURCES unordered/extract_tests.cpp)
foa_tests(SOURCES unordered/node_handle_tests.cpp)
foa_tests(SOURCES unordered/uses_allocator.cpp)
foa_tests(SOURCES unordered/link_test_1.cpp unordered/link_test_2.cpp )
foa_tests(SOURCES unordered/scoped_allocator.cpp)
foa_tests(SOURCES unordered/hash_is_avalanching_test.cpp)
foa_tests(SOURCES exception/constructor_exception_tests.cpp)
foa_tests(SOURCES exception/copy_exception_tests.cpp)
foa_tests(SOURCES exception/assign_exception_tests.cpp)
foa_tests(SOURCES exception/move_assign_exception_tests.cpp)
foa_tests(SOURCES exception/insert_exception_tests.cpp)
foa_tests(SOURCES exception/erase_exception_tests.cpp)
foa_tests(SOURCES exception/rehash_exception_tests.cpp)
foa_tests(SOURCES exception/swap_exception_tests.cpp)
foa_tests(SOURCES exception/merge_exception_tests.cpp)
# CFOA tests
cfoa_tests(SOURCES cfoa/latch_tests.cpp)
cfoa_tests(SOURCES cfoa/insert_tests.cpp)
cfoa_tests(SOURCES cfoa/erase_tests.cpp)
cfoa_tests(SOURCES cfoa/try_emplace_tests.cpp)
cfoa_tests(SOURCES cfoa/emplace_tests.cpp)
cfoa_tests(SOURCES cfoa/visit_tests.cpp)
cfoa_tests(SOURCES cfoa/constructor_tests.cpp)
cfoa_tests(SOURCES cfoa/assign_tests.cpp)
cfoa_tests(SOURCES cfoa/clear_tests.cpp)
cfoa_tests(SOURCES cfoa/swap_tests.cpp)
cfoa_tests(SOURCES cfoa/merge_tests.cpp)
cfoa_tests(SOURCES cfoa/rehash_tests.cpp)
cfoa_tests(SOURCES cfoa/equality_tests.cpp)
cfoa_tests(SOURCES cfoa/fwd_tests.cpp)
cfoa_tests(SOURCES cfoa/exception_insert_tests.cpp)
cfoa_tests(SOURCES cfoa/exception_erase_tests.cpp)
cfoa_tests(SOURCES cfoa/exception_constructor_tests.cpp)
cfoa_tests(SOURCES cfoa/exception_assign_tests.cpp)
cfoa_tests(SOURCES cfoa/exception_merge_tests.cpp)
endif()

View File

@ -104,50 +104,51 @@ import ../../config/checks/config : requires ;
CPP11 = [ requires cxx11_constexpr cxx11_noexcept cxx11_decltype cxx11_alignas ] ;
rule build_foa ( name )
local FOA_TESTS =
fwd_set_test
fwd_map_test
compile_set
compile_map
noexcept_tests
incomplete_test
simple_tests
equivalent_keys_tests
constructor_tests
copy_tests
move_tests
post_move_tests
assign_tests
insert_tests
insert_hint_tests
emplace_tests
erase_tests
merge_tests
find_tests
at_tests
load_factor_tests
rehash_tests
equality_tests
swap_tests
transparent_tests
reserve_tests
contains_tests
erase_if
scary_tests
init_type_insert_tests
max_load_tests
extract_tests
node_handle_tests
uses_allocator
;
for local test in $(FOA_TESTS)
{
run unordered/$(name).cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_$(name) ;
run unordered/$(test).cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_$(test) ;
}
build_foa fwd_set_test ;
build_foa fwd_map_test ;
build_foa compile_set ;
build_foa compile_map ;
build_foa noexcept_tests ;
run unordered/link_test_1.cpp unordered/link_test_2.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_link_test ;
build_foa incomplete_test ;
build_foa simple_tests ;
build_foa equivalent_keys_tests ;
build_foa constructor_tests ;
build_foa copy_tests ;
build_foa move_tests ;
build_foa post_move_tests ;
build_foa assign_tests ;
build_foa insert_tests ;
build_foa insert_hint_tests ;
build_foa emplace_tests ;
build_foa erase_tests ;
build_foa merge_tests ;
build_foa find_tests ;
build_foa at_tests ;
build_foa load_factor_tests ;
build_foa rehash_tests ;
build_foa equality_tests ;
build_foa swap_tests ;
run unordered/scoped_allocator.cpp : : : $(CPP11) <toolset>msvc-14.0:<build>no <define>BOOST_UNORDERED_FOA_TESTS : foa_scoped_allocator ;
build_foa transparent_tests ;
build_foa reserve_tests ;
build_foa contains_tests ;
build_foa erase_if ;
build_foa scary_tests ;
build_foa init_type_insert_tests ;
build_foa max_load_tests ;
build_foa extract_tests ;
build_foa node_handle_tests ;
build_foa uses_allocator ;
run unordered/hash_is_avalanching_test.cpp ;
run exception/constructor_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_constructor_exception_tests ;
run exception/copy_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_copy_exception_tests ;
run exception/assign_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_assign_exception_tests ;
@ -157,3 +158,51 @@ run exception/erase_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORD
run exception/rehash_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_rehash_exception_tests ;
run exception/swap_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_swap_exception_tests ;
run exception/merge_exception_tests.cpp : : : $(CPP11) <define>BOOST_UNORDERED_FOA_TESTS : foa_merge_exception_tests ;
alias foa_tests :
foa_$(FOA_TESTS)
foa_link_test
foa_scoped_allocator
hash_is_avalanching_test
foa_constructor_exception_tests
foa_copy_exception_tests
foa_assign_exception_tests
foa_move_assign_exception_tests
foa_insert_exception_tests
foa_erase_exception_tests
foa_rehash_exception_tests
foa_swap_exception_tests
foa_merge_exception_tests
;
local CFOA_TESTS =
latch_tests
insert_tests
erase_tests
try_emplace_tests
emplace_tests
visit_tests
constructor_tests
assign_tests
clear_tests
swap_tests
merge_tests
rehash_tests
equality_tests
fwd_tests
exception_insert_tests
exception_erase_tests
exception_constructor_tests
exception_assign_tests
exception_merge_tests
;
for local test in $(CFOA_TESTS)
{
run cfoa/$(test).cpp
: requirements $(CPP11) <threading>multi
: target-name cfoa_$(test)
;
}
alias cfoa_tests : cfoa_$(CFOA_TESTS) ;

865
test/cfoa/assign_tests.cpp Normal file
View File

@ -0,0 +1,865 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "helpers.hpp"
#include <boost/unordered/concurrent_flat_map.hpp>
#if defined(__clang__) && defined(__has_warning)
#if __has_warning("-Wself-assign-overloaded")
#pragma clang diagnostic ignored "-Wself-assign-overloaded"
#endif
#if __has_warning("-Wself-move")
#pragma clang diagnostic ignored "-Wself-move"
#endif
#endif /* defined(__clang__) && defined(__has_warning) */
#if defined(BOOST_GCC) && BOOST_GCC >= 130000
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wself-move"
#endif
test::seed_t initialize_seed{2762556623};
using test::default_generator;
using test::limited_range;
using test::sequential;
using hasher = stateful_hash;
using key_equal = stateful_key_equal;
using allocator_type = stateful_allocator<std::pair<raii const, raii> >;
using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
key_equal, allocator_type>;
using map_value_type = typename map_type::value_type;
// Minimal stateful test allocator whose copy assignment propagates to
// the container (propagate_on_container_copy_assignment is true_type).
// Two instances compare equal exactly when their integer tags match,
// which lets the tests observe whether propagation happened.
template <class T> struct pocca_allocator
{
  using propagate_on_container_copy_assignment = std::true_type;
  using value_type = T;

  int x_ = -1; // distinguishing tag; -1 means "default-constructed"

  pocca_allocator() = default;
  pocca_allocator(pocca_allocator const&) = default;
  pocca_allocator(pocca_allocator&&) = default;

  // Tagged construction so tests can create distinguishable allocators.
  pocca_allocator(int const x) : x_{x} {}

  pocca_allocator& operator=(pocca_allocator const& other)
  {
    if (&other != this) {
      x_ = other.x_;
    }
    return *this;
  }

  // Rebinding conversion: the tag travels with the allocator.
  template <class U>
  pocca_allocator(pocca_allocator<U> const& other) : x_{other.x_}
  {
  }

  T* allocate(std::size_t count)
  {
    return static_cast<T*>(::operator new(count * sizeof(T)));
  }

  void deallocate(T* p, std::size_t) { ::operator delete(p); }

  bool operator==(pocca_allocator const& other) const
  {
    return x_ == other.x_;
  }
  bool operator!=(pocca_allocator const& other) const
  {
    return !(*this == other);
  }
};
// Minimal stateful test allocator whose MOVE assignment propagates to
// the container (propagate_on_container_move_assignment is true_type).
// Equality is defined by the integer tag, mirroring pocca_allocator.
template <class T> struct pocma_allocator
{
  using propagate_on_container_move_assignment = std::true_type;
  using value_type = T;

  int x_ = -1; // distinguishing tag; -1 means "default-constructed"

  pocma_allocator() = default;
  pocma_allocator(pocma_allocator const&) = default;
  pocma_allocator(pocma_allocator&&) = default;

  // Tagged construction so tests can create distinguishable allocators.
  pocma_allocator(int const x) : x_{x} {}

  pocma_allocator& operator=(pocma_allocator const& other)
  {
    if (&other != this) {
      x_ = other.x_;
    }
    return *this;
  }

  // Rebinding conversion: the tag travels with the allocator.
  template <class U>
  pocma_allocator(pocma_allocator<U> const& other) : x_{other.x_}
  {
  }

  T* allocate(std::size_t count)
  {
    return static_cast<T*>(::operator new(count * sizeof(T)));
  }

  void deallocate(T* p, std::size_t) { ::operator delete(p); }

  bool operator==(pocma_allocator const& other) const
  {
    return x_ == other.x_;
  }
  bool operator!=(pocma_allocator const& other) const
  {
    return !(*this == other);
  }
};
namespace {
template <class G> void copy_assign(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
// lhs empty, rhs empty
{
raii::reset_counts();
map_type x(0, hasher(1), key_equal(2), allocator_type(3));
thread_runner(values, [&x](boost::span<map_value_type> s) {
(void)s;
map_type y;
BOOST_TEST(x.empty());
BOOST_TEST(y.empty());
y = x;
BOOST_TEST_EQ(x.hash_function(), y.hash_function());
BOOST_TEST_EQ(x.key_eq(), y.key_eq());
BOOST_TEST(x.get_allocator() != y.get_allocator());
});
BOOST_TEST_EQ(raii::destructor, 0u);
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
BOOST_TEST_EQ(raii::copy_constructor, 0u);
}
// lhs non-empty, rhs empty
{
raii::reset_counts();
map_type x(0, hasher(1), key_equal(2), allocator_type(3));
auto const old_size = reference_map.size();
thread_runner(values, [&x, &values](boost::span<map_value_type> s) {
(void)s;
map_type y(values.size());
for (auto const& v : values) {
y.insert(v);
}
BOOST_TEST(x.empty());
BOOST_TEST(!y.empty());
y = x;
BOOST_TEST_EQ(x.hash_function(), y.hash_function());
BOOST_TEST_EQ(x.key_eq(), y.key_eq());
BOOST_TEST(x.get_allocator() != y.get_allocator());
BOOST_TEST(y.empty());
});
BOOST_TEST_EQ(raii::destructor, num_threads * (2 * old_size));
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
BOOST_TEST_EQ(
raii::copy_constructor, num_threads * 2 * reference_map.size());
}
check_raii_counts();
// lhs empty, rhs non-empty
{
raii::reset_counts();
map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
for (auto const& v : values) {
x.insert(v);
}
auto const old_cc = +raii::copy_constructor;
thread_runner(
values, [&x, &reference_map](boost::span<map_value_type> s) {
(void)s;
map_type y;
BOOST_TEST(!x.empty());
BOOST_TEST(y.empty());
y = x;
BOOST_TEST_EQ(x.hash_function(), y.hash_function());
BOOST_TEST_EQ(x.key_eq(), y.key_eq());
BOOST_TEST(x.get_allocator() != y.get_allocator());
test_matches_reference(y, reference_map);
});
BOOST_TEST_EQ(raii::destructor, num_threads * 2 * x.size());
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
BOOST_TEST_EQ(
raii::copy_constructor, old_cc + (num_threads * 2 * x.size()));
}
check_raii_counts();
// lhs non-empty, rhs non-empty
{
raii::reset_counts();
map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
for (auto const& v : values) {
x.insert(v);
}
auto const old_size = x.size();
auto const old_cc = +raii::copy_constructor;
thread_runner(values, [&x, &values](boost::span<map_value_type> s) {
(void)s;
map_type y(values.size());
for (auto const& v : values) {
y.insert(v);
}
BOOST_TEST(!x.empty());
BOOST_TEST(!y.empty());
y = x;
BOOST_TEST_EQ(x.hash_function(), y.hash_function());
BOOST_TEST_EQ(x.key_eq(), y.key_eq());
BOOST_TEST(x.get_allocator() != y.get_allocator());
});
BOOST_TEST_EQ(raii::destructor, 2 * num_threads * 2 * old_size);
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
BOOST_TEST_EQ(
raii::copy_constructor, old_cc + (2 * num_threads * 2 * x.size()));
}
check_raii_counts();
// self-assign
{
raii::reset_counts();
map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
for (auto const& v : values) {
x.insert(v);
}
auto const old_cc = +raii::copy_constructor;
thread_runner(
values, [&x, &reference_map](boost::span<map_value_type> s) {
(void)s;
BOOST_TEST(!x.empty());
x = x;
BOOST_TEST_EQ(x.hash_function(), hasher(1));
BOOST_TEST_EQ(x.key_eq(), key_equal(2));
BOOST_TEST(x.get_allocator() == allocator_type(3));
test_matches_reference(x, reference_map);
});
BOOST_TEST_EQ(raii::destructor, 0u);
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
BOOST_TEST_EQ(raii::copy_constructor, old_cc);
}
check_raii_counts();
// propagation
{
using pocca_allocator_type =
pocca_allocator<std::pair<const raii, raii> >;
using pocca_map_type = boost::unordered::concurrent_flat_map<raii, raii,
hasher, key_equal, pocca_allocator_type>;
raii::reset_counts();
pocca_map_type x(
values.size(), hasher(1), key_equal(2), pocca_allocator_type(3));
for (auto const& v : values) {
x.insert(v);
}
auto const old_size = x.size();
auto const old_cc = +raii::copy_constructor;
thread_runner(values, [&x, &values](boost::span<map_value_type> s) {
(void)s;
pocca_map_type y(values.size());
for (auto const& v : values) {
y.insert(v);
}
BOOST_TEST(!x.empty());
BOOST_TEST(!y.empty());
BOOST_TEST(x.get_allocator() != y.get_allocator());
y = x;
BOOST_TEST_EQ(x.hash_function(), y.hash_function());
BOOST_TEST_EQ(x.key_eq(), y.key_eq());
BOOST_TEST(x.get_allocator() == y.get_allocator());
});
BOOST_TEST_EQ(raii::destructor, 2 * num_threads * 2 * old_size);
BOOST_TEST_EQ(raii::copy_assignment, 0u);
BOOST_TEST_EQ(raii::move_assignment, 0u);
BOOST_TEST_EQ(
raii::copy_constructor, old_cc + (2 * num_threads * 2 * x.size()));
}
check_raii_counts();
}
// Exercises concurrent_flat_map move assignment while many threads race to
// move from the same source map. Exactly one thread can win the transfer of
// the source's contents (tracked via num_transfers); RAII counters pin down
// exactly how many element copies/moves/destructions each scenario performs.
template <class G> void move_assign(G gen, test::random_generator rg)
{
  using pocma_allocator_type = pocma_allocator<std::pair<const raii, raii> >;
  using pocma_map_type = boost::unordered::concurrent_flat_map<raii, raii,
    hasher, key_equal, pocma_allocator_type>;

  // move assignment is noexcept for always-equal allocators...
  BOOST_STATIC_ASSERT(
    std::is_nothrow_move_assignable<boost::unordered::concurrent_flat_map<int,
      int, std::hash<int>, std::equal_to<int>,
      std::allocator<std::pair<int const, int> > > >::value);

  // ...and for propagate_on_container_move_assignment allocators...
  BOOST_STATIC_ASSERT(
    std::is_nothrow_move_assignable<boost::unordered::concurrent_flat_map<int,
      int, std::hash<int>, std::equal_to<int>,
      pocma_allocator<std::pair<int const, int> > > >::value);

  // ...but not for stateful, non-propagating allocators (may need to
  // move element-by-element when allocators compare unequal)
  BOOST_STATIC_ASSERT(
    !std::is_nothrow_move_assignable<boost::unordered::concurrent_flat_map<
      int, int, std::hash<int>, std::equal_to<int>,
      stateful_allocator<std::pair<int const, int> > > >::value);

  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  // move assignment has more complex requirements than copying
  // equal allocators:
  // lhs empty, rhs non-empty
  // lhs non-empty, rhs empty
  // lhs non-empty, rhs non-empty
  //
  // unequal allocators:
  // lhs non-empty, rhs non-empty
  //
  // pocma
  // self move-assign

  // lhs empty, rhs empty
  {
    raii::reset_counts();
    map_type x(0, hasher(1), key_equal(2), allocator_type(3));

    std::atomic<unsigned> num_transfers{0};

    thread_runner(
      values, [&x, &num_transfers](boost::span<map_value_type> s) {
        (void)s;
        map_type y(0, hasher(2), key_equal(1), allocator_type(3));

        BOOST_TEST(x.empty());
        BOOST_TEST(y.empty());
        BOOST_TEST(x.get_allocator() == y.get_allocator());

        y = std::move(x);
        // exactly one thread's y observes x's original hasher(1)/key_equal(2)
        if (y.hash_function() == hasher(1)) {
          ++num_transfers;
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher(2));
          BOOST_TEST_EQ(y.key_eq(), key_equal(1));
        }

        // after the move, x reports the function objects y held before it
        BOOST_TEST_EQ(x.hash_function(), hasher(2));
        BOOST_TEST_EQ(x.key_eq(), key_equal(1));

        BOOST_TEST(x.get_allocator() == y.get_allocator());
      });

    BOOST_TEST_EQ(num_transfers, 1u);

    // empty-to-empty move must not touch any elements
    BOOST_TEST_EQ(raii::destructor, 0u);
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    BOOST_TEST_EQ(raii::copy_constructor, 0u);
  }

  // lhs non-empty, rhs empty
  {
    raii::reset_counts();
    map_type x(0, hasher(1), key_equal(2), allocator_type(3));

    std::atomic<unsigned> num_transfers{0};

    thread_runner(
      values, [&x, &values, &num_transfers](boost::span<map_value_type> s) {
        (void)s;
        map_type y(values.size(), hasher(2), key_equal(1), allocator_type(3));
        for (auto const& v : values) {
          y.insert(v);
        }

        BOOST_TEST(x.empty());
        BOOST_TEST(!y.empty());
        BOOST_TEST(x.get_allocator() == y.get_allocator());

        y = std::move(x);
        if (y.hash_function() == hasher(1)) {
          ++num_transfers;
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher(2));
          BOOST_TEST_EQ(y.key_eq(), key_equal(1));
        }

        BOOST_TEST_EQ(x.hash_function(), hasher(2));
        BOOST_TEST_EQ(x.key_eq(), key_equal(1));

        BOOST_TEST(x.get_allocator() == y.get_allocator());
        // assigning from the empty x discards y's elements
        BOOST_TEST(y.empty());
      });

    BOOST_TEST_EQ(num_transfers, 1u);

    // every thread copy-inserted and later destroyed a full set of elements
    // (2 raii objects - key and mapped value - per entry)
    BOOST_TEST_EQ(raii::destructor, num_threads * 2 * reference_map.size());
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    BOOST_TEST_EQ(
      raii::copy_constructor, num_threads * 2 * reference_map.size());
  }
  check_raii_counts();

  // lhs empty, rhs non-empty
  {
    raii::reset_counts();
    map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
    for (auto const& v : values) {
      x.insert(v);
    }

    auto const old_cc = +raii::copy_constructor;
    auto const old_mc = +raii::move_constructor;

    std::atomic<unsigned> num_transfers{0};

    thread_runner(values,
      [&x, &reference_map, &num_transfers](boost::span<map_value_type> s) {
        (void)s;
        map_type y(allocator_type(3));

        BOOST_TEST(y.empty());
        BOOST_TEST(x.get_allocator() == y.get_allocator());

        y = std::move(x);
        // only the thread that won the transfer sees a non-empty y
        if (!y.empty()) {
          ++num_transfers;
          test_matches_reference(y, reference_map);
          BOOST_TEST_EQ(y.hash_function(), hasher(1));
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.hash_function(), hasher());
          BOOST_TEST_EQ(y.key_eq(), key_equal());
        }

        BOOST_TEST(x.empty());
        BOOST_TEST_EQ(x.hash_function(), hasher());
        BOOST_TEST_EQ(x.key_eq(), key_equal());
        BOOST_TEST(x.get_allocator() == y.get_allocator());
      });

    BOOST_TEST_EQ(num_transfers, 1u);

    // only the single transferred element set is destroyed (equal
    // allocators: the move is a transfer, not an element-wise copy)
    BOOST_TEST_EQ(raii::destructor, 2 * reference_map.size());
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    BOOST_TEST_EQ(raii::copy_constructor, old_cc);
    BOOST_TEST_EQ(raii::move_constructor, old_mc);
  }
  check_raii_counts();

  // lhs non-empty, rhs non-empty
  {
    raii::reset_counts();
    map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
    for (auto const& v : values) {
      x.insert(v);
    }

    auto const old_size = x.size();
    auto const old_cc = +raii::copy_constructor;
    auto const old_mc = +raii::move_constructor;

    std::atomic<unsigned> num_transfers{0};

    thread_runner(values, [&x, &values, &num_transfers, &reference_map](
                            boost::span<map_value_type> s) {
      (void)s;
      map_type y(values.size(), hasher(2), key_equal(1), allocator_type(3));
      for (auto const& v : values) {
        y.insert(v);
      }

      BOOST_TEST(!y.empty());
      BOOST_TEST(x.get_allocator() == y.get_allocator());

      y = std::move(x);
      if (y.hash_function() == hasher(1)) {
        ++num_transfers;
        test_matches_reference(y, reference_map);
        BOOST_TEST_EQ(y.key_eq(), key_equal(2));
      } else {
        BOOST_TEST_EQ(y.hash_function(), hasher(2));
        BOOST_TEST_EQ(y.key_eq(), key_equal(1));
      }

      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.hash_function(), hasher(2));
      BOOST_TEST_EQ(x.key_eq(), key_equal(1));
      BOOST_TEST(x.get_allocator() == y.get_allocator());
    });

    BOOST_TEST_EQ(num_transfers, 1u);

    // destroyed: x's transferred elements once, plus each thread's y
    BOOST_TEST_EQ(
      raii::destructor, 2 * old_size + num_threads * 2 * old_size);
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    BOOST_TEST_EQ(raii::move_constructor, old_mc);
    BOOST_TEST_EQ(raii::copy_constructor,
      old_cc + (num_threads * 2 * reference_map.size()));
  }
  check_raii_counts();

  // lhs non-empty, rhs non-empty, unequal allocators, no propagation
  {
    raii::reset_counts();
    map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
    for (auto const& v : values) {
      x.insert(v);
    }

    auto const old_size = x.size();
    auto const old_cc = +raii::copy_constructor;
    auto const old_mc = +raii::move_constructor;

    std::atomic<unsigned> num_transfers{0};

    thread_runner(values, [&x, &values, &num_transfers, &reference_map](
                            boost::span<map_value_type> s) {
      (void)s;
      map_type y(values.size(), hasher(2), key_equal(1), allocator_type(13));
      for (auto const& v : values) {
        y.insert(v);
      }

      BOOST_TEST(
        !boost::allocator_is_always_equal<allocator_type>::type::value);

      BOOST_TEST(!boost::allocator_propagate_on_container_move_assignment<
        allocator_type>::type::value);

      BOOST_TEST(!y.empty());
      BOOST_TEST(x.get_allocator() != y.get_allocator());

      y = std::move(x);
      if (y.hash_function() == hasher(1)) {
        ++num_transfers;
        test_matches_reference(y, reference_map);
        BOOST_TEST_EQ(y.key_eq(), key_equal(2));
      } else {
        BOOST_TEST_EQ(y.hash_function(), hasher(2));
        BOOST_TEST_EQ(y.key_eq(), key_equal(1));
      }

      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.hash_function(), hasher(2));
      BOOST_TEST_EQ(x.key_eq(), key_equal(1));
      // allocator does not propagate, so the maps still differ
      BOOST_TEST(x.get_allocator() != y.get_allocator());
    });

    BOOST_TEST_EQ(num_transfers, 1u);

    BOOST_TEST_EQ(
      raii::destructor, 2 * 2 * old_size + num_threads * 2 * old_size);
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    // unequal, non-propagating allocators force an element-wise move
    BOOST_TEST_EQ(raii::move_constructor, old_mc + 2 * old_size);
    BOOST_TEST_EQ(raii::copy_constructor,
      old_cc + (num_threads * 2 * reference_map.size()));
  }
  check_raii_counts();

  // lhs non-empty, rhs non-empty, pocma
  {
    raii::reset_counts();
    pocma_map_type x(
      values.size(), hasher(1), key_equal(2), pocma_allocator_type(3));
    for (auto const& v : values) {
      x.insert(v);
    }

    auto const old_size = x.size();
    auto const old_cc = +raii::copy_constructor;
    auto const old_mc = +raii::move_constructor;

    std::atomic<unsigned> num_transfers{0};

    thread_runner(values, [&x, &values, &num_transfers, &reference_map](
                            boost::span<map_value_type> s) {
      (void)s;
      pocma_map_type y(
        values.size(), hasher(2), key_equal(1), pocma_allocator_type(13));
      for (auto const& v : values) {
        y.insert(v);
      }

      BOOST_TEST(!y.empty());
      BOOST_TEST(x.get_allocator() != y.get_allocator());

      y = std::move(x);
      if (y.hash_function() == hasher(1)) {
        ++num_transfers;
        test_matches_reference(y, reference_map);
        BOOST_TEST_EQ(y.key_eq(), key_equal(2));
      } else {
        BOOST_TEST_EQ(y.hash_function(), hasher(2));
        BOOST_TEST_EQ(y.key_eq(), key_equal(1));
      }

      BOOST_TEST(x.empty());
      BOOST_TEST_EQ(x.hash_function(), hasher(2));
      BOOST_TEST_EQ(x.key_eq(), key_equal(1));
      // pocma: the allocator propagated with the assignment
      BOOST_TEST(x.get_allocator() == y.get_allocator());
    });

    BOOST_TEST_EQ(num_transfers, 1u);

    BOOST_TEST_EQ(
      raii::destructor, 2 * old_size + num_threads * 2 * old_size);
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    // propagation enables a wholesale transfer: no per-element moves
    BOOST_TEST_EQ(raii::move_constructor, old_mc);
    BOOST_TEST_EQ(raii::copy_constructor,
      old_cc + (num_threads * 2 * reference_map.size()));
  }
  check_raii_counts();

  // self-assign
  {
    raii::reset_counts();
    map_type x(values.size(), hasher(1), key_equal(2), allocator_type(3));
    for (auto const& v : values) {
      x.insert(v);
    }

    auto const old_cc = +raii::copy_constructor;
    auto const old_mc = +raii::move_constructor;

    thread_runner(
      values, [&x, &reference_map](boost::span<map_value_type> s) {
        (void)s;
        // self move-assignment must leave the map fully intact
        x = std::move(x);

        BOOST_TEST(!x.empty());
        BOOST_TEST_EQ(x.hash_function(), hasher(1));
        BOOST_TEST_EQ(x.key_eq(), key_equal(2));
        BOOST_TEST(x.get_allocator() == allocator_type(3));
        test_matches_reference(x, reference_map);
      });

    BOOST_TEST_EQ(raii::destructor, 0u);
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
    BOOST_TEST_EQ(raii::move_constructor, old_mc);
    BOOST_TEST_EQ(raii::copy_constructor, old_cc);
  }
  check_raii_counts();
}
// Every thread assigns the same initializer_list to a shared map.
// The list contains 22 entries but only 11 unique keys, mirrored so the
// duplicates appear in both halves.
UNORDERED_AUTO_TEST (initializer_list_assignment) {
  std::initializer_list<map_value_type> values{
    map_value_type{raii{0}, raii{0}},
    map_value_type{raii{1}, raii{1}},
    map_value_type{raii{2}, raii{2}},
    map_value_type{raii{3}, raii{3}},
    map_value_type{raii{4}, raii{4}},
    map_value_type{raii{5}, raii{5}},
    map_value_type{raii{6}, raii{6}},
    map_value_type{raii{6}, raii{6}},
    map_value_type{raii{7}, raii{7}},
    map_value_type{raii{8}, raii{8}},
    map_value_type{raii{9}, raii{9}},
    map_value_type{raii{10}, raii{10}},
    map_value_type{raii{9}, raii{9}},
    map_value_type{raii{8}, raii{8}},
    map_value_type{raii{7}, raii{7}},
    map_value_type{raii{6}, raii{6}},
    map_value_type{raii{5}, raii{5}},
    map_value_type{raii{4}, raii{4}},
    map_value_type{raii{3}, raii{3}},
    map_value_type{raii{2}, raii{2}},
    map_value_type{raii{1}, raii{1}},
    map_value_type{raii{0}, raii{0}},
  };

  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  auto v = std::vector<map_value_type>(values.begin(), values.end());

  {
    raii::reset_counts();

    map_type x(0, hasher(1), key_equal(2), allocator_type(3));

    thread_runner(v, [&x, &values](boost::span<map_value_type> s) {
      (void)s;
      x = values;
    });

    test_matches_reference(x, reference_map);
    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal(2));
    BOOST_TEST(x.get_allocator() == allocator_type(3));

    // each thread copy-constructed a full element set; all but the final
    // winning assignment's elements were destroyed again
    BOOST_TEST_EQ(raii::copy_constructor, num_threads * 2 * x.size());
    BOOST_TEST_EQ(raii::destructor, (num_threads - 1) * 2 * x.size());

    BOOST_TEST_EQ(raii::move_constructor, 0u);
    BOOST_TEST_EQ(raii::copy_assignment, 0u);
    BOOST_TEST_EQ(raii::move_assignment, 0u);
  }

  check_raii_counts();
}
// Two threads insert into map1/map2 while a third thread repeatedly
// copy-assigns the maps to each other. Verifies that assignment racing with
// insertion neither corrupts the maps nor leaks elements.
template <class G> void insert_and_assign(G gen, test::random_generator rg)
{
  std::thread t1, t2, t3;
  // the two inserter threads synchronize start and end; the assigner polls
  boost::latch start_latch(2), end_latch(2);

  auto v1 = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto v2 = v1;
  shuffle_values(v2);

  auto reference_map =
    boost::unordered_flat_map<raii, raii>(v1.begin(), v1.end());

  raii::reset_counts();
  {
    map_type map1(v1.size(), hasher(1), key_equal(2), allocator_type(3));
    map_type map2(v2.size(), hasher(1), key_equal(2), allocator_type(3));

    t1 = std::thread([&v1, &map1, &start_latch, &end_latch] {
      start_latch.arrive_and_wait();
      for (auto const& v : v1) {
        map1.insert(v);
      }
      end_latch.arrive_and_wait();
    });

    t2 = std::thread([&v2, &map2, &end_latch, &start_latch] {
      start_latch.arrive_and_wait();
      for (auto const& v : v2) {
        map2.insert(v);
      }
      end_latch.arrive_and_wait();
    });

    std::atomic<unsigned> num_assignments{0};
    t3 = std::thread([&map1, &map2, &end_latch, &num_assignments] {
      // wait until at least one map has content before assigning
      while (map1.empty() && map2.empty()) {
        std::this_thread::sleep_for(std::chrono::microseconds(10));
      }

      // ping-pong assignments while the inserters are still running
      do {
        map1 = map2;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        map2 = map1;
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        ++num_assignments;
      } while (!end_latch.try_wait());
    });

    t1.join();
    t2.join();
    t3.join();

    BOOST_TEST_GT(num_assignments, 0u);

    // contents are only fuzzily checked: assignments may have discarded
    // insertions that happened on the other map
    test_fuzzy_matches_reference(map1, reference_map, rg);
    test_fuzzy_matches_reference(map2, reference_map, rg);
  }
  check_raii_counts();
}
} // namespace
// Register each test over every value generator / random-generator pairing.
// clang-format off
UNORDERED_TEST(
  copy_assign,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  move_assign,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(
  insert_and_assign,
  ((init_type_generator))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()

126
test/cfoa/clear_tests.cpp Normal file
View File

@ -0,0 +1,126 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "helpers.hpp"
#include <boost/unordered/concurrent_flat_map.hpp>
// fixed seed so failures reproduce deterministically
test::seed_t initialize_seed{674140082};

using test::default_generator;
using test::limited_range;
using test::sequential;

// stateful function objects and allocator let the tests check that specific
// hasher/key_equal/allocator instances are carried by the container
using hasher = stateful_hash;
using key_equal = stateful_key_equal;
using allocator_type = stateful_allocator<std::pair<raii const, raii> >;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, allocator_type>;

using map_value_type = typename map_type::value_type;
namespace {
template <class G> void clear_tests(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
raii::reset_counts();
map_type x(values.begin(), values.end(), values.size(), hasher(1),
key_equal(2), allocator_type(3));
auto const old_size = x.size();
auto const old_d = +raii::destructor;
thread_runner(values, [&x](boost::span<map_value_type> s) {
(void)s;
x.clear();
});
BOOST_TEST(x.empty());
BOOST_TEST_EQ(raii::destructor, old_d + 2 * old_size);
check_raii_counts();
}
// One thread inserts all values while another repeatedly clears the map,
// paced by a condition variable so clears interleave with the insertions.
template <class G> void insert_and_clear(G gen, test::random_generator rg)
{
  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  raii::reset_counts();

  std::thread t1, t2;

  {
    map_type x(0, hasher(1), key_equal(2), allocator_type(3));

    std::mutex m;
    std::condition_variable cv;
    std::atomic<bool> done{false};
    std::atomic<unsigned> num_clears{0};
    bool ready = false;

    // inserter: signals the clearer every values.size()/128 insertions
    t1 = std::thread([&x, &values, &cv, &done, &m, &ready] {
      for (auto i = 0u; i < values.size(); ++i) {
        x.insert(values[i]);
        if (i % (values.size() / 128) == 0) {
          {
            std::unique_lock<std::mutex> lk(m);
            ready = true;
          }
          cv.notify_all();
        }
      }
      done = true;
      // final wake-up so the clearer can observe done and exit
      {
        std::unique_lock<std::mutex> lk(m);
        ready = true;
      }
      cv.notify_all();
    });

    // clearer: waits for a signal, clears, repeats until insertion is done
    t2 = std::thread([&x, &m, &cv, &done, &ready, &num_clears] {
      do {
        {
          std::unique_lock<std::mutex> lk(m);
          cv.wait(lk, [&ready] { return ready; });
          ready = false;
        }
        x.clear();
        ++num_clears;
      } while (!done);
    });

    t1.join();
    t2.join();

    BOOST_TEST_GE(num_clears, 1u);

    // surviving contents (if any) must be a subset consistent with the input
    if (!x.empty()) {
      test_fuzzy_matches_reference(x, reference_map, rg);
    }
  }
  check_raii_counts();
}
} // namespace
// Register the clear tests for every value generator / random-generator pair.
// clang-format off
UNORDERED_TEST(
  clear_tests,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))

UNORDERED_TEST(insert_and_clear,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
// clang-format on

RUN_TESTS()

View File

@ -0,0 +1,823 @@
// Copyright (C) 2023 Christian Mazakas
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include "helpers.hpp"
#include <boost/unordered/concurrent_flat_map.hpp>
// fixed seed so failures reproduce deterministically
test::seed_t initialize_seed(4122023);

using test::default_generator;
using test::limited_range;
using test::sequential;
// Minimal allocator whose select_on_container_copy_construction() result is
// observable: it hands back a copy with the tag x_ bumped by one, letting
// tests verify that container copy construction actually consults SOCCC.
template <class T> struct soccc_allocator
{
  int x_ = -1; // observable tag; -1 marks a default-constructed allocator

  using value_type = T;

  soccc_allocator() = default;
  soccc_allocator(soccc_allocator const&) = default;
  soccc_allocator(soccc_allocator&&) = default;

  soccc_allocator(int const x) : x_{x} {}

  // rebinding conversion preserves the tag
  template <class U>
  soccc_allocator(soccc_allocator<U> const& other) : x_{other.x_}
  {
  }

  T* allocate(std::size_t n)
  {
    return static_cast<T*>(::operator new(n * sizeof(T)));
  }

  void deallocate(T* p, std::size_t) { ::operator delete(p); }

  // deliberately returns a *different* allocator (tag + 1) so the call site
  // is detectable via operator!=
  soccc_allocator select_on_container_copy_construction() const
  {
    return {x_ + 1};
  }

  bool operator==(soccc_allocator const& rhs) const { return x_ == rhs.x_; }
  bool operator!=(soccc_allocator const& rhs) const { return !(*this == rhs); }
};
// stateful components so constructor tests can verify that the supplied
// hasher/key_equal/allocator instances end up in the constructed map
using hasher = stateful_hash;
using key_equal = stateful_key_equal;
using allocator_type = stateful_allocator<std::pair<raii const, raii> >;

using map_type = boost::unordered::concurrent_flat_map<raii, raii, hasher,
  key_equal, allocator_type>;

using map_value_type = typename map_type::value_type;
// a default-constructed map starts empty
UNORDERED_AUTO_TEST (default_constructor) {
  boost::unordered::concurrent_flat_map<raii, raii> x;
  BOOST_TEST(x.empty());
  BOOST_TEST_EQ(x.size(), 0u);
}
// Exercises each bucket-count constructor overload, adding one trailing
// argument at a time, and checks the supplied objects are stored.
UNORDERED_AUTO_TEST (bucket_count_with_hasher_key_equal_and_allocator) {
  raii::reset_counts();
  {
    map_type x(0);

    BOOST_TEST(x.empty());
    BOOST_TEST_EQ(x.size(), 0u);

    BOOST_TEST_EQ(x.hash_function(), hasher());
    BOOST_TEST_EQ(x.key_eq(), key_equal());
  }

  {
    map_type x(0, hasher(1));

    BOOST_TEST(x.empty());
    BOOST_TEST_EQ(x.size(), 0u);

    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal());
  }

  {
    map_type x(0, hasher(1), key_equal(2));

    BOOST_TEST(x.empty());
    BOOST_TEST_EQ(x.size(), 0u);

    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal(2));
  }

  {
    map_type x(0, hasher(1), key_equal(2), allocator_type{});

    BOOST_TEST(x.empty());
    BOOST_TEST_EQ(x.size(), 0u);

    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal(2));
    BOOST_TEST(x.get_allocator() == allocator_type{});
  }
}
// Copy construction must obtain the new allocator via
// select_on_container_copy_construction(); with soccc_allocator that yields
// an allocator that compares unequal to the source's.
UNORDERED_AUTO_TEST (soccc) {
  raii::reset_counts();

  boost::unordered::concurrent_flat_map<raii, raii, hasher, key_equal,
    soccc_allocator<std::pair<raii const, raii> > >
    x;

  boost::unordered::concurrent_flat_map<raii, raii, hasher, key_equal,
    soccc_allocator<std::pair<raii const, raii> > >
    y(x);

  BOOST_TEST_EQ(y.hash_function(), x.hash_function());
  BOOST_TEST_EQ(y.key_eq(), x.key_eq());
  // unequal proves SOCCC was called (it increments the tag)
  BOOST_TEST(y.get_allocator() != x.get_allocator());
}
namespace {
// Exercises every iterator-range constructor overload, adding one trailing
// argument at a time (bucket count, hasher, key_equal, allocator), and
// checks contents and stored function objects after each.
template <class G> void from_iterator_range(G gen, test::random_generator rg)
{
  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  raii::reset_counts();

  {
    map_type x(values.begin(), values.end());

    test_matches_reference(x, reference_map);
    // duplicate keys may collapse, so size is bounded by the input count;
    // sequential generation guarantees unique keys
    BOOST_TEST_GT(x.size(), 0u);
    BOOST_TEST_LE(x.size(), values.size());
    BOOST_TEST_EQ(x.hash_function(), hasher());
    BOOST_TEST_EQ(x.key_eq(), key_equal());
    BOOST_TEST(x.get_allocator() == allocator_type{});
    if (rg == sequential) {
      BOOST_TEST_EQ(x.size(), values.size());
    }
  }

  {
    map_type x(values.begin(), values.end(), 0);

    test_matches_reference(x, reference_map);
    BOOST_TEST_GT(x.size(), 0u);
    BOOST_TEST_LE(x.size(), values.size());
    BOOST_TEST_EQ(x.hash_function(), hasher());
    BOOST_TEST_EQ(x.key_eq(), key_equal());
    BOOST_TEST(x.get_allocator() == allocator_type{});
    if (rg == sequential) {
      BOOST_TEST_EQ(x.size(), values.size());
    }
  }

  {
    map_type x(values.begin(), values.end(), 0, hasher(1));

    test_matches_reference(x, reference_map);
    BOOST_TEST_GT(x.size(), 0u);
    BOOST_TEST_LE(x.size(), values.size());
    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal());
    BOOST_TEST(x.get_allocator() == allocator_type{});
    if (rg == sequential) {
      BOOST_TEST_EQ(x.size(), values.size());
    }
  }

  {
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2));

    test_matches_reference(x, reference_map);
    BOOST_TEST_GT(x.size(), 0u);
    BOOST_TEST_LE(x.size(), values.size());
    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal(2));
    BOOST_TEST(x.get_allocator() == allocator_type{});
    if (rg == sequential) {
      BOOST_TEST_EQ(x.size(), values.size());
    }
  }

  {
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
      allocator_type{});

    test_matches_reference(x, reference_map);
    BOOST_TEST_GT(x.size(), 0u);
    BOOST_TEST_LE(x.size(), values.size());
    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal(2));
    BOOST_TEST(x.get_allocator() == allocator_type{});
    if (rg == sequential) {
      BOOST_TEST_EQ(x.size(), values.size());
    }
  }
  check_raii_counts();
}
// Copy-constructs from a shared map while other threads do the same,
// both with and without an explicit allocator argument.
template <class G> void copy_constructor(G gen, test::random_generator rg)
{
  {
    map_type x(0, hasher(1), key_equal(2), allocator_type{});
    map_type y(x);

    BOOST_TEST_EQ(y.size(), x.size());
    BOOST_TEST_EQ(y.hash_function(), x.hash_function());
    BOOST_TEST_EQ(y.key_eq(), x.key_eq());
    BOOST_TEST(y.get_allocator() == x.get_allocator());
  }

  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  raii::reset_counts();

  // plain copy constructor, concurrently from many threads
  {
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
      allocator_type{});

    thread_runner(
      values, [&x, &reference_map](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;
        map_type y(x);

        test_matches_reference(x, reference_map);
        test_matches_reference(y, reference_map);
        BOOST_TEST_EQ(y.size(), x.size());
        BOOST_TEST_EQ(y.hash_function(), x.hash_function());
        BOOST_TEST_EQ(y.key_eq(), x.key_eq());
        BOOST_TEST(y.get_allocator() == x.get_allocator());
      });
  }

  check_raii_counts();

  raii::reset_counts();

  // allocator-extended copy constructor
  {
    allocator_type a;
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2), a);

    thread_runner(
      values, [&x, &reference_map, a](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;
        map_type y(x, a);

        test_matches_reference(x, reference_map);
        test_matches_reference(y, reference_map);
        BOOST_TEST_EQ(y.size(), x.size());
        BOOST_TEST_EQ(y.hash_function(), x.hash_function());
        BOOST_TEST_EQ(y.key_eq(), x.key_eq());
        BOOST_TEST(y.get_allocator() == x.get_allocator());
      });
  }

  check_raii_counts();
}
// Copy-constructs from a map while two other threads are still inserting
// into it: the copy must be a coherent snapshot (a subset of the inputs).
template <class G>
void copy_constructor_with_insertion(G gen, test::random_generator rg)
{
  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  raii::reset_counts();

  std::mutex m;
  std::condition_variable cv;
  bool ready = false;

  {
    map_type x(0, hasher(1), key_equal(2), allocator_type{});

    // inserter: releases the copying threads once insertion has begun
    auto f = [&x, &values, &m, &cv, &ready] {
      {
        std::lock_guard<std::mutex> guard(m);
        ready = true;
      }
      cv.notify_all();

      for (auto const& val : values) {
        x.insert(val);
      }
    };

    std::thread t1(f);
    std::thread t2(f);

    thread_runner(
      values, [&x, &reference_map, &values, rg, &m, &cv, &ready](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;

        // wait until at least one inserter is running
        {
          std::unique_lock<std::mutex> lk(m);
          cv.wait(lk, [&] { return ready; });
        }

        map_type y(x);
        BOOST_TEST_LE(y.size(), values.size());
        BOOST_TEST_EQ(y.hash_function(), x.hash_function());
        BOOST_TEST_EQ(y.key_eq(), x.key_eq());
        BOOST_TEST(y.get_allocator() == x.get_allocator());

        // everything currently in x must come from the input set
        x.visit_all([&reference_map, rg](
                      typename map_type::value_type const& val) {
          BOOST_TEST(reference_map.contains(val.first));
          if (rg == sequential) {
            BOOST_TEST_EQ(val.second, reference_map.find(val.first)->second);
          }
        });
      });

    t1.join();
    t2.join();
  }

  check_raii_counts();
}
// Move-constructs from a shared map under contention. Exactly one thread can
// win the transfer (num_transfers); the allocator-extended overload is
// checked with both unequal allocators (forces element-wise moves) and
// equal allocators (wholesale transfer, no element moves).
template <class G> void move_constructor(G gen, test::random_generator rg)
{
  {
    map_type x(0, hasher(1), key_equal(2), allocator_type{});

    auto const old_size = x.size();
    map_type y(std::move(x));

    BOOST_TEST_EQ(y.size(), old_size);
    BOOST_TEST_EQ(y.hash_function(), hasher(1));
    BOOST_TEST_EQ(y.key_eq(), key_equal(2));

    // moved-from map is left empty with default function objects
    BOOST_TEST_EQ(x.size(), 0u);
    BOOST_TEST_EQ(x.hash_function(), hasher());
    BOOST_TEST_EQ(x.key_eq(), key_equal());

    BOOST_TEST(y.get_allocator() == x.get_allocator());
  }

  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  raii::reset_counts();

  // move constructor, raced by many threads
  {
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
      allocator_type{});

    std::atomic_uint num_transfers{0};

    auto const old_mc = +raii::move_constructor;

    thread_runner(
      values, [&x, &reference_map, &num_transfers](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;

        auto const old_size = x.size();
        map_type y(std::move(x));
        // only the winning thread gets a non-empty y
        if (!y.empty()) {
          ++num_transfers;
          test_matches_reference(y, reference_map);

          BOOST_TEST_EQ(y.size(), old_size);
          BOOST_TEST_EQ(y.hash_function(), hasher(1));
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.size(), 0u);
          BOOST_TEST_EQ(y.hash_function(), hasher());
          BOOST_TEST_EQ(y.key_eq(), key_equal());
        }

        BOOST_TEST_EQ(x.size(), 0u);
        BOOST_TEST_EQ(x.hash_function(), hasher());
        BOOST_TEST_EQ(x.key_eq(), key_equal());

        BOOST_TEST(y.get_allocator() == x.get_allocator());
      });

    BOOST_TEST_EQ(num_transfers, 1u);
    // a transfer moves buckets, not elements
    BOOST_TEST_EQ(raii::move_constructor, old_mc);
  }
  check_raii_counts();

  // allocator-aware move constructor, unequal allocators
  raii::reset_counts();
  {
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
      allocator_type{1});

    std::atomic_uint num_transfers{0};

    auto const old_mc = +raii::move_constructor;
    auto const old_size = x.size();

    thread_runner(
      values, [&x, &reference_map, &num_transfers, old_size](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;

        auto a = allocator_type{2};
        BOOST_TEST(a != x.get_allocator());

        map_type y(std::move(x), a);
        if (!y.empty()) {
          ++num_transfers;
          test_matches_reference(y, reference_map);

          BOOST_TEST_EQ(y.size(), old_size);
          BOOST_TEST_EQ(y.hash_function(), hasher(1));
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.size(), 0u);
          BOOST_TEST_EQ(y.hash_function(), hasher());
          BOOST_TEST_EQ(y.key_eq(), key_equal());
        }

        BOOST_TEST_EQ(x.size(), 0u);
        BOOST_TEST_EQ(x.hash_function(), hasher());
        BOOST_TEST_EQ(x.key_eq(), key_equal());

        BOOST_TEST(y.get_allocator() != x.get_allocator());
        BOOST_TEST(y.get_allocator() == a);
      });

    BOOST_TEST_EQ(num_transfers, 1u);
    // unequal allocators force per-element moves (key + value each)
    BOOST_TEST_EQ(raii::move_constructor, old_mc + (2 * old_size));
  }
  check_raii_counts();

  // allocator-aware move constructor, equal allocators
  raii::reset_counts();
  {
    map_type x(values.begin(), values.end(), 0, hasher(1), key_equal(2),
      allocator_type{1});

    std::atomic_uint num_transfers{0};

    auto const old_mc = +raii::move_constructor;
    auto const old_size = x.size();

    thread_runner(
      values, [&x, &reference_map, &num_transfers, old_size](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;

        auto a = allocator_type{1};
        BOOST_TEST(a == x.get_allocator());

        map_type y(std::move(x), a);
        if (!y.empty()) {
          ++num_transfers;
          test_matches_reference(y, reference_map);

          BOOST_TEST_EQ(y.size(), old_size);
          BOOST_TEST_EQ(y.hash_function(), hasher(1));
          BOOST_TEST_EQ(y.key_eq(), key_equal(2));
        } else {
          BOOST_TEST_EQ(y.size(), 0u);
          BOOST_TEST_EQ(y.hash_function(), hasher());
          BOOST_TEST_EQ(y.key_eq(), key_equal());
        }

        BOOST_TEST_EQ(x.size(), 0u);
        BOOST_TEST_EQ(x.hash_function(), hasher());
        BOOST_TEST_EQ(x.key_eq(), key_equal());

        BOOST_TEST(y.get_allocator() == x.get_allocator());
        BOOST_TEST(y.get_allocator() == a);
      });

    BOOST_TEST_EQ(num_transfers, 1u);
    // equal allocators permit a transfer: no element moves at all
    BOOST_TEST_EQ(raii::move_constructor, old_mc);
  }
  check_raii_counts();
}
// Move-constructs from a map while an inserter thread is still filling it.
// A helper thread gates the movers until the map is non-empty, so at least
// one mover must end up with a non-empty result.
template <class G>
void move_constructor_with_insertion(G gen, test::random_generator rg)
{
  auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
  auto reference_map =
    boost::unordered_flat_map<raii, raii>(values.begin(), values.end());

  raii::reset_counts();

  std::mutex m;
  std::condition_variable cv;
  bool ready = false;

  {
    map_type x(0, hasher(1), key_equal(2), allocator_type{});

    std::atomic_uint num_transfers{0};

    std::thread t1([&x, &values] {
      for (auto const& val : values) {
        x.insert(val);
      }
    });

    // signal the movers once the first insertion is visible
    std::thread t2([&x, &m, &cv, &ready] {
      while (x.empty()) {
        std::this_thread::yield();
      }

      {
        std::lock_guard<std::mutex> guard(m);
        ready = true;
      }
      cv.notify_all();
    });

    thread_runner(
      values, [&x, &reference_map, &num_transfers, rg, &m, &ready, &cv](
                boost::span<span_value_type<decltype(values)> > s) {
        (void)s;

        {
          std::unique_lock<std::mutex> lk(m);
          cv.wait(lk, [&] { return ready; });
        }

        map_type y(std::move(x));
        if (!y.empty()) {
          ++num_transfers;

          // whatever was transferred must be a subset of the inputs
          y.cvisit_all([&reference_map, rg](map_value_type const& val) {
            BOOST_TEST(reference_map.contains(val.first));
            if (rg == sequential) {
              BOOST_TEST_EQ(
                val.second, reference_map.find(val.first)->second);
            }
          });
        }
      });

    t1.join();
    t2.join();

    // multiple movers can observe non-empty snapshots while insertion runs
    BOOST_TEST_GE(num_transfers, 1u);
  }

  check_raii_counts();
}
template <class G>
void iterator_range_with_allocator(G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
raii::reset_counts();
{
allocator_type a;
map_type x(values.begin(), values.end(), a);
BOOST_TEST_GT(x.size(), 0u);
BOOST_TEST_LE(x.size(), values.size());
if (rg == sequential) {
BOOST_TEST_EQ(x.size(), values.size());
}
BOOST_TEST_EQ(x.hash_function(), hasher());
BOOST_TEST_EQ(x.key_eq(), key_equal());
BOOST_TEST(x.get_allocator() == a);
test_fuzzy_matches_reference(x, reference_map, rg);
}
check_raii_counts();
}
// Allocator-only constructor: yields an empty map with default-constructed
// hasher and key_equal and the supplied allocator.
UNORDERED_AUTO_TEST (explicit_allocator) {
  raii::reset_counts();
  {
    allocator_type alloc;
    map_type built(alloc);
    BOOST_TEST_EQ(built.size(), 0u);
    BOOST_TEST_EQ(built.hash_function(), hasher());
    BOOST_TEST_EQ(built.key_eq(), key_equal());
    BOOST_TEST(built.get_allocator() == alloc);
  }
}
// Exercises every initializer_list constructor overload. The list below is
// built to contain deliberate duplicates (22 entries; raii{6} appears three
// times) so each constructor must deduplicate down to the 11 unique keys
// 0..10. Note reset_counts() runs inside each scope, after the list itself
// is built, so the raii counters only reflect the map construction.
UNORDERED_AUTO_TEST (initializer_list_with_all_params) {
  // hard-code 11 unique values
  std::initializer_list<map_value_type> ilist{
    map_value_type{raii{0}, raii{0}},
    map_value_type{raii{1}, raii{1}},
    map_value_type{raii{2}, raii{2}},
    map_value_type{raii{3}, raii{3}},
    map_value_type{raii{4}, raii{4}},
    map_value_type{raii{5}, raii{5}},
    map_value_type{raii{6}, raii{6}},
    map_value_type{raii{6}, raii{6}},
    map_value_type{raii{7}, raii{7}},
    map_value_type{raii{8}, raii{8}},
    map_value_type{raii{9}, raii{9}},
    map_value_type{raii{10}, raii{10}},
    map_value_type{raii{9}, raii{9}},
    map_value_type{raii{8}, raii{8}},
    map_value_type{raii{7}, raii{7}},
    map_value_type{raii{6}, raii{6}},
    map_value_type{raii{5}, raii{5}},
    map_value_type{raii{4}, raii{4}},
    map_value_type{raii{3}, raii{3}},
    map_value_type{raii{2}, raii{2}},
    map_value_type{raii{1}, raii{1}},
    map_value_type{raii{0}, raii{0}},
  };
  // (ilist, bucket_count, hasher, key_equal, allocator)
  {
    raii::reset_counts();
    map_type x(ilist, 0, hasher(1), key_equal(2), allocator_type(3));
    BOOST_TEST_EQ(x.size(), 11u);
    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal(2));
    BOOST_TEST(x.get_allocator() == allocator_type(3));
    BOOST_TEST_EQ(raii::default_constructor, 0u);
    // two raii copies (key + mapped value) per list entry, duplicates incl.
    BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size());
    // presumably only the 11 unique entries are moved into the table slots
    BOOST_TEST_EQ(raii::move_constructor, 2 * 11u);
  }
  check_raii_counts();
  // (ilist, allocator)
  {
    raii::reset_counts();
    map_type x(ilist, allocator_type(3));
    BOOST_TEST_EQ(x.size(), 11u);
    BOOST_TEST_EQ(x.hash_function(), hasher());
    BOOST_TEST_EQ(x.key_eq(), key_equal());
    BOOST_TEST(x.get_allocator() == allocator_type(3));
    BOOST_TEST_EQ(raii::default_constructor, 0u);
    BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size());
    BOOST_TEST_EQ(raii::move_constructor, 2 * 11u);
  }
  check_raii_counts();
  // (ilist, bucket_count, allocator)
  {
    raii::reset_counts();
    map_type x(ilist, 0, allocator_type(3));
    BOOST_TEST_EQ(x.size(), 11u);
    BOOST_TEST_EQ(x.hash_function(), hasher());
    BOOST_TEST_EQ(x.key_eq(), key_equal());
    BOOST_TEST(x.get_allocator() == allocator_type(3));
    BOOST_TEST_EQ(raii::default_constructor, 0u);
    BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size());
    BOOST_TEST_EQ(raii::move_constructor, 2 * 11u);
  }
  check_raii_counts();
  // (ilist, bucket_count, hasher, allocator) — key_equal defaulted
  {
    raii::reset_counts();
    map_type x(ilist, 0, hasher(1), allocator_type(3));
    BOOST_TEST_EQ(x.size(), 11u);
    BOOST_TEST_EQ(x.hash_function(), hasher(1));
    BOOST_TEST_EQ(x.key_eq(), key_equal());
    BOOST_TEST(x.get_allocator() == allocator_type(3));
    BOOST_TEST_EQ(raii::default_constructor, 0u);
    BOOST_TEST_EQ(raii::copy_constructor, 2 * ilist.size());
    BOOST_TEST_EQ(raii::move_constructor, 2 * 11u);
  }
  check_raii_counts();
}
// (bucket_count, allocator) constructor: an empty map with default function
// objects and the given allocator, regardless of the initial bucket count.
UNORDERED_AUTO_TEST (bucket_count_and_allocator) {
  raii::reset_counts();
  // same invariants for a zero and a large initial bucket count
  for (auto bucket_count : {0u, 4096u}) {
    map_type built(bucket_count, allocator_type(3));
    BOOST_TEST_EQ(built.size(), 0u);
    BOOST_TEST_EQ(built.hash_function(), hasher());
    BOOST_TEST_EQ(built.key_eq(), key_equal());
    BOOST_TEST(built.get_allocator() == allocator_type(3));
  }
}
// (bucket_count, hasher, allocator) constructor: empty map that keeps the
// supplied hasher and allocator while key_equal is defaulted.
UNORDERED_AUTO_TEST (bucket_count_with_hasher_and_allocator) {
  raii::reset_counts();
  {
    hasher hash_fn(1);
    allocator_type alloc(3);
    map_type built(0, hash_fn, alloc);
    BOOST_TEST_EQ(built.size(), 0u);
    BOOST_TEST_EQ(built.hash_function(), hash_fn);
    BOOST_TEST_EQ(built.key_eq(), key_equal());
    BOOST_TEST(built.get_allocator() == alloc);
  }
}
template <class G>
void iterator_range_with_bucket_count_and_allocator(
G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
raii::reset_counts();
{
allocator_type a(3);
map_type x(values.begin(), values.end(), 0, a);
test_fuzzy_matches_reference(x, reference_map, rg);
BOOST_TEST_EQ(x.hash_function(), hasher());
BOOST_TEST_EQ(x.key_eq(), key_equal());
BOOST_TEST(x.get_allocator() == a);
}
check_raii_counts();
}
template <class G>
void iterator_range_with_bucket_count_hasher_and_allocator(
G gen, test::random_generator rg)
{
auto values = make_random_values(1024 * 16, [&] { return gen(rg); });
auto reference_map =
boost::unordered_flat_map<raii, raii>(values.begin(), values.end());
raii::reset_counts();
{
allocator_type a(3);
hasher hf(1);
map_type x(values.begin(), values.end(), 0, hf, a);
test_fuzzy_matches_reference(x, reference_map, rg);
BOOST_TEST_EQ(x.hash_function(), hf);
BOOST_TEST_EQ(x.key_eq(), key_equal());
BOOST_TEST(x.get_allocator() == a);
}
check_raii_counts();
}
} // namespace
// clang-format off
// Instantiate each parameterized constructor test for every combination of
// value generator and random-generation mode (default / sequential /
// limited_range duplicates).
UNORDERED_TEST(
  from_iterator_range,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  copy_constructor,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  copy_constructor_with_insertion,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  move_constructor,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  move_constructor_with_insertion,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  iterator_range_with_allocator,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  iterator_range_with_bucket_count_and_allocator,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
UNORDERED_TEST(
  iterator_range_with_bucket_count_hasher_and_allocator,
  ((value_type_generator))
  ((default_generator)(sequential)(limited_range)))
// clang-format on
RUN_TESTS()

Some files were not shown because too many files have changed in this diff Show More