mirror of
https://github.com/boostorg/unordered.git
synced 2025-07-31 11:57:15 +02:00
Add the build and documentation source files required for building the
standalone unordered containers. [SVN r4046]
This commit is contained in:
Jamfile.v2 (new file, 14 lines)
@ -0,0 +1,14 @@
# (C) Copyright Daniel James 2005-2006.
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

project /unordered
    : requirements
        <include>.
        <include>$(BOOST_ROOT)
    : usage-requirements
        <include>.
        <include>$(BOOST_ROOT)
    ;

#use-project /boost/test : $(BOOST_ROOT)/libs/test/build ;
@ -1,9 +1,15 @@
project boost/doc ;
import boostbook : boostbook ;

# Copyright 2005 Daniel James.
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
boostbook doc : src/boost.xml
:

using quickbook ;
## Build the various generated docs (Doxygen and QuickBook)...

xml unordered : unordered.qbk ;
boostbook standalone : unordered ;
<dependency>../libs/unordered/doc//unordered
<implicit-dependency>../libs/unordered/doc//unordered
#<dependency>../libs/functional/hash/doc//hash
#<implicit-dependency>../libs/functional/hash/doc//hash

<xsl:param>boost.libraries=../../libs/libraries.htm
;
doc/buckets.qbk (147 lines)
@ -1,147 +0,0 @@
[/ Copyright 2006-2007 Daniel James.
 / Distributed under the Boost Software License, Version 1.0. (See accompanying
 / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ]

[section:buckets The Data Structure]

The containers are made up of a number of 'buckets', each of which can contain
any number of elements. For example, the following diagram shows an [classref
boost::unordered_set unordered_set] with 7 buckets containing 5 elements, `A`,
`B`, `C`, `D` and `E` (this is just for illustration; in practice containers
will have more buckets).

[$../../libs/unordered/doc/diagrams/buckets.png]

In order to decide which bucket to place an element in, the container applies
`Hash` to the element's key (for `unordered_set` and `unordered_multiset` the
key is the whole element, but it is referred to as the key so that the same
terminology can be used for sets and maps). This gives a `std::size_t`.
`std::size_t` has a much greater range of values than the number of buckets, so
the container applies another transformation to that value to choose a bucket
to place the element in.

If at a later date the container wants to find an element in the container it
just has to apply the same process to the element's key to discover which
bucket to find it in. This means that you only have to look at the elements
within a single bucket. If the hash function has worked well the elements will
be evenly distributed amongst the buckets.
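
Roughly, the bucket choice can be thought of as a two step process. A
simplified sketch, not the library's actual code - the second step's
transformation is an implementation detail, and modulo is just a typical
choice:

    #include <boost/functional/hash.hpp>
    #include <cstddef>
    #include <string>

    // Step 1: apply Hash to the key; step 2: reduce the hash value to a
    // bucket index, here with a simple modulo of the bucket count.
    std::size_t bucket_for(std::string const& key, std::size_t bucket_count)
    {
        boost::hash<std::string> hasher;
        std::size_t hash_value = hasher(key);
        return hash_value % bucket_count;
    }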

You can see in the diagram that `A` & `D` have been placed in the same bucket.
This means that when looking in this bucket, up to 2 comparisons have to be
made, making searching slower. This is known as a collision. To keep things
fast we try to keep collisions to a minimum.

[table Methods for Accessing Buckets
    [[Method] [Description]]

    [
        [``size_type bucket_count() const``]
        [The number of buckets.]
    ]
    [
        [``size_type max_bucket_count() const``]
        [An upper bound on the number of buckets.]
    ]
    [
        [``size_type bucket_size(size_type n) const``]
        [The number of elements in bucket `n`.]
    ]
    [
        [``size_type bucket(key_type const& k) const``]
        [Returns the index of the bucket which would contain `k`.]
    ]
    [
        [``
        local_iterator begin(size_type n);
        local_iterator end(size_type n);
        const_local_iterator begin(size_type n) const;
        const_local_iterator end(size_type n) const;
        ``]
        [Return begin and end iterators for bucket `n`.]
    ]
]
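
For instance, these members can be combined to see how the elements are spread
over the buckets. A small sketch (the inserted values are only for
illustration):

    #include <boost/unordered_set.hpp>
    #include <cstddef>
    #include <iostream>
    #include <string>

    int main()
    {
        boost::unordered_set<std::string> words;
        words.insert("A"); words.insert("B"); words.insert("C");

        // Find the bucket "A" maps to, then walk that bucket with local
        // iterators to see which elements share it.
        std::size_t n = words.bucket("A");
        std::cout << "bucket " << n << " of " << words.bucket_count()
                  << " contains " << words.bucket_size(n) << " element(s)\n";
        for(boost::unordered_set<std::string>::local_iterator
                it = words.begin(n), end = words.end(n); it != end; ++it)
        {
            std::cout << "  " << *it << "\n";
        }
    }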

[h2 Controlling the number of buckets]

As more elements are added to an unordered associative container, the number
of elements in each bucket will increase, causing performance to get worse. To
combat this the containers increase the bucket count as elements are inserted.

The standard gives you two methods to influence the bucket count. First, you
can specify the minimum number of buckets in the constructor, and later by
calling `rehash`.

The other method is the `max_load_factor` member function. The 'load factor'
is the average number of elements per bucket, and `max_load_factor` can be used
to give a /hint/ of a value that the load factor should be kept below. The
draft standard doesn't actually require the container to pay much attention
to this value. The only time the load factor is /required/ to be less than the
maximum is following a call to `rehash`. But most implementations will probably
try to keep the load factor below the maximum, and set the maximum load factor
to something the same as or near to your hint - unless your hint is
unreasonably small.

It is not specified anywhere how member functions other than `rehash` affect
the bucket count, although `insert` is only allowed to invalidate iterators
when the insertion causes the load factor to reach the maximum. In practice
this means that `insert` will typically only change the number of buckets when
an insertion would push the load factor past the maximum.

In a similar manner to using `reserve` for `vector`s, it can be a good idea
to call `rehash` before inserting a large number of elements. This will get
the expensive rehashing out of the way and let you store iterators, safe in
the knowledge that they won't be invalidated. If you are inserting `n`
elements into container `x`, you could first call:

    x.rehash((x.size() + n) / x.max_load_factor() + 1);

[blurb Note: `rehash`'s argument is the number of buckets, not the number of
elements, which is why the new size is divided by the maximum load factor. The
`+ 1` is required because the container is allowed to resize when the load
factor is equal to the maximum load factor.]
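
Putting that together, a pre-sizing sketch might look like this (the element
count of 1000 is only for illustration):

    #include <boost/unordered_set.hpp>
    #include <cstddef>

    int main()
    {
        boost::unordered_set<int> x;
        std::size_t n = 1000; // number of elements about to be inserted

        // Reserve enough buckets up front so that the inserts below
        // shouldn't trigger a rehash or invalidate iterators.
        x.rehash(static_cast<std::size_t>(
            (x.size() + n) / x.max_load_factor()) + 1);

        for(int i = 0; i < 1000; ++i)
            x.insert(i);
    }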

[table Methods for Controlling Bucket Size
    [[Method] [Description]]

    [
        [``float load_factor() const``]
        [The average number of elements per bucket.]
    ]
    [
        [``float max_load_factor() const``]
        [Returns the current maximum load factor.]
    ]
    [
        [``float max_load_factor(float z)``]
        [Changes the container's maximum load factor, using `z` as a hint.]
    ]
    [
        [``void rehash(size_type n)``]
        [Changes the number of buckets so that there are at least `n` buckets,
        and so that the load factor is less than the maximum load factor.]
    ]
]
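
For example, the maximum load factor hint can be raised to trade slower
lookups for fewer rehashes. A sketch - how closely the hint is followed is up
to the implementation:

    #include <boost/unordered_map.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        boost::unordered_map<std::string, int> m;
        m.max_load_factor(2.0f); // hint: allow roughly 2 elements per bucket

        m["a"] = 1; m["b"] = 2; m["c"] = 3;

        std::cout << "load factor:     " << m.load_factor() << "\n"
                  << "max load factor: " << m.max_load_factor() << "\n";
    }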

[/ I'm not at all happy with this section. So I've commented it out.]

[/ h2 Rehash Techniques]

[/If the container has a load factor much smaller than the maximum, `rehash`
might decrease the number of buckets, reducing the memory usage. This isn't
guaranteed by the standard but this implementation will do it.

If you want to stop the table from ever rehashing due to an insert, you can
set the maximum load factor to infinity (or perhaps a load factor that it'll
never reach - say `x.max_size()`). As you can only give a 'hint' for the
maximum load factor, this isn't guaranteed to work. But again, it'll work in
this implementation. (TODO: If an unordered container with infinite load factor
is copied, bad things could happen. So maybe this advice should be removed. Or
maybe the implementation should cope with that).

If you do this and want to make the container rehash, `rehash` will still work.
But be careful that you only ever call it with a sufficient number of buckets
- otherwise it's very likely that the container will decrease the bucket
count to an overly small amount.]

[endsect]
@ -1,162 +0,0 @@

[/ Copyright 2006-2007 Daniel James.
 / Distributed under the Boost Software License, Version 1.0. (See accompanying
 / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ]

[section:comparison Comparison with Associative Containers]

[table Interface differences.
    [[Associative Containers] [Unordered Associative Containers]]

    [
        [Parameterized by an ordering relation `Compare`]
        [Parameterized by a function object `Hash` and an equivalence relation
        `Pred`]
    ]
    [
        [`Compare` exposed by member typedef `key_compare`, accessed by member
        function `key_comp()`]
        [`Hash` exposed by member typedef `hasher`, accessed by member function
        `hash_function()`.\n`Pred` exposed by member typedef `key_equal`,
        accessed by member function `key_eq()`.]
    ]
    [
        [Member typedef `value_compare` supplies an ordering comparison for
        member elements, accessed by member function `value_comp()`.]
        [No equivalent. No idea why.]
    ]
    [/TODO: Mention a range? This is meant to be differences but this doesn't
     seem to be complete.]
    [
        [Constructors have optional extra parameters for the comparison
        object.]
        [Constructors have optional extra parameters for the initial minimum
        number of buckets, a hash function and an equality object.]
    ]
    [
        [Keys `k1`, `k2` are considered equivalent if
        `!Compare(k1, k2) && !Compare(k2, k1)`]
        [Keys `k1`, `k2` are considered equivalent if `Pred(k1, k2)`]
    ]
    [
        [Member functions `lower_bound(k)` and `upper_bound(k)`]
        [No equivalent. Since the elements aren't ordered, `lower_bound` and
        `upper_bound` would be meaningless.]
    ]
    [
        [`equal_range(k)` returns an empty range at the position that `k`
        would be inserted if `k` isn't present in the container.]
        [`equal_range(k)` returns a range at the end of the container if
        `k` isn't present in the container. It can't return a positioned
        range as `k` could be inserted into multiple places. To find out the
        bucket that `k` would be inserted into use `bucket(k)`. But remember
        that an insert can cause the container to rehash - meaning that the
        element can be inserted into a different bucket.]
    ]
    [
        [`iterator`, `const_iterator` are of the bidirectional category.]
        [`iterator`, `const_iterator` are of at least the forward category.]
    ]
    [
        [Inserts do not invalidate iterators or references to the container.]
        [Inserts can invalidate iterators but not references to the container.]
    ]
    [
        [Iterators iterate through the container in the order defined by
        the comparison object.]
        [Iterators iterate through the container in an arbitrary order that
        can change as elements are inserted, although equivalent elements
        are always adjacent.]
    ]
    [
        [No equivalent]
        [Local iterators can be used to iterate through individual buckets.
        (I don't think that the order of local iterators and iterators is
        required to have any correspondence.)]
    ]
    [
        [Can be compared using the `==`, `!=`, `<`, `<=`, `>`, `>=` operators.]
        [No comparison operators are defined.]
    ]
    [
        []
        [When inserting with a hint, implementations are permitted to ignore
        the hint.]
    ]
    [
        [`erase` never throws an exception.]
        [The container's hash or predicate function can throw exceptions
        from `erase`.]
    ]
]
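
To illustrate the `equal_range` row above, a small sketch. Both ranges are
empty because the key is absent; only where they point differs:

    #include <boost/unordered_map.hpp>
    #include <map>
    #include <string>
    #include <cassert>

    int main()
    {
        std::map<std::string, int> ordered;
        boost::unordered_map<std::string, int> unordered;

        // Neither container holds "missing", so both ranges are empty.
        assert(ordered.equal_range("missing").first ==
            ordered.equal_range("missing").second);

        // For the unordered container the empty range is at the end.
        assert(unordered.equal_range("missing").first == unordered.end());
    }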

[table Complexity Guarantees
    [[Operation] [Associative Containers] [Unordered Associative Containers]]
    [
        [Construction of an empty container]
        [constant]
        [/TODO: Do I meet this?]
        [O(/n/) where /n/ is the minimum number of buckets.]
    ]
    [
        [Construction of a container from a range of /N/ elements]
        [O(/N/ log /N/), O(/N/) if the range is sorted with `value_comp()`]
        [Average case O(/N/), worst case
        O(/N/'''<superscript>2</superscript>''')]
    ]
    [
        [Insert a single element]
        [logarithmic]
        [Average case constant, worst case linear]
    ]
    [
        [Insert a single element with a hint]
        [Amortized constant if the element is inserted right after the hint,
        logarithmic otherwise]
        [Average case constant, worst case linear (i.e. the same as
        a normal insert).]
    ]
    [
        [Inserting a range of /N/ elements]
        [/N/ log(`size()`+/N/)]
        [Average case O(/N/), worst case O(/N/ * `size()`)]
    ]
    [
        [Erase by key, `k`]
        [O(log(`size()`) + `count(k)`)]
        [Average case: O(`count(k)`), Worst case: O(`size()`)]
    ]
    [
        [Erase a single element by iterator]
        [Amortized constant]
        [Average case: O(1), Worst case: O(`size()`)]
    ]
    [
        [Erase a range of /N/ elements]
        [O(log(`size()`) + /N/)]
        [Average case: O(/N/), Worst case: O(`size()`)]
    ]
    [
        [Clearing the container]
        [O(`size()`)]
        [O(`size()`)]
    ]
    [
        [Find]
        [logarithmic]
        [Average case: O(1), Worst case: O(`size()`)]
    ]
    [
        [Count]
        [O(log(`size()`) + `count(k)`)]
        [Average case: O(1), Worst case: O(`size()`)]
    ]
    [
        [`equal_range(k)`]
        [logarithmic]
        [Average case: O(`count(k)`), Worst case: O(`size()`)]
    ]
    [
        [`lower_bound`, `upper_bound`]
        [logarithmic]
        [n/a]
    ]
]

[endsect]
Binary file not shown.
Binary file not shown.
@ -1,140 +0,0 @@
[/ Copyright 2006-2007 Daniel James.
 / Distributed under the Boost Software License, Version 1.0. (See accompanying
 / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ]

[section:hash_equality Equality Predicates and Hash Functions]

[/TODO: A better introduction to hash functions?]

While the associative containers use an ordering relation to specify how the
elements are stored, the unordered associative containers use an equality
predicate and a hash function. For example [classref boost::unordered_set]
is declared as:

    template<typename Value,
        typename Hash = ``[classref boost::hash]``<Value>,
        typename Pred = std::equal_to<Value>,
        typename Alloc = std::allocator<Value> >
    class ``[classref boost::unordered_set unordered_set]``;

The hash function comes first because you might want to change the hash
function but not the equality predicate, while if you were to change the
behaviour of the equality predicate you would have to change the hash function
to match it.

For example, if you wanted to use the
[@http://www.isthe.com/chongo/tech/comp/fnv/ FNV-1 hash] you could write:

    ``[classref boost::unordered_set]``<std::string, hash::fnv_1> words;

An example implementation of FNV-1, and some other hash functions, is supplied
in the examples directory.

Alternatively, you might wish to use a different equality function. If so, make
sure you use a hash function that matches it. For example, a
case-insensitive dictionary:

    struct iequal_to
        : std::binary_function<std::string, std::string, bool>
    {
        bool operator()(std::string const& x,
            std::string const& y) const
        {
            return boost::algorithm::iequals(x, y);
        }
    };

    struct ihash
        : std::unary_function<std::string, std::size_t>
    {
        std::size_t operator()(std::string const& x) const
        {
            std::size_t seed = 0;

            for(std::string::const_iterator it = x.begin();
                it != x.end(); ++it)
            {
                boost::hash_combine(seed, std::toupper(*it));
            }

            return seed;
        }
    };

    struct word_info;

    boost::unordered_map<std::string, word_info, ihash, iequal_to>
        idictionary;

A more generic version of this example is available at:
[@../../libs/unordered/examples/case_insensitive.hpp /libs/unordered/examples/case_insensitive.hpp]

[h2 Custom Types]

Similarly, a custom hash function can be used for custom types:

    struct point {
        int x;
        int y;
    };

    bool operator==(point const& p1, point const& p2)
    {
        return p1.x == p2.x && p1.y == p2.y;
    }

    struct point_hash
        : std::unary_function<point, std::size_t>
    {
        std::size_t operator()(point const& p) const
        {
            std::size_t seed = 0;
            boost::hash_combine(seed, p.x);
            boost::hash_combine(seed, p.y);
            return seed;
        }
    };

    boost::unordered_multiset<point, point_hash, std::equal_to<point> >
        points;

Although customizing Boost.Hash is probably a better solution:

    struct point {
        int x;
        int y;
    };

    bool operator==(point const& p1, point const& p2)
    {
        return p1.x == p2.x && p1.y == p2.y;
    }

    std::size_t hash_value(point const& p) {
        std::size_t seed = 0;
        boost::hash_combine(seed, p.x);
        boost::hash_combine(seed, p.y);
        return seed;
    }

    // Now the default functions work.
    boost::unordered_multiset<point> points;

See the Boost.Hash documentation for more detail on how to do this. Remember
that it relies on extensions to the draft standard - so it won't work on other
implementations of the unordered associative containers.

[table Methods for accessing the hash and equality functions.
    [[Method] [Description]]

    [
        [``hasher hash_function() const``]
        [Returns the container's hash function.]
    ]
    [
        [``key_equal key_eq() const``]
        [Returns the container's key equality function.]
    ]
]
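
A small sketch of using these accessors (the key values are only for
illustration):

    #include <boost/unordered_set.hpp>
    #include <cassert>
    #include <string>

    int main()
    {
        boost::unordered_set<std::string> s;

        // Copies of the container's hash function and equality predicate
        // can be obtained and applied to keys directly.
        boost::unordered_set<std::string>::hasher hf = s.hash_function();
        boost::unordered_set<std::string>::key_equal eq = s.key_eq();

        assert(hf("apple") == hf("apple"));
        assert(eq("apple", "apple"));
    }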

[endsect]
@ -1,98 +0,0 @@
[/ Copyright 2006-2007 Daniel James.
 / Distributed under the Boost Software License, Version 1.0. (See accompanying
 / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ]

[def __tr1__
    [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2009.pdf
    C++ Standard Library Technical Report]]
[def __draft__
    [@http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2006/n2009.pdf
    Working Draft of the C++ Standard]]
[def __hash-table__ [@http://en.wikipedia.org/wiki/Hash_table
    hash table]]
[def __hash-function__ [@http://en.wikipedia.org/wiki/Hash_function
    hash function]]

[section:intro Introduction]

For accessing data based on keys, the C++ standard library offers `std::set`,
`std::map`, `std::multiset` and `std::multimap`. These are generally
implemented using balanced binary trees, so lookup time has
logarithmic complexity. That is generally okay, but in many cases a
__hash-table__ can perform better, as accessing data has constant complexity,
on average. The worst case complexity is linear, but that occurs rarely and,
with some care, can be avoided.

Also, the existing containers require a 'less than' comparison object
to order their elements. For some data types this is impossible to implement
or isn't practical. For a hash table you need an equality function
and a hash function for the key.

So the __tr1__ introduced the unordered associative containers, which are
implemented using hash tables, and they have now been added to the __draft__.
There are four containers to match the existing
associative containers. In the header <[headerref boost/unordered_set.hpp]>:

    template <
        class Key,
        class Hash = boost::hash<Key>,
        class Pred = std::equal_to<Key>,
        class Alloc = std::allocator<Key> >
    class ``[classref boost::unordered_set unordered_set]``;

    template<
        class Key,
        class Hash = boost::hash<Key>,
        class Pred = std::equal_to<Key>,
        class Alloc = std::allocator<Key> >
    class ``[classref boost::unordered_multiset unordered_multiset]``;

and in <[headerref boost/unordered_map.hpp]>:

    template <
        class Key, class T,
        class Hash = boost::hash<Key>,
        class Pred = std::equal_to<Key>,
        class Alloc = std::allocator<Key> >
    class ``[classref boost::unordered_map unordered_map]``;

    template<
        class Key, class T,
        class Hash = boost::hash<Key>,
        class Pred = std::equal_to<Key>,
        class Alloc = std::allocator<Key> >
    class ``[classref boost::unordered_multimap unordered_multimap]``;

The containers are used in a similar manner to the normal associative
containers:

    #include <``[headerref boost/unordered_map.hpp]``>
    #include <cassert>
    #include <string>

    int main()
    {
        boost::unordered_map<std::string, int> x;
        x["one"] = 1;
        x["two"] = 2;
        x["three"] = 3;

        assert(x["one"] == 1);
        assert(x["missing"] == 0);
    }

But since the elements aren't ordered, the output of:

    BOOST_FOREACH(map::value_type i, x) {
        std::cout<<i.first<<","<<i.second<<"\n";
    }

can be in any order. For example, it might be:

    two,2
    one,1
    three,3
    missing,0

There are other differences, which will be detailed later.

[endsect]
@ -1,138 +0,0 @@
[/ Copyright 2006-2007 Daniel James.
 / Distributed under the Boost Software License, Version 1.0. (See accompanying
 / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ]

[def __wang__
    [@http://www.concentric.net/~Ttwang/tech/inthash.htm
    Thomas Wang's article on integer hash functions]]

[section:rationale Implementation Rationale]

The intent of this library is to implement the unordered
containers in the draft standard, so the interface was fixed. But there are
still some implementation decisions to make. The priorities are
conformance to the standard and portability.

The [@http://en.wikipedia.org/wiki/Hash_table wikipedia article on hash tables]
has a good summary of the implementation issues for hash tables in general.

[h2 Data Structure]

By specifying an interface for accessing the buckets of the container the
standard pretty much requires that the hash table uses chained addressing.

It would be conceivable to write a hash table that uses another method. For
example, it could use open addressing, and use the lookup chain to act as a
bucket, but there are some serious problems with this:

* The draft standard requires that pointers to elements aren't invalidated, so
  the elements can't be stored in one array, but will need a layer of
  indirection instead - losing the efficiency and most of the memory gain,
  the main advantages of open addressing.

* Local iterators would be very inefficient and may not be able to
  meet the complexity requirements.

* There are also the restrictions on when iterators can be invalidated. Since
  open addressing degrades badly when there are a high number of collisions,
  the restrictions could prevent a rehash when it's really needed. The maximum
  load factor could be set to a fairly low value to work around this - but the
  standard requires that it is initially set to 1.0.

* And since the standard is written with an eye towards chained
  addressing, users will be surprised if the performance doesn't reflect that.

So chained addressing is used.

For containers with unique keys I store the buckets in a singly-linked list.
There are other possible data structures (such as a doubly-linked list)
that allow for some operations to be faster (such as erasing and iteration)
but the possible gain seems small compared to the extra memory needed.
The most commonly used operations (insertion and lookup) would not be improved
at all.

But for containers with equivalent keys a singly-linked list can degrade badly
when a large number of elements with equivalent keys are inserted. I think it's
reasonable to assume that users who choose to use `unordered_multiset` or
`unordered_multimap` do so because they are likely to insert elements with
equivalent keys. So I have used an alternative data structure that doesn't
degrade, at the expense of an extra pointer per node.

This works by storing a circular linked list for each group of equivalent
nodes in reverse order. This allows quick navigation to the end of a group
(since the first element points to the last) and can be quickly updated when
elements are inserted or erased. The main disadvantage of this approach is some
hairy code for erasing elements.

[h2 Number of Buckets]

There are two popular methods for choosing the number of buckets in a hash
table. One is to have a prime number of buckets, another is to use a power
of 2.

Using a prime number of buckets, and choosing a bucket by taking the modulus
of the hash function's result, will usually give a good result. The downside
is that the required modulus operation is fairly expensive.

Using a power of 2 allows for much quicker selection of the bucket
to use, but at the expense of losing the upper bits of the hash value.
For some specially designed hash functions it is possible to do this and
still get a good result, but as the containers can take arbitrary hash
functions this can't be relied on.

To avoid this, a transformation could be applied to the hash value; for an
example, see __wang__. Unfortunately, a transformation like Wang's requires
knowledge of the number of bits in the hash value, so it isn't portable enough.
This leaves more expensive methods, such as Knuth's Multiplicative Method
(mentioned in Wang's article). These don't tend to work as well as taking the
modulus of a prime, and the extra computation required might negate the
efficiency advantage of power of 2 hash tables.

So, this implementation uses a prime number for the hash table size.
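
The trade-off between the two strategies can be seen in a simplified sketch of
bucket selection (illustrative only, not the library's actual code):

    #include <cstddef>

    // Prime bucket count: every bit of the hash value takes part, at the
    // cost of an integer division.
    std::size_t bucket_by_prime(std::size_t hash, std::size_t prime_count)
    {
        return hash % prime_count;
    }

    // Power-of-two bucket count: a cheap bit mask, but only the low bits
    // of the hash value influence the choice of bucket.
    std::size_t bucket_by_mask(std::size_t hash, std::size_t pow2_count)
    {
        return hash & (pow2_count - 1);
    }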

[h2 Active Issues]

[h3 [@http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#258
258. Missing allocator requirement]]

Need to look into this one.

[h3 [@http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#431
431. Swapping containers with unequal allocators]]

I'm following Howard Hinnant's advice and implementing option 3.

There is currently a further issue - if the allocator's swap does throw there's
no guarantee what state the allocators will be in. The only solution seems to
be to double buffer the allocators. But I'm assuming that it won't throw for
now.

[h3 [@http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#518
518. Are insert and erase stable for unordered_multiset and unordered_multimap?]]

In this implementation, erase is stable. All inserts are stable, except for
inserting with a hint, which has slightly surprising behaviour. If the hint
points to the first element in the correct equal range it inserts at the end of
the range; for all other elements in the range it inserts immediately before
the element. I am very tempted to change insert with a hint to just ignore the
hint completely.

[h3 [@http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#528
528. TR1: issue 6.19 vs 6.3.4.3/2 (and 6.3.4.5/2)]]

In the current implementation, for `unordered_set` and
`unordered_multiset`, `iterator` and `const_iterator` have the same type, and
`local_iterator` and `const_local_iterator` also have the same type. This makes
it impossible to implement the header exactly as described in the synopsis, as
some member functions are overloaded by the same type.

The proposed resolution is to add a new subsection to 17.4.4:

[:An implementation shall not supply an overloaded function signature specified in any library clause if such a signature would be inherently ambiguous during overload resolution due to two library types referring to the same type.]

So I don't supply the `iterator` overloads.

[h3 [@http://www.open-std.org/jtc1/sc22/wg21/docs/lwg-active.html#560
560. User-defined allocators without default constructor]]

This implementation should work okay for an allocator without a default
constructor, although I don't currently test for this.

[endsect]
doc/ref.xml (2954 lines): file diff suppressed because it is too large.
doc/src/boost.xml (new file, 9 lines)
@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE boostbook PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN"
    "http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
<boostbook xmlns:xi="http://www.w3.org/2001/XInclude">
    <title>The Boost C++ Unordered Containers Library Documentation</title>

    <xi:include href="unordered.xml"/>
    <!-- <xi:include href="hash.xml"/> -->
</boostbook>
@ -1,78 +0,0 @@

// Copyright 2006-2007 Daniel James.
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <boost/unordered_map.hpp>
#include <boost/detail/lightweight_test.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/functional/hash.hpp>
#include <cctype>
#include <string>

//[case_insensitive_functions
struct iequal_to
    : std::binary_function<std::string, std::string, bool>
{
    bool operator()(std::string const& x,
        std::string const& y) const
    {
        return boost::algorithm::iequals(x, y);
    }
};

struct ihash
    : std::unary_function<std::string, std::size_t>
{
    std::size_t operator()(std::string const& x) const
    {
        std::size_t seed = 0;

        for(std::string::const_iterator it = x.begin();
            it != x.end(); ++it)
        {
            boost::hash_combine(seed, std::toupper(*it));
        }

        return seed;
    }
};

struct word_info;
//]

struct word_info {
    int tag;
    explicit word_info(int t = 0) : tag(t) {}
};

int main() {
//[case_insensitive_dictionary
    boost::unordered_map<std::string, word_info, ihash, iequal_to>
        idictionary;
//]

    BOOST_TEST(idictionary.empty());

    idictionary["one"] = word_info(1);
    BOOST_TEST(idictionary.size() == 1);
    BOOST_TEST(idictionary.find("ONE") != idictionary.end() &&
        idictionary.find("ONE") == idictionary.find("one"));

    idictionary.insert(std::make_pair("ONE", word_info(2)));
    BOOST_TEST(idictionary.size() == 1);
    BOOST_TEST(idictionary.find("ONE") != idictionary.end() &&
        idictionary.find("ONE")->first == "one" &&
        idictionary.find("ONE")->second.tag == 1);

    idictionary["One"] = word_info(3);
    BOOST_TEST(idictionary.size() == 1);
    BOOST_TEST(idictionary.find("ONE") != idictionary.end() &&
        idictionary.find("ONE")->first == "one" &&
        idictionary.find("ONE")->second.tag == 3);

    idictionary["two"] = word_info(4);
    BOOST_TEST(idictionary.size() == 2);
    BOOST_TEST(idictionary.find("two") != idictionary.end() &&
        idictionary.find("TWO")->first == "two" &&
        idictionary.find("Two")->second.tag == 4);

    return boost::report_errors();
}
@ -1,30 +0,0 @@
[/ Copyright 2006-2007 Daniel James.
 / Distributed under the Boost Software License, Version 1.0. (See accompanying
 / file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ]

[library Unordered Associative Containers
    [quickbook 1.3]
    [authors [Maitin-Shepard, Jeremy B.], [James, Daniel]]
    [copyright 2005 2007 Daniel James]
    [purpose std::tr1 compliant hash containers]
    [id unordered]
    [dirname unordered]
    [license
        Distributed under the Boost Software License, Version 1.0.
        (See accompanying file LICENSE_1_0.txt or copy at
        [@http://www.boost.org/LICENSE_1_0.txt])
    ]
]

[section Warning]
This documentation is a work in progress, and is often incomplete, incoherent
and, worst of all, incorrect. Don't take anything in it seriously.
[endsect]

[include:unordered intro.qbk]
[include:unordered buckets.qbk]
[include:unordered hash_equality.qbk]
[include:unordered comparison.qbk]
[include:unordered rationale.qbk]

[xinclude ref.xml]
index.html (new file, 9 lines)
@ -0,0 +1,9 @@
<html>
<head>
<meta http-equiv="refresh" content="0; URL=doc/html/index.html">
</head>
<body>
Automatic redirection failed, please go to
<a href="doc/html/index.html">doc/html/index.html</a>
</body>
</html>
project-root.jam (new file, 58 lines)
@ -0,0 +1,58 @@
#
# Copyright (c) 2006 João Abecasis
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#

##
## IMPORTANT NOTE: This file MUST NOT be copied over a boost installation
##

path-constant top : . ;

import modules ;
import path ;

local boost-root = [ modules.peek : BOOST_ROOT ] ;
local math-header-include = $(top)/../.. ;

if ! $(boost-root)
{
    local boost-search-dirs = [ modules.peek : BOOST_BUILD_PATH ] ;

    for local dir in $(boost-search-dirs)
    {
        if [ path.glob $(dir)/../../../ : boost/version.hpp ]
        {
            boost-root += $(dir)/../../../ ;
        }
    }

    if $(boost-root)
    {
        boost-root = [ path.make $(boost-root[1]) ] ;
    }
    else
    {
        ECHO "Warning: couldn't find BOOST_ROOT in" $(boost-root) ;
    }
}

use-project /boost/unit_test : $(boost-root)/libs/test/build ;

project unordered
    : requirements
        <include>$(boost-root)
    : # build everything in ./bin.v2
        build-dir bin.v2
    ;