Option to use same node type everywhere.

Will allow me to implement merge and extract fully.
This commit is contained in:
Daniel James
2017-02-27 03:59:02 +00:00
parent 0645700b33
commit 13322fe858
5 changed files with 97 additions and 45 deletions

View File

@ -33,6 +33,10 @@ matrix:
env: BJAM_TOOLSET=clang-m32
#- compiler: clang
# env: BJAM_TOOLSET=clang-std11m32
- compiler: gcc
env: BJAM_TOOLSET=gcc-interopable
- compiler: clang
env: BJAM_TOOLSET=clang-interopable
before_script:
- cd ${TRAVIS_BUILD_DIR}
@ -48,6 +52,8 @@ before_script:
# sanitize=address not available for 32-bit clang on travis.
echo "using clang : m32 : clang++ -m32 -Werror --std=c++03 ;" >> ~/user-config.jam
echo "using clang : std11m32 : clang++ -m32 -Werror --std=c++11 ;" >> ~/user-config.jam
echo "using gcc : interopable : g++-4.8 -Werror --std=c++03 -fsanitize=address -DBOOST_UNORDERED_INTEROPERABLE_NODES=1 ;" >> ~/user-config.jam
echo "using clang : interopable : clang++ -Werror --std=c++11 -fsanitize=address -DBOOST_UNORDERED_INTEROPERABLE_NODES=1 ;" >> ~/user-config.jam
- cat ~/user-config.jam
- wget -O boost.tar.bz2 https://sourceforge.net/projects/boost/files/boost/1.63.0/boost_1_63_0.tar.bz2
- tar -xjf boost.tar.bz2

View File

@ -64,6 +64,18 @@
#define BOOST_UNORDERED_EMPLACE_LIMIT 11
#endif
// BOOST_UNORDERED_INTEROPERABLE_NODES - Use the same node type for
// containers with unique and equivalent keys.
//
// 0 = Use different nodes
// 1 = Use ungrouped nodes everywhere
//
// Might add an extra value to use grouped nodes everywhere later.
#if !defined(BOOST_UNORDERED_INTEROPERABLE_NODES)
#define BOOST_UNORDERED_INTEROPERABLE_NODES 0
#endif
// BOOST_UNORDERED_USE_ALLOCATOR_TRAITS - Pick which version of
// allocator_traits to use.
//
@ -3245,8 +3257,20 @@ inline void table<Types>::rehash_impl(std::size_t num_buckets)
this->create_buckets(num_buckets);
link_pointer prev = this->get_previous_start();
while (prev->next_)
prev = node_algo::place_in_bucket(*this, prev);
while (prev->next_) {
node_pointer group_last = node_algo::last_for_rehash(prev);
bucket_pointer b =
this->get_bucket(this->hash_to_bucket(group_last->hash_));
if (!b->next_) {
b->next_ = prev;
prev = group_last;
} else {
link_pointer next = group_last->next_;
group_last->next_ = b->next_->next_;
b->next_->next_ = prev->next_;
prev->next_ = next;
}
}
}
#if defined(BOOST_MSVC)
@ -3469,6 +3493,47 @@ template <typename N> struct node_algo
return prev->next_;
}
// Group together all nodes with equal hash value, this may
// include nodes with different keys, but that's okay because
// they will end up in the same bucket.
// Return the last node in the run of consecutive nodes (starting at
// prev->next_) that share the same stored hash value. Such nodes may
// have different keys, but they all map to the same bucket, so they
// can be moved together during a rehash.
// pre: prev->next_ is not null.
static node_pointer last_for_rehash(link_pointer prev)
{
    node_pointer last = next_node(prev);
    std::size_t const group_hash = last->hash_;
    // Advance while the following node exists and still has the same hash.
    for (node_pointer next = next_node(last);
         next && next->hash_ == group_hash; next = next_node(last)) {
        last = next;
    }
    return last;
}
// Return the first node after 'n' whose key differs from n's key
// (or null if the chain ends first). The table pointer supplies
// key_eq()/get_key() since ungrouped nodes carry no group links.
template <typename Table>
static node_pointer next_group(node_pointer n, Table const* t)
{
    node_pointer candidate = next_node(n);
    while (candidate && t->key_eq()(t->get_key(n->value()),
                            t->get_key(candidate->value()))) {
        candidate = next_node(candidate);
    }
    return candidate;
}
// Count the nodes, starting at 'n', whose keys compare equal to n's
// key. 'n' itself is always included, so the result is at least 1.
// The table pointer supplies key_eq()/get_key().
template <typename Table>
static std::size_t count(node_pointer n, Table const* t)
{
    std::size_t result = 1; // 'n' itself
    node_pointer cursor = next_node(n);
    while (cursor && t->key_eq()(t->get_key(n->value()),
                         t->get_key(cursor->value()))) {
        ++result;
        cursor = next_node(cursor);
    }
    return result;
}
// Add node 'n' after 'pos'.
// This results in a different order to the grouped implementation.
static inline void add_to_node_group(node_pointer n, node_pointer pos)
@ -3484,25 +3549,9 @@ template <typename N> struct node_algo
return n;
}
// Extract a node and place it in the correct bucket.
// TODO: For tables with equivalent keys, this doesn't preserve
// the order.
// pre: prev->next_ is not null.
template <typename Table>
static link_pointer place_in_bucket(Table& dst, link_pointer prev)
static link_pointer split_groups(node_pointer, node_pointer)
{
node_pointer n = next_node(prev);
bucket_pointer b = dst.get_bucket(dst.hash_to_bucket(n->hash_));
if (!b->next_) {
b->next_ = prev;
return n;
} else {
prev->next_ = n->next_;
n->next_ = b->next_->next_;
b->next_->next_ = n;
return prev;
}
return link_pointer();
}
};
@ -4240,10 +4289,15 @@ template <typename N> struct grouped_node_algo
return static_cast<node_pointer>(prev->next_)->group_prev_;
}
// For grouped nodes the first node of a group records the group's
// final node in group_prev_, so the whole group's last node is
// available in O(1).
// pre: prev->next_ is not null.
static node_pointer last_for_rehash(link_pointer prev)
{
    node_pointer first_in_group = static_cast<node_pointer>(prev->next_);
    return first_in_group->group_prev_;
}
// The 'void*' arguments are pointers to the table, which we
// will ignore, but without groups they could be used to
// access the various functions for dealing with values and keys.
static node_pointer next_group(node_pointer(n), void const*)
static node_pointer next_group(node_pointer n, void const*)
{
return static_cast<node_pointer>(n->group_prev_->next_);
}
@ -4313,27 +4367,6 @@ template <typename N> struct grouped_node_algo
return prev;
}
// Extract a group of nodes and place them in the correct bucket.
// pre: prev->next_ is not null.
template <typename Table>
static link_pointer place_in_bucket(Table& dst, link_pointer prev)
{
node_pointer end = static_cast<node_pointer>(prev->next_)->group_prev_;
bucket_pointer b = dst.get_bucket(dst.hash_to_bucket(end->hash_));
if (!b->next_) {
b->next_ = prev;
return end;
} else {
link_pointer next = end->next_;
end->next_ = b->next_->next_;
b->next_->next_ = prev->next_;
prev->next_ = next;
return prev;
}
}
};
// If the allocator uses raw pointers use grouped_ptr_node
@ -4853,10 +4886,9 @@ struct grouped_table_impl : boost::unordered::detail::table<Types>
}
// Delete the nodes.
// Is it inefficient to call fix_bucket for every node?
do {
link_pointer group_end =
node_algo::next_group(node_algo::next_node(prev), this);
this->delete_nodes(prev, group_end);
this->delete_node(prev);
bucket_index = this->fix_bucket(bucket_index, prev);
} while (prev->next_ != j);

View File

@ -61,7 +61,11 @@ struct multimap
typedef boost::unordered::detail::allocator_traits<value_allocator>
value_allocator_traits;
#if BOOST_UNORDERED_INTEROPERABLE_NODES
typedef boost::unordered::detail::pick_node<A, value_type> pick;
#else
typedef boost::unordered::detail::pick_grouped_node<A, value_type> pick;
#endif
typedef typename pick::node node;
typedef typename pick::bucket bucket;
typedef typename pick::link_pointer link_pointer;

View File

@ -60,7 +60,11 @@ template <typename A, typename T, typename H, typename P> struct multiset
typedef boost::unordered::detail::allocator_traits<value_allocator>
value_allocator_traits;
#if BOOST_UNORDERED_INTEROPERABLE_NODES
typedef boost::unordered::detail::pick_node<A, value_type> pick;
#else
typedef boost::unordered::detail::pick_grouped_node<A, value_type> pick;
#endif
typedef typename pick::node node;
typedef typename pick::bucket bucket;
typedef typename pick::link_pointer link_pointer;

View File

@ -47,6 +47,10 @@ std::size_t hash_value(insert_stable::member const& x)
}
}
// This is now only supported when using grouped nodes. I can't see any
// efficient way to do it otherwise.
#if !BOOST_UNORDERED_INTEROPERABLE_NODES
UNORDERED_AUTO_TEST(stable_insert_test1)
{
boost::unordered_multiset<insert_stable::member> x;
@ -110,4 +114,6 @@ UNORDERED_AUTO_TEST(stable_insert_test2)
BOOST_TEST(it == end);
}
#endif
RUN_TESTS()