diff --git a/bench/bench_adaptive_node_pool.cpp b/bench/bench_adaptive_node_pool.cpp
index e9a3681..8b33012 100644
--- a/bench/bench_adaptive_node_pool.cpp
+++ b/bench/bench_adaptive_node_pool.cpp
@@ -7,6 +7,10 @@
 // See http://www.boost.org/libs/container for documentation.
 //
 //////////////////////////////////////////////////////////////////////////////
+//Enable checks in debug mode
+#ifndef NDEBUG
+#define BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
+#endif
 
 #ifdef _MSC_VER
 #pragma warning (disable : 4512)
@@ -273,12 +277,14 @@ int main(int argc, const char *argv[])
    #endif
    std::size_t numele [] = { 10000, 1000, 100, 10, 5, 2, 1 };
 #else
-   #ifdef NDEBUG
-   std::size_t numrep [] = { 150000 };
+   #ifdef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
+   std::size_t numrep[] = { 1000 };
+   #elif defined(NDEBUG)
+   std::size_t numrep [] = { 15000 };
    #else
-   std::size_t numrep [] = { 10000 };
+   std::size_t numrep [] = { 1000 };
    #endif
-   std::size_t numele [] = { 10 };
+   std::size_t numele [] = { 100 };
 #endif
 
    bool csv_output = argc == 2 && (strcmp(argv[1], "--csv-output") == 0);
diff --git a/bench/bench_set_adaptive_pool.cpp b/bench/bench_set_adaptive_pool.cpp
new file mode 100644
index 0000000..dc5f476
--- /dev/null
+++ b/bench/bench_set_adaptive_pool.cpp
@@ -0,0 +1,34 @@
+//////////////////////////////////////////////////////////////////////////////
+//
+// (C) Copyright Ion Gaztanaga 2013-2013. Distributed under the Boost
+// Software License, Version 1.0. (See accompanying file
+// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// See http://www.boost.org/libs/container for documentation.
+//
+//////////////////////////////////////////////////////////////////////////////
+//Enable checks in debug mode
+#ifndef NDEBUG
+#define BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS
+#endif
+
+#include "bench_set.hpp"
+#include <boost/container/set.hpp>
+#include <boost/container/allocator.hpp>
+#include <boost/container/adaptive_pool.hpp>
+
+int main()
+{
+   using namespace boost::container;
+
+   fill_range_ints();
+   fill_range_strings();
+
+   //set<..., adaptive_pool> vs. set
+   launch_tests< set<int, std::less<int>, private_adaptive_pool<int> >, set<int> >
+      ("set<int, private_adaptive_pool>", "set<int>");
+   launch_tests< set<string, std::less<string>, private_adaptive_pool<string> >, set<string> >
+      ("set<string, private_adaptive_pool>", "set<string>");
+
+   return 0;
+}
diff --git a/doc/container.qbk b/doc/container.qbk
index cb45341..032b046 100644
--- a/doc/container.qbk
+++ b/doc/container.qbk
@@ -1232,6 +1232,17 @@ use [*Boost.Container]? There are several reasons for that:
 
 [section:release_notes Release Notes]
 
+[section:release_notes_boost_1_68_00 Boost 1.68 Release]
+
+* Improved the correctness of [classref boost::container::adaptive_pool adaptive_pool]; many of its parameters are now
+  compile-time constants instead of runtime values.
+
+* Fixed bugs:
+  * [@https://svn.boost.org/trac/boost/ticket/13533 Trac #13533: ['"Boost vector resize causes assert(false)"]].
+
+[endsect]
+
+
 [section:release_notes_boost_1_67_00 Boost 1.67 Release]
 
 * ['vector] can now have options, using [classref boost::container::vector_options vector_options].
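// --- Illustrative note (not part of the patch) -------------------------------
// The release note and the new benchmark above compare node containers using a
// pooled allocator against the default allocator. A minimal usage sketch,
// assuming only the public headers boost/container/set.hpp and
// boost/container/adaptive_pool.hpp:
#include <functional>
#include <boost/container/set.hpp>
#include <boost/container/adaptive_pool.hpp>

int main()
{
   using namespace boost::container;
   //Nodes of this set are carved from an adaptive pool instead of being
   //requested one by one from the general-purpose allocator.
   set<int, std::less<int>, adaptive_pool<int> > pooled_set;
   for(int i = 0; i != 1000; ++i)
      pooled_set.insert(i);
   return 0;   //nodes are returned to the pool when the set is destroyed
}
// ------------------------------------------------------------------------------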
diff --git a/include/boost/container/adaptive_pool.hpp b/include/boost/container/adaptive_pool.hpp index 900806a..d1d77bc 100644 --- a/include/boost/container/adaptive_pool.hpp +++ b/include/boost/container/adaptive_pool.hpp @@ -117,9 +117,9 @@ class adaptive_pool #ifndef BOOST_CONTAINER_DOXYGEN_INVOKED private: //!Not assignable from related adaptive_pool - template + template adaptive_pool& operator= - (const adaptive_pool&); + (const adaptive_pool&); #endif //#ifndef BOOST_CONTAINER_DOXYGEN_INVOKED @@ -341,6 +341,266 @@ class adaptive_pool } }; + + + + + + + + + + + + + + + + + + + +template < class T + , std::size_t NodesPerBlock = ADP_nodes_per_block + , std::size_t MaxFreeBlocks = ADP_max_free_blocks + , std::size_t OverheadPercent = ADP_overhead_percent + , unsigned Version = 2 + > +class private_adaptive_pool +{ + //!If Version is 1, the allocator is a STL conforming allocator. If Version is 2, + //!the allocator offers advanced expand in place and burst allocation capabilities. + public: + typedef unsigned int allocation_type; + typedef private_adaptive_pool + self_t; + + static const std::size_t nodes_per_block = NodesPerBlock; + static const std::size_t max_free_blocks = MaxFreeBlocks; + static const std::size_t overhead_percent = OverheadPercent; + static const std::size_t real_nodes_per_block = NodesPerBlock; + + BOOST_CONTAINER_DOCIGN(BOOST_STATIC_ASSERT((Version <=2))); + + typedef dtl::private_adaptive_node_pool + pool_t; + pool_t m_pool; + + public: + //------- + typedef T value_type; + typedef T * pointer; + typedef const T * const_pointer; + typedef typename ::boost::container:: + dtl::unvoid_ref::type reference; + typedef typename ::boost::container:: + dtl::unvoid_ref::type const_reference; + typedef std::size_t size_type; + typedef std::ptrdiff_t difference_type; + + typedef boost::container::dtl:: + version_type version; + + #ifndef BOOST_CONTAINER_DOXYGEN_INVOKED + typedef boost::container::dtl:: + basic_multiallocation_chain multiallocation_chain_void; + typedef boost::container::dtl:: + transform_multiallocation_chain + multiallocation_chain; + #endif //#ifndef BOOST_CONTAINER_DOXYGEN_INVOKED + + //!Obtains private_adaptive_pool from + //!private_adaptive_pool + template + struct rebind + { + typedef private_adaptive_pool + < T2 + , NodesPerBlock + , MaxFreeBlocks + , OverheadPercent + BOOST_CONTAINER_DOCIGN(BOOST_MOVE_I Version) + > other; + }; + + #ifndef BOOST_CONTAINER_DOXYGEN_INVOKED + private: + //!Not assignable from related private_adaptive_pool + template + private_adaptive_pool& operator= + (const private_adaptive_pool&); + #endif //#ifndef BOOST_CONTAINER_DOXYGEN_INVOKED + + public: + //!Default constructor + private_adaptive_pool() BOOST_NOEXCEPT_OR_NOTHROW + {} + + //!Copy constructor from other private_adaptive_pool. + private_adaptive_pool(const private_adaptive_pool &) BOOST_NOEXCEPT_OR_NOTHROW + {} + + //!Copy constructor from related private_adaptive_pool. + template + private_adaptive_pool + (const private_adaptive_pool &) BOOST_NOEXCEPT_OR_NOTHROW + {} + + //!Destructor + ~private_adaptive_pool() BOOST_NOEXCEPT_OR_NOTHROW + {} + + //!Returns the number of elements that could be allocated. + //!Never throws + size_type max_size() const BOOST_NOEXCEPT_OR_NOTHROW + { return size_type(-1)/sizeof(T); } + + //!Allocate memory for an array of count elements. 
+   //!Throws std::bad_alloc if there is not enough memory
+   pointer allocate(size_type count, const void * = 0)
+   {
+      if(BOOST_UNLIKELY(count > this->max_size()))
+         boost::container::throw_bad_alloc();
+
+      if(Version == 1 && count == 1){
+         return pointer(static_cast<T*>(m_pool.allocate_node()));
+      }
+      else{
+         return static_cast<pointer>(dlmalloc_malloc(count*sizeof(T)));
+      }
+   }
+
+   //!Deallocate allocated memory.
+   //!Never throws
+   void deallocate(const pointer &ptr, size_type count) BOOST_NOEXCEPT_OR_NOTHROW
+   {
+      (void)count;
+      if(Version == 1 && count == 1){
+         m_pool.deallocate_node(ptr);
+      }
+      else{
+         dlmalloc_free(ptr);
+      }
+   }
+
+   pointer allocation_command(allocation_type command,
+                         size_type limit_size,
+                         size_type &prefer_in_recvd_out_size,
+                         pointer &reuse)
+   {
+      pointer ret = this->priv_allocation_command(command, limit_size, prefer_in_recvd_out_size, reuse);
+      if(BOOST_UNLIKELY(!ret && !(command & BOOST_CONTAINER_NOTHROW_ALLOCATION)))
+         boost::container::throw_bad_alloc();
+      return ret;
+   }
+
+   //!Returns the maximum number of objects the previously allocated memory
+   //!pointed to by p can hold.
+   size_type size(pointer p) const BOOST_NOEXCEPT_OR_NOTHROW
+   {  return dlmalloc_size(p);  }
+
+   //!Allocates just one object. Memory allocated with this function
+   //!must be deallocated only with deallocate_one().
+   //!Throws bad_alloc if there is not enough memory
+   pointer allocate_one()
+   {
+      return (pointer)m_pool.allocate_node();
+   }
+
+   //!Allocates many elements of size == 1.
+   //!Elements must be individually deallocated with deallocate_one()
+   void allocate_individual(std::size_t num_elements, multiallocation_chain &chain)
+   {
+      m_pool.allocate_nodes(num_elements, static_cast<typename pool_t::multiallocation_chain&>(chain));
+   }
+
+   //!Deallocates memory previously allocated with allocate_one().
+   //!You should never use deallocate_one to deallocate memory allocated
+   //!with functions other than allocate_one(). Never throws
+   void deallocate_one(pointer p) BOOST_NOEXCEPT_OR_NOTHROW
+   {
+      m_pool.deallocate_node(p);
+   }
+
+   void deallocate_individual(multiallocation_chain &chain) BOOST_NOEXCEPT_OR_NOTHROW
+   {
+      m_pool.deallocate_nodes(chain);
+   }
+
+   //!Allocates many elements of size elem_size.
+   //!Elements must be individually deallocated with deallocate()
+   void allocate_many(size_type elem_size, std::size_t n_elements, multiallocation_chain &chain)
+   {
+      BOOST_STATIC_ASSERT(( Version > 1 ));
+      if(BOOST_UNLIKELY(!dlmalloc_multialloc_nodes
+         (n_elements, elem_size*sizeof(T), DL_MULTIALLOC_DEFAULT_CONTIGUOUS, reinterpret_cast<dlmalloc_memchain *>(&chain)))){
+         boost::container::throw_bad_alloc();
+      }
+   }
+
+   //!Allocates n_elements elements, each one of size elem_sizes[i]
+   //!Elements must be individually deallocated with deallocate()
+   void allocate_many(const size_type *elem_sizes, size_type n_elements, multiallocation_chain &chain)
+   {
+      BOOST_STATIC_ASSERT(( Version > 1 ));
+      if(BOOST_UNLIKELY(!dlmalloc_multialloc_arrays
+         (n_elements, elem_sizes, sizeof(T), DL_MULTIALLOC_DEFAULT_CONTIGUOUS, reinterpret_cast<dlmalloc_memchain *>(&chain)))){
+         boost::container::throw_bad_alloc();
+      }
+   }
+
+   void deallocate_many(multiallocation_chain &chain) BOOST_NOEXCEPT_OR_NOTHROW
+   {
+      dlmalloc_multidealloc(reinterpret_cast<dlmalloc_memchain *>(&chain));
+   }
+
+   //!Deallocates all free blocks of the pool
+   void deallocate_free_blocks() BOOST_NOEXCEPT_OR_NOTHROW
+   {
+      m_pool.deallocate_free_blocks();
+   }
+
+   //!Swaps allocators. Does not throw. If each allocator is placed in a
+   //!different memory segment, the result is undefined.
+   friend void swap(private_adaptive_pool &, private_adaptive_pool &) BOOST_NOEXCEPT_OR_NOTHROW
+   {}
+
+   //!Two instances always compare equal, as memory allocated with one
+   //!instance can be deallocated by another instance
+   friend bool operator==(const private_adaptive_pool &, const private_adaptive_pool &) BOOST_NOEXCEPT_OR_NOTHROW
+   {  return true;   }
+
+   //!Two instances never compare unequal, as memory allocated with one
+   //!instance can be deallocated by another instance
+   friend bool operator!=(const private_adaptive_pool &, const private_adaptive_pool &) BOOST_NOEXCEPT_OR_NOTHROW
+   {  return false;   }
+
+   private:
+   pointer priv_allocation_command
+      (allocation_type command,   std::size_t limit_size
+      ,size_type &prefer_in_recvd_out_size, pointer &reuse_ptr)
+   {
+      std::size_t const preferred_size = prefer_in_recvd_out_size;
+      dlmalloc_command_ret_t ret = {0 , 0};
+      if(BOOST_UNLIKELY(limit_size > this->max_size() || preferred_size > this->max_size())){
+         return pointer();
+      }
+      std::size_t l_size = limit_size*sizeof(T);
+      std::size_t p_size = preferred_size*sizeof(T);
+      std::size_t r_size;
+      {
+         void* reuse_ptr_void = reuse_ptr;
+         ret = dlmalloc_allocation_command(command, sizeof(T), l_size, p_size, &r_size, reuse_ptr_void);
+         reuse_ptr = ret.second ? static_cast<T*>(reuse_ptr_void) : 0;
+      }
+      prefer_in_recvd_out_size = r_size/sizeof(T);
+      return (pointer)ret.first;
+   }
+};
+
 } //namespace container {
 } //namespace boost {
diff --git a/include/boost/container/detail/adaptive_node_pool.hpp b/include/boost/container/detail/adaptive_node_pool.hpp
index 33bfcc9..d14e865 100644
--- a/include/boost/container/detail/adaptive_node_pool.hpp
+++ b/include/boost/container/detail/adaptive_node_pool.hpp
@@ -39,17 +39,6 @@ namespace boost { namespace container { namespace dtl {
-template<bool AlignOnly>
-struct select_private_adaptive_node_pool_impl
-{
-   typedef boost::container::dtl::
-      private_adaptive_node_pool_impl
-         < fake_segment_manager
-         , unsigned(AlignOnly)*::boost::container::adaptive_pool_flag::align_only
-         | ::boost::container::adaptive_pool_flag::size_ordered | ::boost::container::adaptive_pool_flag::address_ordered
-         > type;
-};
-
 //!Pooled memory allocator using an smart adaptive pool. Includes
 //!a reference count but the class does not delete itself, this is
 //!responsibility of user classes.
Node size (NodeSize) and the number of @@ -60,24 +49,38 @@ template< std::size_t NodeSize , std::size_t OverheadPercent > class private_adaptive_node_pool - : public select_private_adaptive_node_pool_impl<(OverheadPercent == 0)>::type + : public private_adaptive_node_pool_impl_ct + < fake_segment_manager + , MaxFreeBlocks + , NodeSize + , NodesPerBlock + , OverheadPercent + , unsigned(OverheadPercent == 0)*::boost::container::adaptive_pool_flag::align_only + | ::boost::container::adaptive_pool_flag::size_ordered + | ::boost::container::adaptive_pool_flag::address_ordered + > { - typedef typename select_private_adaptive_node_pool_impl::type base_t; + typedef private_adaptive_node_pool_impl_ct + < fake_segment_manager + , MaxFreeBlocks + , NodeSize + , NodesPerBlock + , OverheadPercent + , unsigned(OverheadPercent == 0)*::boost::container::adaptive_pool_flag::align_only + | ::boost::container::adaptive_pool_flag::size_ordered + | ::boost::container::adaptive_pool_flag::address_ordered + > base_t; + //Non-copyable private_adaptive_node_pool(const private_adaptive_node_pool &); private_adaptive_node_pool &operator=(const private_adaptive_node_pool &); public: - typedef typename base_t::multiallocation_chain multiallocation_chain; static const std::size_t nodes_per_block = NodesPerBlock; //!Constructor. Never throws private_adaptive_node_pool() - : base_t(0 - , NodeSize - , NodesPerBlock - , MaxFreeBlocks - , (unsigned char)OverheadPercent) + : base_t(0) {} }; diff --git a/include/boost/container/detail/adaptive_node_pool_impl.hpp b/include/boost/container/detail/adaptive_node_pool_impl.hpp index 9ff4ec5..a92ba98 100644 --- a/include/boost/container/detail/adaptive_node_pool_impl.hpp +++ b/include/boost/container/detail/adaptive_node_pool_impl.hpp @@ -87,7 +87,7 @@ template struct less_func { static bool less(SizeType, SizeType, const void *la, const void *ra) - { return &la < &ra; } + { return la < ra; } }; template @@ -97,7 +97,7 @@ struct less_func +template struct block_container_traits { typedef typename bi::make_set_base_hook @@ -116,11 +116,21 @@ struct block_container_traits static void reinsert_was_used(Container &container, typename Container::reference v, bool) { typedef typename Container::const_iterator const_block_iterator; - const const_block_iterator this_block - (Container::s_iterator_to(const_cast(v))); - const_block_iterator next_block(this_block); - if(++next_block != container.cend()){ - if(this_block->free_nodes.size() > next_block->free_nodes.size()){ + typedef typename Container::iterator block_iterator; + typedef typename Container::value_compare value_compare; + + const block_iterator this_block(Container::s_iterator_to(v)); + const const_block_iterator cendit(container.cend()); + block_iterator next_block(this_block); + + if(++next_block != cendit && value_compare()(*next_block, v)){ + const_block_iterator next2_block(next_block); + //Test if v should be swapped with next (optimization) + if(++next2_block == cendit || !value_compare()(*next2_block, v)){ + v.swap_nodes(*next_block); + BOOST_ASSERT(++next_block == this_block); + } + else{ container.erase(this_block); container.insert(v); } @@ -147,7 +157,7 @@ struct block_container_traits }; template -struct block_container_traits +struct block_container_traits { typedef typename bi::make_list_base_hook < bi::void_pointer @@ -193,11 +203,16 @@ struct block_container_traits } }; +///////////////////////////// +// +// adaptive_pool_types +// +///////////////////////////// template struct adaptive_pool_types { typedef 
VoidPointer void_pointer; - static const bool ordered = (Flags & (adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered)) != 0; + static const unsigned ordered = (Flags & (adaptive_pool_flag::size_ordered | adaptive_pool_flag::address_ordered)); typedef block_container_traits block_container_traits_t; typedef typename block_container_traits_t::hook_t hook_t; typedef hdr_offset_holder_t hdr_offset_holder; @@ -222,95 +237,210 @@ struct adaptive_pool_types typedef typename block_container_traits_t:: template container::type block_container_t; }; -template -inline size_type calculate_alignment - ( size_type overhead_percent, size_type real_node_size - , size_type hdr_size, size_type hdr_offset_size, size_type payload_per_allocation) + +///////////////////////////////////////////// +// +// candidate_power_of_2_ct +// +///////////////////////////////////////////// +template< std::size_t alignment + , std::size_t real_node_size + , std::size_t payload_per_allocation + , std::size_t min_elements_per_block + , std::size_t hdr_size + , std::size_t hdr_offset_size + , std::size_t overhead_percent> +struct candidate_power_of_2_ct_helper +{ + static const std::size_t hdr_subblock_elements_alone = (alignment - hdr_size - payload_per_allocation)/real_node_size; + static const std::size_t hdr_subblock_elements_first = (alignment - hdr_size - payload_per_allocation)/real_node_size; + static const std::size_t elements_per_b_subblock_mid = (alignment - hdr_offset_size)/real_node_size; + static const std::size_t elements_per_b_subblock_end = (alignment - hdr_offset_size - payload_per_allocation)/real_node_size; + static const std::size_t num_b_subblock = + hdr_subblock_elements_alone >= min_elements_per_block + ? 0 + : ( ((hdr_subblock_elements_first + elements_per_b_subblock_end) >= min_elements_per_block) + ? 1 + : 2 + (min_elements_per_block - hdr_subblock_elements_first - elements_per_b_subblock_end - 1)/elements_per_b_subblock_mid + ) + ; + + static const std::size_t num_b_subblock_mid = (num_b_subblock > 1) ? (num_b_subblock - 1) : 0; + + static const std::size_t total_nodes = (num_b_subblock == 0) + ? hdr_subblock_elements_alone + : ( (num_b_subblock == 1) + ? 
(hdr_subblock_elements_first + elements_per_b_subblock_end) + : (hdr_subblock_elements_first + num_b_subblock_mid*elements_per_b_subblock_mid + elements_per_b_subblock_end) + ) + ; + static const std::size_t total_data = total_nodes*real_node_size; + static const std::size_t total_size = alignment*(num_b_subblock+1); + static const bool overhead_satisfied = (total_size - total_data)*100/total_size < overhead_percent; +}; + +template< std::size_t initial_alignment + , std::size_t real_node_size + , std::size_t payload_per_allocation + , std::size_t min_elements_per_block + , std::size_t hdr_size + , std::size_t hdr_offset_size + , std::size_t overhead_percent + , bool Loop = true> +struct candidate_power_of_2_ct +{ + typedef candidate_power_of_2_ct_helper + < initial_alignment + , real_node_size + , payload_per_allocation + , min_elements_per_block + , hdr_size + , hdr_offset_size + , overhead_percent> helper_t; + + static const std::size_t candidate_power_of_2 = initial_alignment << std::size_t(!helper_t::overhead_satisfied); + + typedef typename candidate_power_of_2_ct + < candidate_power_of_2 + , real_node_size + , payload_per_allocation + , min_elements_per_block + , hdr_size + , hdr_offset_size + , overhead_percent + , !helper_t::overhead_satisfied + >::type type; + + static const std::size_t alignment = type::alignment; + static const std::size_t num_subblocks = type::num_subblocks; + static const std::size_t real_num_node = type::real_num_node; +}; + +template< std::size_t initial_alignment + , std::size_t real_node_size + , std::size_t payload_per_allocation + , std::size_t min_elements_per_block + , std::size_t hdr_size + , std::size_t hdr_offset_size + , std::size_t overhead_percent + > +struct candidate_power_of_2_ct + < initial_alignment + , real_node_size + , payload_per_allocation + , min_elements_per_block + , hdr_size + , hdr_offset_size + , overhead_percent + , false> +{ + typedef candidate_power_of_2_ct + < initial_alignment + , real_node_size + , payload_per_allocation + , min_elements_per_block + , hdr_size + , hdr_offset_size + , overhead_percent + , false> type; + + typedef candidate_power_of_2_ct_helper + < initial_alignment + , real_node_size + , payload_per_allocation + , min_elements_per_block + , hdr_size + , hdr_offset_size + , overhead_percent> helper_t; + + static const std::size_t alignment = initial_alignment; + static const std::size_t num_subblocks = helper_t::num_b_subblock+1; + static const std::size_t real_num_node = helper_t::total_nodes; +}; + +///////////////////////////////////////////// +// +// candidate_power_of_2_rt +// +///////////////////////////////////////////// +void candidate_power_of_2_rt ( std::size_t initial_alignment + , std::size_t real_node_size + , std::size_t payload_per_allocation + , std::size_t min_elements_per_block + , std::size_t hdr_size + , std::size_t hdr_offset_size + , std::size_t overhead_percent + , std::size_t &alignment + , std::size_t &num_subblocks + , std::size_t &real_num_node) { - //to-do: handle real_node_size != node_size - const size_type divisor = overhead_percent*real_node_size; - const size_type dividend = hdr_offset_size*100; - size_type elements_per_subblock = (dividend - 1)/divisor + 1; - size_type candidate_power_of_2 = - upper_power_of_2(elements_per_subblock*real_node_size + hdr_offset_size); bool overhead_satisfied = false; - //Now calculate the wors-case overhead for a subblock - const size_type max_subblock_overhead = hdr_size + payload_per_allocation; - while(!overhead_satisfied){ - 
elements_per_subblock = (candidate_power_of_2 - max_subblock_overhead)/real_node_size; - const size_type overhead_size = candidate_power_of_2 - elements_per_subblock*real_node_size; - if(overhead_size*100/candidate_power_of_2 < overhead_percent){ - overhead_satisfied = true; - } - else{ - candidate_power_of_2 <<= 1; - } + std::size_t num_b_subblock = 0; + std::size_t total_nodes = 0; + + while(!overhead_satisfied) + { + std::size_t hdr_subblock_elements_alone = (initial_alignment - hdr_size - payload_per_allocation)/real_node_size; + std::size_t hdr_subblock_elements_first = (initial_alignment - hdr_size - payload_per_allocation)/real_node_size; + std::size_t elements_per_b_subblock_mid = (initial_alignment - hdr_offset_size)/real_node_size; + std::size_t elements_per_b_subblock_end = (initial_alignment - hdr_offset_size - payload_per_allocation)/real_node_size; + + num_b_subblock = + hdr_subblock_elements_alone >= min_elements_per_block + ? 0 + : ( ((hdr_subblock_elements_first + elements_per_b_subblock_end) >= min_elements_per_block) + ? 1 + : 2 + (min_elements_per_block - hdr_subblock_elements_first - elements_per_b_subblock_end - 1)/elements_per_b_subblock_mid + ) + ; + + std::size_t num_b_subblock_mid = (num_b_subblock > 1) ? (num_b_subblock - 1) : 0; + + total_nodes = (num_b_subblock == 0) + ? hdr_subblock_elements_alone + : ( (num_b_subblock == 1) + ? (hdr_subblock_elements_first + elements_per_b_subblock_end) + : (hdr_subblock_elements_first + num_b_subblock_mid*elements_per_b_subblock_mid + elements_per_b_subblock_end) + ) + ; + std::size_t total_data = total_nodes*real_node_size; + std::size_t total_size = initial_alignment*(num_b_subblock+1); + overhead_satisfied = (total_size - total_data)*100/total_size < overhead_percent; + initial_alignment = initial_alignment << std::size_t(!overhead_satisfied); } - return candidate_power_of_2; + alignment = initial_alignment; + num_subblocks = num_b_subblock+1; + real_num_node = total_nodes; } -template -inline void calculate_num_subblocks - (size_type alignment, size_type real_node_size, size_type elements_per_block - , size_type &num_subblocks, size_type &real_num_node, size_type overhead_percent - , size_type hdr_size, size_type hdr_offset_size, size_type payload_per_allocation) +///////////////////////////////////////////// +// +// private_adaptive_node_pool_impl_common +// +///////////////////////////////////////////// +template< class SegmentManagerBase, unsigned int Flags> +class private_adaptive_node_pool_impl_common { - const size_type hdr_subblock_elements = (alignment - hdr_size - payload_per_allocation)/real_node_size; - size_type elements_per_subblock = (alignment - hdr_offset_size)/real_node_size; - size_type possible_num_subblock = (elements_per_block - 1)/elements_per_subblock + 1; - while(((possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements) < elements_per_block){ - ++possible_num_subblock; - } - elements_per_subblock = (alignment - hdr_offset_size)/real_node_size; - bool overhead_satisfied = false; - while(!overhead_satisfied){ - const size_type total_data = (elements_per_subblock*(possible_num_subblock-1) + hdr_subblock_elements)*real_node_size; - const size_type total_size = alignment*possible_num_subblock; - if((total_size - total_data)*100/total_size < overhead_percent){ - overhead_satisfied = true; - } - else{ - ++possible_num_subblock; - } - } - num_subblocks = possible_num_subblock; - real_num_node = (possible_num_subblock-1)*elements_per_subblock + hdr_subblock_elements; -} - -template -class 
private_adaptive_node_pool_impl -{ - //Non-copyable - private_adaptive_node_pool_impl(); - private_adaptive_node_pool_impl(const private_adaptive_node_pool_impl &); - private_adaptive_node_pool_impl &operator=(const private_adaptive_node_pool_impl &); - typedef private_adaptive_node_pool_impl this_type; - - typedef typename SegmentManagerBase::void_pointer void_pointer; - static const typename SegmentManagerBase:: - size_type PayloadPerAllocation = SegmentManagerBase::PayloadPerAllocation; + public: + //!Segment manager typedef + typedef SegmentManagerBase segment_manager_base_type; + typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain; + typedef typename SegmentManagerBase::size_type size_type; //Flags //align_only static const bool AlignOnly = (Flags & adaptive_pool_flag::align_only) != 0; typedef bool_ IsAlignOnly; typedef true_ AlignOnlyTrue; typedef false_ AlignOnlyFalse; - //size_ordered - static const bool SizeOrdered = (Flags & adaptive_pool_flag::size_ordered) != 0; - typedef bool_ IsSizeOrdered; - typedef true_ SizeOrderedTrue; - typedef false_ SizeOrderedFalse; - //address_ordered - static const bool AddressOrdered = (Flags & adaptive_pool_flag::address_ordered) != 0; - typedef bool_ IsAddressOrdered; - typedef true_ AddressOrderedTrue; - typedef false_ AddressOrderedFalse; - public: - typedef typename SegmentManagerBase::multiallocation_chain multiallocation_chain; - typedef typename SegmentManagerBase::size_type size_type; + typedef typename SegmentManagerBase::void_pointer void_pointer; + static const typename SegmentManagerBase:: + size_type PayloadPerAllocation = SegmentManagerBase::PayloadPerAllocation; - private: + typedef typename boost::intrusive::pointer_traits + ::template rebind_pointer::type segment_mngr_base_ptr_t; + + protected: typedef adaptive_pool_types adaptive_pool_types_t; typedef typename adaptive_pool_types_t::free_nodes_t free_nodes_t; @@ -320,70 +450,285 @@ class private_adaptive_node_pool_impl typedef typename block_container_t::iterator block_iterator; typedef typename block_container_t::const_iterator const_block_iterator; typedef typename adaptive_pool_types_t::hdr_offset_holder hdr_offset_holder; + typedef private_adaptive_node_pool_impl_common this_type; static const size_type MaxAlign = alignment_of::value; static const size_type HdrSize = ((sizeof(block_info_t)-1)/MaxAlign+1)*MaxAlign; static const size_type HdrOffsetSize = ((sizeof(hdr_offset_holder)-1)/MaxAlign+1)*MaxAlign; - public: - //!Segment manager typedef - typedef SegmentManagerBase segment_manager_base_type; + segment_mngr_base_ptr_t mp_segment_mngr_base; //Segment manager + block_container_t m_block_container; //Intrusive block list + size_type m_totally_free_blocks; //Free blocks - //!Constructor from a segment manager. Never throws - private_adaptive_node_pool_impl - ( segment_manager_base_type *segment_mngr_base - , size_type node_size - , size_type nodes_per_block - , size_type max_free_blocks - , unsigned char overhead_percent - ) - : m_max_free_blocks(max_free_blocks) - , m_real_node_size(lcm(node_size, size_type(alignment_of::value))) - //Round the size to a power of two value. - //This is the total memory size (including payload) that we want to - //allocate from the general-purpose allocator - , m_real_block_alignment - (AlignOnly ? 
- upper_power_of_2(HdrSize + m_real_node_size*nodes_per_block) : - calculate_alignment( (size_type)overhead_percent, m_real_node_size - , HdrSize, HdrOffsetSize, PayloadPerAllocation)) - //This is the real number of nodes per block - , m_num_subblocks(0) - , m_real_num_node(AlignOnly ? (m_real_block_alignment - PayloadPerAllocation - HdrSize)/m_real_node_size : 0) - //General purpose allocator - , mp_segment_mngr_base(segment_mngr_base) - , m_block_container() - , m_totally_free_blocks(0) + class block_destroyer; + friend class block_destroyer; + + class block_destroyer { - if(!AlignOnly){ - calculate_num_subblocks - ( m_real_block_alignment - , m_real_node_size - , nodes_per_block - , m_num_subblocks - , m_real_num_node - , (size_type)overhead_percent - , HdrSize - , HdrOffsetSize - , PayloadPerAllocation); + public: + block_destroyer(const this_type *impl, multiallocation_chain &chain, const size_type num_subblocks, const size_type real_block_alignment, const size_type real_num_node) + : mp_impl(impl), m_chain(chain), m_num_subblocks(num_subblocks), m_real_block_alignment(real_block_alignment), m_real_num_node(real_num_node) + {} + + void operator()(typename block_container_t::pointer to_deallocate) + { return this->do_destroy(to_deallocate, IsAlignOnly()); } + + private: + void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyTrue) + { + BOOST_ASSERT(to_deallocate->free_nodes.size() == m_real_num_node); + m_chain.push_back(to_deallocate); + } + + void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyFalse) + { + BOOST_ASSERT(to_deallocate->free_nodes.size() == m_real_num_node); + BOOST_ASSERT(0 == to_deallocate->hdr_offset); + hdr_offset_holder *hdr_off_holder = + mp_impl->priv_first_subblock_from_block(boost::movelib::to_raw_pointer(to_deallocate), m_num_subblocks, m_real_block_alignment); + m_chain.push_back(hdr_off_holder); + } + + const this_type *mp_impl; + multiallocation_chain &m_chain; + const size_type m_num_subblocks; + const size_type m_real_block_alignment; + const size_type m_real_num_node; + }; + + //This macro will activate invariant checking. Slow, but helpful for debugging the code. 
+ //#define BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS + void priv_invariants(const size_type real_num_node, const size_type num_subblocks, const size_type real_block_alignment) const + { + (void)real_num_node; (void)num_subblocks; (void)real_block_alignment; + #ifdef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS + //Check that the total totally free blocks are correct + BOOST_ASSERT(m_block_container.size() >= m_totally_free_blocks); + + const const_block_iterator itend(m_block_container.cend()); + const const_block_iterator itbeg(m_block_container.cbegin()); + + { //Try to do checks in a single iteration + const_block_iterator it(itbeg); + size_type total_free_nodes = 0; + size_type total_free_blocks = 0u; + for(; it != itend; ++it){ + if(it != itbeg){ + //Check order invariant + const_block_iterator prev(it); + --prev; + BOOST_ASSERT(!(m_block_container.key_comp()(*it, *prev))); + (void)prev; (void)it; + } + + //free_nodes invariant + const size_type free_nodes = it->free_nodes.size(); + BOOST_ASSERT(free_nodes <= real_num_node); + BOOST_ASSERT(free_nodes != 0); + + //Acummulate total_free_nodes and total_free_blocks + total_free_nodes += free_nodes; + total_free_blocks += it->free_nodes.size() == real_num_node; + + if (!AlignOnly) { + //Check that header offsets are correct + hdr_offset_holder *hdr_off_holder = this->priv_first_subblock_from_block(const_cast(&*it), num_subblocks, real_block_alignment); + for (size_type i = 0, max = num_subblocks; i < max; ++i) { + const size_type offset = reinterpret_cast(const_cast(&*it)) - reinterpret_cast(hdr_off_holder); + (void)offset; + BOOST_ASSERT(hdr_off_holder->hdr_offset == offset); + BOOST_ASSERT(0 == (reinterpret_cast(hdr_off_holder) & (real_block_alignment - 1))); + BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (real_block_alignment - 1))); + hdr_off_holder = reinterpret_cast(reinterpret_cast(hdr_off_holder) + real_block_alignment); + } + } + } + BOOST_ASSERT(total_free_blocks == m_totally_free_blocks); + BOOST_ASSERT(total_free_nodes >= m_totally_free_blocks*real_num_node); + } + #endif + } + + void priv_deallocate_free_blocks( const size_type max_free_blocks, const size_type real_num_node + , const size_type num_subblocks, const size_type real_block_alignment) + { //Trampoline function to ease inlining + if(m_totally_free_blocks > max_free_blocks){ + this->priv_deallocate_free_blocks_impl(max_free_blocks, real_num_node, num_subblocks, real_block_alignment); } } - //!Destructor. Deallocates all allocated blocks. 
Never throws - ~private_adaptive_node_pool_impl() - { this->priv_clear(); } + hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, const size_type num_subblocks, const size_type real_block_alignment) const + { return this->priv_first_subblock_from_block(block, num_subblocks, real_block_alignment, IsAlignOnly()); } - size_type get_real_num_node() const - { return m_real_num_node; } + hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, const size_type num_subblocks, const size_type real_block_alignment, AlignOnlyFalse) const + { + hdr_offset_holder *const hdr_off_holder = reinterpret_cast + (reinterpret_cast(block) - (num_subblocks-1)*real_block_alignment); + BOOST_ASSERT(hdr_off_holder->hdr_offset == size_type(reinterpret_cast(block) - reinterpret_cast(hdr_off_holder))); + BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (real_block_alignment - 1))); + BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (real_block_alignment - 1))); + return hdr_off_holder; + } - //!Returns the segment manager. Never throws - segment_manager_base_type* get_segment_manager_base()const - { return boost::movelib::to_raw_pointer(mp_segment_mngr_base); } + hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, const size_type num_subblocks, const size_type real_block_alignment, AlignOnlyTrue) const + { + (void)num_subblocks; (void)real_block_alignment; + return reinterpret_cast(block); + } + + void priv_deallocate_free_blocks_impl( const size_type max_free_blocks, const size_type real_num_node + , const size_type num_subblocks, const size_type real_block_alignment) + { + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + //Now check if we've reached the free nodes limit + //and check if we have free blocks. If so, deallocate as much + //as we can to stay below the limit + multiallocation_chain chain; + { + if(Flags & adaptive_pool_flag::size_ordered){ + const_block_iterator it = m_block_container.cend(); + --it; + size_type totally_free_blocks = m_totally_free_blocks; + + for( ; totally_free_blocks > max_free_blocks; --totally_free_blocks){ + BOOST_ASSERT(it->free_nodes.size() == real_num_node); + void *addr = priv_first_subblock_from_block(const_cast(&*it), num_subblocks, real_block_alignment); + --it; + block_container_traits_t::erase_last(m_block_container); + chain.push_front(void_pointer(addr)); + } + } + else{ + const_block_iterator it = m_block_container.cend(); + size_type totally_free_blocks = m_totally_free_blocks; + + while(totally_free_blocks > max_free_blocks){ + --it; + if(it->free_nodes.size() == real_num_node){ + void *addr = priv_first_subblock_from_block(const_cast(&*it), num_subblocks, real_block_alignment); + it = m_block_container.erase(it); + chain.push_front(void_pointer(addr)); + --totally_free_blocks; + } + } + } + BOOST_ASSERT((m_totally_free_blocks - max_free_blocks) == chain.size()); + m_totally_free_blocks = max_free_blocks; + } + this->mp_segment_mngr_base->deallocate_many(chain); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + } + + void priv_fill_chain_remaining_to_block + ( multiallocation_chain &chain, size_type target_elem_in_chain, block_info_t &c_info + , char *mem_address, size_type max_node_in_mem + , const size_type real_node_size) + { + BOOST_ASSERT(chain.size() <= target_elem_in_chain); + + //First add all possible nodes to the chain + const size_type left = target_elem_in_chain - chain.size(); + const size_type add_to_chain = (max_node_in_mem < left) ? 
max_node_in_mem : left; + char *free_mem_address = static_cast(boost::movelib::to_raw_pointer + (chain.incorporate_after(chain.last(), void_pointer(mem_address), real_node_size, add_to_chain))); + //Now store remaining nodes in the free list + if(const size_type free = max_node_in_mem - add_to_chain){ + free_nodes_t & free_nodes = c_info.free_nodes; + free_nodes.incorporate_after(free_nodes.last(), void_pointer(free_mem_address), real_node_size, free); + } + } + + //!Allocates a several blocks of nodes. Can throw + void priv_append_from_new_blocks( size_type min_elements, multiallocation_chain &chain + , const size_type max_free_blocks + , const size_type real_block_alignment, const size_type real_node_size + , const size_type real_num_node, const size_type num_subblocks + , AlignOnlyTrue) + { + (void)num_subblocks; + BOOST_ASSERT(m_block_container.empty()); + BOOST_ASSERT(min_elements > 0); + const size_type n = (min_elements - 1)/real_num_node + 1; + const size_type real_block_size = real_block_alignment - PayloadPerAllocation; + const size_type target_elem_in_chain = chain.size() + min_elements; + for(size_type i = 0; i != n; ++i){ + //We allocate a new NodeBlock and put it the last + //element of the tree + char *mem_address = static_cast + (mp_segment_mngr_base->allocate_aligned(real_block_size, real_block_alignment)); + if(!mem_address){ + //In case of error, free memory deallocating all nodes (the new ones allocated + //in this function plus previously stored nodes in chain). + this->priv_deallocate_nodes(chain, max_free_blocks, real_num_node, num_subblocks, real_block_alignment); + throw_bad_alloc(); + } + block_info_t &c_info = *new(mem_address)block_info_t(); + mem_address += HdrSize; + this->priv_fill_chain_remaining_to_block(chain, target_elem_in_chain, c_info, mem_address, real_num_node, real_node_size); + const size_type free_nodes = c_info.free_nodes.size(); + if(free_nodes){ + const bool is_full = free_nodes == real_num_node; + BOOST_ASSERT(free_nodes < real_num_node); + m_totally_free_blocks += static_cast(is_full); + block_container_traits_t::insert_was_empty(m_block_container, c_info, is_full); + } + } + } + + void priv_append_from_new_blocks( size_type min_elements, multiallocation_chain &chain + , const size_type max_free_blocks + , const size_type real_block_alignment, const size_type real_node_size + , const size_type real_num_node, const size_type num_subblocks + , AlignOnlyFalse) + { + BOOST_ASSERT(m_block_container.empty()); + BOOST_ASSERT(min_elements > 0); + const size_type n = (min_elements - 1)/real_num_node + 1; + const size_type real_block_size = real_block_alignment*num_subblocks - PayloadPerAllocation; + const size_type elements_per_subblock_mid = (real_block_alignment - HdrOffsetSize)/real_node_size; + const size_type elements_per_subblock_end = (real_block_alignment - HdrOffsetSize - PayloadPerAllocation) / real_node_size; + const size_type hdr_subblock_elements = (real_block_alignment - HdrSize - PayloadPerAllocation)/real_node_size; + const size_type target_elem_in_chain = chain.size() + min_elements; + + for(size_type i = 0; i != n; ++i){ + //We allocate a new NodeBlock and put it the last + //element of the tree + char *mem_address = static_cast + (mp_segment_mngr_base->allocate_aligned(real_block_size, real_block_alignment)); + if(!mem_address){ + //In case of error, free memory deallocating all nodes (the new ones allocated + //in this function plus previously stored nodes in chain). 
+ this->priv_deallocate_nodes(chain, max_free_blocks, real_num_node, num_subblocks, real_block_alignment); + throw_bad_alloc(); + } + //First initialize header information on the last subblock + char *hdr_addr = mem_address + real_block_alignment*(num_subblocks-1); + block_info_t &c_info = *new(hdr_addr)block_info_t(); + //Some structural checks + BOOST_ASSERT(static_cast(&static_cast(c_info).hdr_offset) == + static_cast(&c_info)); (void)c_info; + for( size_type subblock = 0, maxsubblock = num_subblocks - 1 + ; subblock < maxsubblock + ; ++subblock, mem_address += real_block_alignment){ + //Initialize header offset mark + new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address)); + const size_type elements_per_subblock = (subblock != (maxsubblock - 1)) ? elements_per_subblock_mid : elements_per_subblock_end; + this->priv_fill_chain_remaining_to_block + (chain, target_elem_in_chain, c_info, mem_address + HdrOffsetSize, elements_per_subblock, real_node_size); + } + this->priv_fill_chain_remaining_to_block + (chain, target_elem_in_chain, c_info, hdr_addr + HdrSize, hdr_subblock_elements, real_node_size); + m_totally_free_blocks += static_cast(c_info.free_nodes.size() == real_num_node); + if (c_info.free_nodes.size()) + m_block_container.push_front(c_info); + } + } //!Allocates array of count elements. Can throw - void *allocate_node() + void *priv_allocate_node( const size_type max_free_blocks, const size_type real_block_alignment, const size_type real_node_size + , const size_type real_num_node, const size_type num_subblocks) { - this->priv_invariants(); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); //If there are no free nodes we allocate a new block if(!m_block_container.empty()){ //We take the first free node the multiset can't be empty @@ -394,51 +739,39 @@ class private_adaptive_node_pool_impl if(free_nodes.empty()){ block_container_traits_t::erase_first(m_block_container); } - m_totally_free_blocks -= static_cast(free_nodes_count == m_real_num_node); - this->priv_invariants(); + m_totally_free_blocks -= static_cast(free_nodes_count == real_num_node); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); return first_node; } else{ multiallocation_chain chain; - this->priv_append_from_new_blocks(1, chain, IsAlignOnly()); - return boost::movelib::to_raw_pointer(chain.pop_front()); + this->priv_append_from_new_blocks + (1, chain, max_free_blocks, real_block_alignment, real_node_size, real_num_node, num_subblocks, IsAlignOnly()); + void *node = boost::movelib::to_raw_pointer(chain.pop_front()); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + return node; } } - //!Deallocates an array pointed by ptr. Never throws - void deallocate_node(void *pElem) - { - this->priv_invariants(); - block_info_t &block_info = *this->priv_block_from_node(pElem); - BOOST_ASSERT(block_info.free_nodes.size() < m_real_num_node); - - //We put the node at the beginning of the free node list - block_info.free_nodes.push_back(void_pointer(pElem)); - - //The loop reinserts all blocks except the last one - this->priv_reinsert_block(block_info, block_info.free_nodes.size() == 1); - this->priv_deallocate_free_blocks(m_max_free_blocks); - this->priv_invariants(); - } - - //!Allocates n nodes. 
- //!Can throw - void allocate_nodes(const size_type n, multiallocation_chain &chain) + void priv_allocate_nodes( const size_type n, multiallocation_chain &chain + , const size_type max_free_blocks, const size_type real_block_alignment, const size_type real_node_size + , const size_type real_num_node, const size_type num_subblocks) { size_type i = 0; BOOST_TRY{ - this->priv_invariants(); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); while(i != n){ //If there are no free nodes we allocate all needed blocks if (m_block_container.empty()){ - this->priv_append_from_new_blocks(n - i, chain, IsAlignOnly()); + this->priv_append_from_new_blocks + (n - i, chain, max_free_blocks, real_block_alignment, real_node_size, real_num_node, num_subblocks, IsAlignOnly()); BOOST_ASSERT(m_block_container.empty() || (++m_block_container.cbegin() == m_block_container.cend())); BOOST_ASSERT(chain.size() == n); break; } free_nodes_t &free_nodes = m_block_container.begin()->free_nodes; const size_type free_nodes_count_before = free_nodes.size(); - m_totally_free_blocks -= static_cast(free_nodes_count_before == m_real_num_node); + m_totally_free_blocks -= static_cast(free_nodes_count_before == real_num_node); const size_type num_left = n-i; const size_type num_elems = (num_left < free_nodes_count_before) ? num_left : free_nodes_count_before; typedef typename free_nodes_t::iterator free_nodes_iterator; @@ -466,17 +799,38 @@ class private_adaptive_node_pool_impl } } BOOST_CATCH(...){ - this->deallocate_nodes(chain); + this->priv_deallocate_nodes(chain, max_free_blocks, real_num_node, num_subblocks, real_block_alignment); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); BOOST_RETHROW } BOOST_CATCH_END - this->priv_invariants(); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); } - //!Deallocates a linked list of nodes. Never throws - void deallocate_nodes(multiallocation_chain &nodes) + //!Deallocates an array pointed by ptr. Never throws + void priv_deallocate_node( void *pElem + , const size_type max_free_blocks, const size_type real_num_node + , const size_type num_subblocks, const size_type real_block_alignment) { - this->priv_invariants(); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + block_info_t &block_info = *this->priv_block_from_node(pElem, real_block_alignment); + const size_type prev_free_nodes = block_info.free_nodes.size(); + BOOST_ASSERT(block_info.free_nodes.size() < real_num_node); + + //We put the node at the beginning of the free node list + block_info.free_nodes.push_back(void_pointer(pElem)); + + //The loop reinserts all blocks except the last one + this->priv_reinsert_block(block_info, prev_free_nodes == 0, real_num_node); + this->priv_deallocate_free_blocks(max_free_blocks, real_num_node, num_subblocks, real_block_alignment); + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + } + + void priv_deallocate_nodes( multiallocation_chain &nodes + , const size_type max_free_blocks, const size_type real_num_node + , const size_type num_subblocks, const size_type real_block_alignment) + { + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); //To take advantage of node locality, wait until two //nodes belong to different blocks. Only then reinsert //the block of the first node in the block tree. 
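// --- Illustrative note (not part of the patch) -------------------------------
// The node-locality logic above backs the burst interface of the pooled
// allocator. A minimal sketch, assuming the post-patch public header
// boost/container/adaptive_pool.hpp and the allocate_individual /
// deallocate_individual members added earlier in this diff:
#include <boost/container/adaptive_pool.hpp>

int main()
{
   typedef boost::container::private_adaptive_pool<int> pool_allocator;
   pool_allocator a;
   pool_allocator::multiallocation_chain chain;
   //Burst-allocate 100 nodes in one call; they end up grouped in few blocks
   a.allocate_individual(100u, chain);
   //...nodes would normally be popped from the chain and used here...
   //Returning the whole chain lets the pool reinsert nodes block by block,
   //which is the pattern priv_deallocate_nodes() above optimizes for.
   a.deallocate_individual(chain);
   return 0;
}
// ------------------------------------------------------------------------------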
@@ -493,8 +847,8 @@ class private_adaptive_node_pool_impl size_type splice_node_count = size_type(-1); while(itf != ite){ void *pElem = boost::movelib::to_raw_pointer(boost::movelib::iterator_to_raw_pointer(itf)); - block_info_t &block_info = *this->priv_block_from_node(pElem); - BOOST_ASSERT(block_info.free_nodes.size() < m_real_num_node); + block_info_t &block_info = *this->priv_block_from_node(pElem, real_block_alignment); + BOOST_ASSERT(block_info.free_nodes.size() < real_num_node); ++splice_node_count; //If block change is detected calculate the cached block position in the tree @@ -503,7 +857,7 @@ class private_adaptive_node_pool_impl free_nodes_iterator it(itbb); ++it; nodes.erase_after(itbb, itf, splice_node_count); prev_block_info->free_nodes.incorporate_after(prev_block_info->free_nodes.last(), &*it, &*itbf, splice_node_count); - this->priv_reinsert_block(*prev_block_info, prev_block_was_empty); + this->priv_reinsert_block(*prev_block_info, prev_block_was_empty, real_num_node); splice_node_count = 0; } //Update cache with new data @@ -520,14 +874,78 @@ class private_adaptive_node_pool_impl const size_type splice_node_count = nodes.size(); nodes.clear(); prev_block_info->free_nodes.incorporate_after(prev_block_info->free_nodes.last(), &*itfirst, &*itlast, splice_node_count); - this->priv_reinsert_block(*prev_block_info, prev_block_was_empty); - this->priv_invariants(); - this->priv_deallocate_free_blocks(m_max_free_blocks); + this->priv_reinsert_block(*prev_block_info, prev_block_was_empty, real_num_node); + this->priv_deallocate_free_blocks(max_free_blocks, real_num_node, num_subblocks, real_block_alignment); + } + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + } + + void priv_reinsert_block(block_info_t &prev_block_info, const bool prev_block_was_empty, const size_type real_num_node) + { + //Cache the free nodes from the block + const size_type this_block_free_nodes = prev_block_info.free_nodes.size(); + const bool is_full = this_block_free_nodes == real_num_node; + + //Update free block count + m_totally_free_blocks += static_cast(is_full); + if(prev_block_was_empty){ + block_container_traits_t::insert_was_empty(m_block_container, prev_block_info, is_full); + } + else{ + block_container_traits_t::reinsert_was_used(m_block_container, prev_block_info, is_full); } } - void deallocate_free_blocks() - { this->priv_deallocate_free_blocks(0); } + block_info_t *priv_block_from_node(void *node, const size_type real_block_alignment, AlignOnlyFalse) const + { + hdr_offset_holder *hdr_off_holder = + reinterpret_cast((std::size_t)node & size_type(~(real_block_alignment - 1))); + BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (real_block_alignment - 1))); + BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (real_block_alignment - 1))); + block_info_t *block = reinterpret_cast + (reinterpret_cast(hdr_off_holder) + hdr_off_holder->hdr_offset); + BOOST_ASSERT(block->hdr_offset == 0); + return block; + } + + block_info_t *priv_block_from_node(void *node, const size_type real_block_alignment, AlignOnlyTrue) const + { + return (block_info_t *)((std::size_t)node & std::size_t(~(real_block_alignment - 1))); + } + + block_info_t *priv_block_from_node(void *node, const size_type real_block_alignment) const + { return this->priv_block_from_node(node, real_block_alignment, IsAlignOnly()); } + + //!Deallocates all used memory. 
Never throws + void priv_clear(const size_type num_subblocks, const size_type real_block_alignment, const size_type real_num_node) + { + #ifndef NDEBUG + block_iterator it = m_block_container.begin(); + block_iterator itend = m_block_container.end(); + size_type n_free_nodes = 0; + for(; it != itend; ++it){ + //Check for memory leak + BOOST_ASSERT(it->free_nodes.size() == real_num_node); + ++n_free_nodes; + } + BOOST_ASSERT(n_free_nodes == m_totally_free_blocks); + #endif + //Check for memory leaks + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + multiallocation_chain chain; + m_block_container.clear_and_dispose(block_destroyer(this, chain, num_subblocks, real_block_alignment, real_num_node)); + this->mp_segment_mngr_base->deallocate_many(chain); + m_totally_free_blocks = 0; + this->priv_invariants(real_num_node, num_subblocks, real_block_alignment); + } + + public: + private_adaptive_node_pool_impl_common(segment_manager_base_type *segment_mngr_base) + //General purpose allocator + : mp_segment_mngr_base(segment_mngr_base) + , m_block_container() + , m_totally_free_blocks(0) + {} size_type num_free_nodes() { @@ -540,340 +958,296 @@ class private_adaptive_node_pool_impl return count; } - void swap(private_adaptive_node_pool_impl &other) + void swap(private_adaptive_node_pool_impl_common &other) { - BOOST_ASSERT(m_max_free_blocks == other.m_max_free_blocks); - BOOST_ASSERT(m_real_node_size == other.m_real_node_size); - BOOST_ASSERT(m_real_block_alignment == other.m_real_block_alignment); - BOOST_ASSERT(m_real_num_node == other.m_real_num_node); std::swap(mp_segment_mngr_base, other.mp_segment_mngr_base); std::swap(m_totally_free_blocks, other.m_totally_free_blocks); m_block_container.swap(other.m_block_container); } + //!Returns the segment manager. 
Never throws + segment_manager_base_type* get_segment_manager_base()const + { return boost::movelib::to_raw_pointer(mp_segment_mngr_base); } +}; + +template< class SizeType + , std::size_t HdrSize + , std::size_t PayloadPerAllocation + , std::size_t RealNodeSize + , std::size_t NodesPerBlock + , std::size_t HdrOffsetSize + , std::size_t OverheadPercent + , bool AlignOnly> +struct calculate_alignment_ct +{ + static const std::size_t alignment = upper_power_of_2_ct::value; + static const std::size_t num_subblocks = 0; + static const std::size_t real_num_node = (alignment - PayloadPerAllocation - HdrSize)/RealNodeSize; +}; + +template< class SizeType + , std::size_t HdrSize + , std::size_t PayloadPerAllocation + , std::size_t RealNodeSize + , std::size_t NodesPerBlock + , std::size_t HdrOffsetSize + , std::size_t OverheadPercent> +struct calculate_alignment_ct + < SizeType + , HdrSize + , PayloadPerAllocation + , RealNodeSize + , NodesPerBlock + , HdrOffsetSize + , OverheadPercent + , false> +{ + typedef typename candidate_power_of_2_ct + < upper_power_of_2_ct::value + , RealNodeSize + , PayloadPerAllocation + , NodesPerBlock + , HdrSize + , HdrOffsetSize + , OverheadPercent + >::type type; + + static const std::size_t alignment = type::alignment; + static const std::size_t num_subblocks = type::num_subblocks; + static const std::size_t real_num_node = type::real_num_node; +}; + + +///////////////////////////////////////////// +// +// private_adaptive_node_pool_impl_ct +// +///////////////////////////////////////////// +template< class SegmentManagerBase + , std::size_t MaxFreeBlocks + , std::size_t NodeSize + , std::size_t NodesPerBlock + , std::size_t OverheadPercent + , unsigned int Flags> +class private_adaptive_node_pool_impl_ct + : public private_adaptive_node_pool_impl_common +{ + typedef private_adaptive_node_pool_impl_common base_t; + + //Non-copyable + private_adaptive_node_pool_impl_ct(); + private_adaptive_node_pool_impl_ct(const private_adaptive_node_pool_impl_ct &); + private_adaptive_node_pool_impl_ct &operator=(const private_adaptive_node_pool_impl_ct &); + + public: + typedef typename base_t::void_pointer void_pointer; + typedef typename base_t::size_type size_type; + typedef typename base_t::multiallocation_chain multiallocation_chain; + typedef typename base_t::segment_manager_base_type segment_manager_base_type; + + static const typename base_t::size_type PayloadPerAllocation = base_t::PayloadPerAllocation; + + //align_only + static const bool AlignOnly = base_t::AlignOnly; + + private: + static const size_type MaxAlign = base_t::MaxAlign; + static const size_type HdrSize = base_t::HdrSize; + static const size_type HdrOffsetSize = base_t::HdrOffsetSize; + + static const size_type RealNodeSize = lcm_ct::value>::value; + + typedef calculate_alignment_ct + < size_type, HdrSize, PayloadPerAllocation + , RealNodeSize, NodesPerBlock, HdrOffsetSize, OverheadPercent, AlignOnly> data_t; + + //Round the size to a power of two value. + //This is the total memory size (including payload) that we want to + //allocate from the general-purpose allocator + static const size_type NumSubBlocks = data_t::num_subblocks; + static const size_type RealNumNode = data_t::real_num_node; + static const size_type RealBlockAlignment = data_t::alignment; + + public: + + //!Constructor from a segment manager. Never throws + private_adaptive_node_pool_impl_ct(typename base_t::segment_manager_base_type *segment_mngr_base) + //General purpose allocator + : base_t(segment_mngr_base) + {} + + //!Destructor. 
Deallocates all allocated blocks. Never throws + ~private_adaptive_node_pool_impl_ct() + { this->priv_clear(NumSubBlocks, data_t::alignment, RealNumNode); } + + size_type get_real_num_node() const + { return RealNumNode; } + + //!Allocates array of count elements. Can throw + void *allocate_node() + { + return this->priv_allocate_node + (MaxFreeBlocks, data_t::alignment, RealNodeSize, RealNumNode, NumSubBlocks); + } + + //!Allocates n nodes. + //!Can throw + void allocate_nodes(const size_type n, multiallocation_chain &chain) + { + this->priv_allocate_nodes + (n, chain, MaxFreeBlocks, data_t::alignment, RealNodeSize, RealNumNode, NumSubBlocks); + } + + //!Deallocates an array pointed by ptr. Never throws + void deallocate_node(void *pElem) + { + this->priv_deallocate_node(pElem, MaxFreeBlocks, RealNumNode, NumSubBlocks, RealBlockAlignment); + } + + //!Deallocates a linked list of nodes. Never throws + void deallocate_nodes(multiallocation_chain &nodes) + { + this->priv_deallocate_nodes(nodes, MaxFreeBlocks, RealNumNode, NumSubBlocks, data_t::alignment); + } + + void deallocate_free_blocks() + { this->priv_deallocate_free_blocks(0, RealNumNode, NumSubBlocks, data_t::alignment); } + //Deprecated, use deallocate_free_blocks void deallocate_free_chunks() - { this->priv_deallocate_free_blocks(0); } + { this->priv_deallocate_free_blocks(0, RealNumNode, NumSubBlocks, data_t::alignment); } +}; - private: +///////////////////////////////////////////// +// +// private_adaptive_node_pool_impl_rt +// +///////////////////////////////////////////// +template +struct private_adaptive_node_pool_impl_rt_data +{ + typedef SizeType size_type; - void priv_deallocate_free_blocks(size_type max_free_blocks) - { //Trampoline function to ease inlining - if(m_totally_free_blocks > max_free_blocks){ - this->priv_deallocate_free_blocks_impl(max_free_blocks); - } - } + private_adaptive_node_pool_impl_rt_data(size_type max_free_blocks, size_type real_node_size) + : m_max_free_blocks(max_free_blocks), m_real_node_size(real_node_size) + , m_real_block_alignment(), m_num_subblocks(), m_real_num_node() + {} - void priv_deallocate_free_blocks_impl(size_type max_free_blocks) - { - this->priv_invariants(); - //Now check if we've reached the free nodes limit - //and check if we have free blocks. 
If so, deallocate as much - //as we can to stay below the limit - multiallocation_chain chain; - { - const const_block_iterator itend = m_block_container.cend(); - const_block_iterator it = itend; - --it; - size_type totally_free_blocks = m_totally_free_blocks; - - for( ; totally_free_blocks > max_free_blocks; --totally_free_blocks){ - BOOST_ASSERT(it->free_nodes.size() == m_real_num_node); - void *addr = priv_first_subblock_from_block(const_cast(&*it)); - --it; - block_container_traits_t::erase_last(m_block_container); - chain.push_front(void_pointer(addr)); - } - BOOST_ASSERT((m_totally_free_blocks - max_free_blocks) == chain.size()); - m_totally_free_blocks = max_free_blocks; - } - this->mp_segment_mngr_base->deallocate_many(chain); - } - - void priv_reinsert_block(block_info_t &prev_block_info, const bool prev_block_was_empty) - { - //Cache the free nodes from the block - const size_type this_block_free_nodes = prev_block_info.free_nodes.size(); - const bool is_full = this_block_free_nodes == m_real_num_node; - - //Update free block count - m_totally_free_blocks += static_cast(is_full); - if(prev_block_was_empty){ - block_container_traits_t::insert_was_empty(m_block_container, prev_block_info, is_full); - } - else{ - block_container_traits_t::reinsert_was_used(m_block_container, prev_block_info, is_full); - } - } - - class block_destroyer; - friend class block_destroyer; - - class block_destroyer - { - public: - block_destroyer(const this_type *impl, multiallocation_chain &chain) - : mp_impl(impl), m_chain(chain) - {} - - void operator()(typename block_container_t::pointer to_deallocate) - { return this->do_destroy(to_deallocate, IsAlignOnly()); } - - private: - void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyTrue) - { - BOOST_ASSERT(to_deallocate->free_nodes.size() == mp_impl->m_real_num_node); - m_chain.push_back(to_deallocate); - } - - void do_destroy(typename block_container_t::pointer to_deallocate, AlignOnlyFalse) - { - BOOST_ASSERT(to_deallocate->free_nodes.size() == mp_impl->m_real_num_node); - BOOST_ASSERT(0 == to_deallocate->hdr_offset); - hdr_offset_holder *hdr_off_holder = - mp_impl->priv_first_subblock_from_block(boost::movelib::to_raw_pointer(to_deallocate)); - m_chain.push_back(hdr_off_holder); - } - - const this_type *mp_impl; - multiallocation_chain &m_chain; - }; - - //This macro will activate invariant checking. Slow, but helpful for debugging the code. 
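The priv_deallocate_free_blocks logic above enforces the pool's cap on completely free blocks: whenever more than max_free_blocks blocks hold nothing but free nodes, the surplus blocks are unlinked from the block container and returned to the segment manager in a single batch. A simplified, self-contained sketch of that policy (hypothetical types; std::free stands in for deallocate_many):

#include <cstddef>
#include <cstdlib>
#include <list>

struct block { std::size_t free_nodes; void *mem; };   //stand-in for block_info_t

//Trim completely free blocks down to max_free_blocks, assuming the container
//keeps fully free blocks (free_nodes == nodes_per_block) at the back.
void trim_free_blocks(std::list<block> &blocks, std::size_t &totally_free_blocks,
                      std::size_t max_free_blocks, std::size_t nodes_per_block)
{
   while(totally_free_blocks > max_free_blocks){
      block &last = blocks.back();
      if(last.free_nodes != nodes_per_block)
         break;                     //not fully free, nothing left to trim
      std::free(last.mem);          //the real pool batches these into one deallocate_many()
      blocks.pop_back();
      --totally_free_blocks;
   }
}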
- //#define BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS - void priv_invariants() - #ifdef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS - #undef BOOST_CONTAINER_ADAPTIVE_NODE_POOL_CHECK_INVARIANTS - { - const const_block_iterator itend(m_block_container.end()); - - { //We iterate through the block tree to free the memory - const_block_iterator it(m_block_container.begin()); - - if(it != itend){ - for(++it; it != itend; ++it){ - const_block_iterator prev(it); - --prev; - BOOST_ASSERT(*prev < *it); - (void)prev; (void)it; - } - } - } - { //Check that the total free nodes are correct - const_block_iterator it(m_block_container.cbegin()); - size_type total_free_nodes = 0; - for(; it != itend; ++it){ - total_free_nodes += it->free_nodes.size(); - } - BOOST_ASSERT(total_free_nodes >= m_totally_free_blocks*m_real_num_node); - } - { //Check that the total totally free blocks are correct - BOOST_ASSERT(m_block_container.size() >= m_totally_free_blocks); - const_block_iterator it = m_block_container.cend(); - size_type total_free_blocks = m_totally_free_blocks; - while(total_free_blocks--){ - BOOST_ASSERT((--it)->free_nodes.size() == m_real_num_node); - } - } - - if(!AlignOnly){ - //Check that header offsets are correct - const_block_iterator it = m_block_container.begin(); - for(; it != itend; ++it){ - hdr_offset_holder *hdr_off_holder = this->priv_first_subblock_from_block(const_cast(&*it)); - for(size_type i = 0, max = m_num_subblocks; i < max; ++i){ - const size_type offset = reinterpret_cast(const_cast(&*it)) - reinterpret_cast(hdr_off_holder); - BOOST_ASSERT(hdr_off_holder->hdr_offset == offset); - BOOST_ASSERT(0 == ((size_type)hdr_off_holder & (m_real_block_alignment - 1))); - BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1))); - hdr_off_holder = reinterpret_cast(reinterpret_cast(hdr_off_holder) + m_real_block_alignment); - } - } - } - } - #else - {} //empty - #endif - - //!Deallocates all used memory. 
Never throws - void priv_clear() - { - #ifndef NDEBUG - block_iterator it = m_block_container.begin(); - block_iterator itend = m_block_container.end(); - size_type n_free_nodes = 0; - for(; it != itend; ++it){ - //Check for memory leak - BOOST_ASSERT(it->free_nodes.size() == m_real_num_node); - ++n_free_nodes; - } - BOOST_ASSERT(n_free_nodes == m_totally_free_blocks); - #endif - //Check for memory leaks - this->priv_invariants(); - multiallocation_chain chain; - m_block_container.clear_and_dispose(block_destroyer(this, chain)); - this->mp_segment_mngr_base->deallocate_many(chain); - m_totally_free_blocks = 0; - } - - block_info_t *priv_block_from_node(void *node, AlignOnlyFalse) const - { - hdr_offset_holder *hdr_off_holder = - reinterpret_cast((std::size_t)node & size_type(~(m_real_block_alignment - 1))); - BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1))); - BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1))); - block_info_t *block = reinterpret_cast - (reinterpret_cast(hdr_off_holder) + hdr_off_holder->hdr_offset); - BOOST_ASSERT(block->hdr_offset == 0); - return block; - } - - block_info_t *priv_block_from_node(void *node, AlignOnlyTrue) const - { - return (block_info_t *)((std::size_t)node & std::size_t(~(m_real_block_alignment - 1))); - } - - block_info_t *priv_block_from_node(void *node) const - { return this->priv_block_from_node(node, IsAlignOnly()); } - - hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block) const - { return this->priv_first_subblock_from_block(block, IsAlignOnly()); } - - hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, AlignOnlyFalse) const - { - hdr_offset_holder *const hdr_off_holder = reinterpret_cast - (reinterpret_cast(block) - (m_num_subblocks-1)*m_real_block_alignment); - BOOST_ASSERT(hdr_off_holder->hdr_offset == size_type(reinterpret_cast(block) - reinterpret_cast(hdr_off_holder))); - BOOST_ASSERT(0 == ((std::size_t)hdr_off_holder & (m_real_block_alignment - 1))); - BOOST_ASSERT(0 == (hdr_off_holder->hdr_offset & (m_real_block_alignment - 1))); - return hdr_off_holder; - } - - hdr_offset_holder *priv_first_subblock_from_block(block_info_t *block, AlignOnlyTrue) const - { - return reinterpret_cast(block); - } - - void priv_dispatch_block_chain_or_free - ( multiallocation_chain &chain, block_info_t &c_info, size_type num_node - , char *mem_address, size_type total_elements, bool insert_block_if_free) - { - BOOST_ASSERT(chain.size() <= total_elements); - //First add all possible nodes to the chain - const size_type left = total_elements - chain.size(); - const size_type max_chain = (num_node < left) ? num_node : left; - mem_address = static_cast(boost::movelib::to_raw_pointer - (chain.incorporate_after(chain.last(), void_pointer(mem_address), m_real_node_size, max_chain))); - //Now store remaining nodes in the free list - if(const size_type max_free = num_node - max_chain){ - free_nodes_t & free_nodes = c_info.free_nodes; - free_nodes.incorporate_after(free_nodes.last(), void_pointer(mem_address), m_real_node_size, max_free); - if(insert_block_if_free){ - m_block_container.push_front(c_info); - } - } - } - - //!Allocates a several blocks of nodes. 
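priv_block_from_node above recovers the owning block header from a raw node pointer by masking off the low bits of the address; this works because, in the align-only case, every block is allocated on a power-of-two boundary equal to the real block alignment (the general case additionally follows the stored hdr_offset). A standalone sketch of that trick, not the library code; std::aligned_alloc (C++17) stands in for allocate_aligned:

#include <cassert>
#include <cstdint>
#include <cstdlib>

struct block_header { /* free-node list, tree hook, ... */ };

int main()
{
   const std::size_t block_alignment = 4096;          //must be a power of two
   void *block = std::aligned_alloc(block_alignment, block_alignment);
   if(!block) return 1;
   void *node  = static_cast<char*>(block) + 1000;    //some node inside the block

   //Clearing the low log2(alignment) bits of the node address yields the block start
   block_header *hdr = reinterpret_cast<block_header*>(
      reinterpret_cast<std::uintptr_t>(node) & ~std::uintptr_t(block_alignment - 1));

   assert(static_cast<void*>(hdr) == block);
   std::free(block);
   return 0;
}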
Can throw - void priv_append_from_new_blocks(size_type min_elements, multiallocation_chain &chain, AlignOnlyTrue) - { - BOOST_ASSERT(m_block_container.empty()); - BOOST_ASSERT(min_elements > 0); - const size_type n = (min_elements - 1)/m_real_num_node + 1; - const size_type real_block_size = m_real_block_alignment - PayloadPerAllocation; - const size_type total_elements = chain.size() + min_elements; - for(size_type i = 0; i != n; ++i){ - //We allocate a new NodeBlock and put it the last - //element of the tree - char *mem_address = static_cast - (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment)); - if(!mem_address){ - //In case of error, free memory deallocating all nodes (the new ones allocated - //in this function plus previously stored nodes in chain). - this->deallocate_nodes(chain); - throw_bad_alloc(); - } - block_info_t &c_info = *new(mem_address)block_info_t(); - mem_address += HdrSize; - if(i != (n-1)){ - chain.incorporate_after(chain.last(), void_pointer(mem_address), m_real_node_size, m_real_num_node); - } - else{ - this->priv_dispatch_block_chain_or_free(chain, c_info, m_real_num_node, mem_address, total_elements, true); - } - } - } - - void priv_append_from_new_blocks(size_type min_elements, multiallocation_chain &chain, AlignOnlyFalse) - { - BOOST_ASSERT(m_block_container.empty()); - BOOST_ASSERT(min_elements > 0); - const size_type n = (min_elements - 1)/m_real_num_node + 1; - const size_type real_block_size = m_real_block_alignment*m_num_subblocks - PayloadPerAllocation; - const size_type elements_per_subblock = (m_real_block_alignment - HdrOffsetSize)/m_real_node_size; - const size_type hdr_subblock_elements = (m_real_block_alignment - HdrSize - PayloadPerAllocation)/m_real_node_size; - const size_type total_elements = chain.size() + min_elements; - - for(size_type i = 0; i != n; ++i){ - //We allocate a new NodeBlock and put it the last - //element of the tree - char *mem_address = static_cast - (mp_segment_mngr_base->allocate_aligned(real_block_size, m_real_block_alignment)); - if(!mem_address){ - //In case of error, free memory deallocating all nodes (the new ones allocated - //in this function plus previously stored nodes in chain). 
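The allocation loop above carves each freshly allocated block into fixed-size nodes, handing part of them to the caller's multiallocation chain and threading the remainder into the block's free-node list. A simplified sketch of that carving step as a plain singly linked free list (hypothetical helper; std::malloc stands in for allocate_aligned):

#include <cassert>
#include <cstddef>
#include <cstdlib>

struct free_node { free_node *next; };

//Thread num_nodes nodes of node_size bytes, starting at mem, into a free list
free_node *carve_block(void *mem, std::size_t node_size, std::size_t num_nodes)
{
   assert(node_size >= sizeof(free_node));
   free_node *head = nullptr;
   char *p = static_cast<char*>(mem) + node_size*num_nodes;
   for(std::size_t i = 0; i != num_nodes; ++i){   //push back to front
      p -= node_size;
      free_node *n = reinterpret_cast<free_node*>(p);
      n->next = head;
      head = n;
   }
   return head;   //the block's first node ends up at the head of the list
}

int main()
{
   const std::size_t node_size = 32, num_nodes = 100;
   void *block = std::malloc(node_size*num_nodes);
   if(!block) return 1;
   free_node *head = carve_block(block, node_size, num_nodes);
   assert(static_cast<void*>(head) == block);
   std::free(block);
   return 0;
}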
- this->deallocate_nodes(chain); - throw_bad_alloc(); - } - //First initialize header information on the last subblock - char *hdr_addr = mem_address + m_real_block_alignment*(m_num_subblocks-1); - block_info_t &c_info = *new(hdr_addr)block_info_t(); - //Some structural checks - BOOST_ASSERT(static_cast(&static_cast(c_info).hdr_offset) == - static_cast(&c_info)); (void)c_info; - if(i != (n-1)){ - for( size_type subblock = 0, maxsubblock = m_num_subblocks - 1 - ; subblock < maxsubblock - ; ++subblock, mem_address += m_real_block_alignment){ - //Initialize header offset mark - new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address)); - chain.incorporate_after - (chain.last(), void_pointer(mem_address + HdrOffsetSize), m_real_node_size, elements_per_subblock); - } - chain.incorporate_after(chain.last(), void_pointer(hdr_addr + HdrSize), m_real_node_size, hdr_subblock_elements); - } - else{ - for( size_type subblock = 0, maxsubblock = m_num_subblocks - 1 - ; subblock < maxsubblock - ; ++subblock, mem_address += m_real_block_alignment){ - //Initialize header offset mark - new(mem_address) hdr_offset_holder(size_type(hdr_addr - mem_address)); - this->priv_dispatch_block_chain_or_free - (chain, c_info, elements_per_subblock, mem_address + HdrOffsetSize, total_elements, false); - } - this->priv_dispatch_block_chain_or_free - (chain, c_info, hdr_subblock_elements, hdr_addr + HdrSize, total_elements, true); - } - } - } - - private: - typedef typename boost::intrusive::pointer_traits - ::template rebind_pointer::type segment_mngr_base_ptr_t; const size_type m_max_free_blocks; const size_type m_real_node_size; //Round the size to a power of two value. //This is the total memory size (including payload) that we want to //allocate from the general-purpose allocator - const size_type m_real_block_alignment; + size_type m_real_block_alignment; size_type m_num_subblocks; //This is the real number of nodes per block - //const size_type m_real_num_node; - segment_mngr_base_ptr_t mp_segment_mngr_base; //Segment manager - block_container_t m_block_container; //Intrusive block list - size_type m_totally_free_blocks; //Free blocks +}; + + +template +class private_adaptive_node_pool_impl_rt + : private private_adaptive_node_pool_impl_rt_data + , public private_adaptive_node_pool_impl_common +{ + typedef private_adaptive_node_pool_impl_common impl_t; + typedef private_adaptive_node_pool_impl_rt_data data_t; + + //Non-copyable + private_adaptive_node_pool_impl_rt(); + private_adaptive_node_pool_impl_rt(const private_adaptive_node_pool_impl_rt &); + private_adaptive_node_pool_impl_rt &operator=(const private_adaptive_node_pool_impl_rt &); + + protected: + + typedef typename impl_t::void_pointer void_pointer; + typedef typename impl_t::size_type size_type; + typedef typename impl_t::multiallocation_chain multiallocation_chain; + + static const typename impl_t::size_type PayloadPerAllocation = impl_t::PayloadPerAllocation; + + //Flags + //align_only + static const bool AlignOnly = impl_t::AlignOnly; + + static const size_type HdrSize = impl_t::HdrSize; + static const size_type HdrOffsetSize = impl_t::HdrOffsetSize; + + public: + + //!Segment manager typedef + typedef SegmentManagerBase segment_manager_base_type; + + //!Constructor from a segment manager. 
Never throws + private_adaptive_node_pool_impl_rt + ( segment_manager_base_type *segment_mngr_base + , size_type node_size + , size_type nodes_per_block + , size_type max_free_blocks + , unsigned char overhead_percent + ) + : data_t(max_free_blocks, lcm(node_size, size_type(alignment_of::value))) + , impl_t(segment_mngr_base) + { + if(AlignOnly){ + this->m_real_block_alignment = upper_power_of_2(HdrSize + this->m_real_node_size*nodes_per_block); + this->m_real_num_node = (this->m_real_block_alignment - PayloadPerAllocation - HdrSize)/this->m_real_node_size; + } + else{ + candidate_power_of_2_rt ( upper_power_of_2(HdrSize + PayloadPerAllocation + this->m_real_node_size) + , this->m_real_node_size + , PayloadPerAllocation + , nodes_per_block + , HdrSize + , HdrOffsetSize + , overhead_percent + , this->m_real_block_alignment + , this->m_num_subblocks + , this->m_real_num_node); + } + } + + //!Destructor. Deallocates all allocated blocks. Never throws + ~private_adaptive_node_pool_impl_rt() + { this->priv_clear(this->m_num_subblocks, this->m_real_block_alignment, this->m_real_num_node); } + + size_type get_real_num_node() const + { return this->m_real_num_node; } + + //!Allocates array of count elements. Can throw + void *allocate_node() + { + return this->priv_allocate_node + (this->m_max_free_blocks, this->m_real_block_alignment, this->m_real_node_size, this->m_real_num_node, this->m_num_subblocks); + } + + //!Allocates n nodes. + //!Can throw + void allocate_nodes(const size_type n, multiallocation_chain &chain) + { + + this->priv_allocate_nodes + (n, chain, this->m_max_free_blocks, this->m_real_block_alignment, this->m_real_node_size, this->m_real_num_node, this->m_num_subblocks); + } + + //!Deallocates an array pointed by ptr. Never throws + void deallocate_node(void *pElem) + { + this->priv_deallocate_node(pElem, this->m_max_free_blocks, this->m_real_num_node, this->m_num_subblocks, this->m_real_block_alignment); + } + + //!Deallocates a linked list of nodes. 
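The runtime constructor above first rounds the requested node size with lcm() (the exact alignment it is combined with is elided in this hunk). The motivation is that every free node must be able to hold the free-list hook, so the node size is bumped to a common multiple of the element size and that alignment. A small illustration of the same rounding, assuming pointer alignment on a typical 64-bit target and using the standard std::lcm:

#include <cstddef>
#include <numeric>    //std::lcm (C++17)

int main()
{
   const std::size_t node_size      = 12;   //invented element size
   const std::size_t real_node_size = std::lcm(node_size, alignof(void*));
   //With alignof(void*) == 8 this yields 24, a size that keeps both alignments
   return real_node_size >= node_size ? 0 : 1;
}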
Never throws + void deallocate_nodes(multiallocation_chain &nodes) + { + this->priv_deallocate_nodes(nodes, this->m_max_free_blocks, this->m_real_num_node, this->m_num_subblocks, this->m_real_block_alignment); + } + + void deallocate_free_blocks() + { this->priv_deallocate_free_blocks(0, this->m_real_num_node, this->m_num_subblocks, this->m_real_block_alignment); } + + //Deprecated, use deallocate_free_blocks + void deallocate_free_chunks() + { this->priv_deallocate_free_blocks(0, this->m_real_num_node, this->m_num_subblocks, this->m_real_block_alignment); } }; } //namespace dtl { diff --git a/include/boost/container/detail/math_functions.hpp b/include/boost/container/detail/math_functions.hpp index f151931..8d350a1 100644 --- a/include/boost/container/detail/math_functions.hpp +++ b/include/boost/container/detail/math_functions.hpp @@ -94,6 +94,34 @@ inline Integer upper_power_of_2(const Integer & A) return power_of_2; } +template +struct upper_power_of_2_loop_ct +{ + + template + struct apply + { + static const Integer value = + upper_power_of_2_loop_ct P*2)>::template apply::value; + }; +}; + +template +struct upper_power_of_2_loop_ct +{ + template + struct apply + { + static const Integer value = P; + }; +}; + +template +struct upper_power_of_2_ct +{ + static const Integer value = upper_power_of_2_loop_ct 1)>::template apply::value; +}; + //This function uses binary search to discover the //highest set bit of the integer inline std::size_t floor_log2 (std::size_t x) @@ -114,6 +142,32 @@ inline std::size_t floor_log2 (std::size_t x) return log2; } +template +struct gcd_ct +{ + static const std::size_t Max = I1 > I2 ? I1 : I2; + static const std::size_t Min = I1 < I2 ? I1 : I2; + static const std::size_t value = gcd_ct::value; +}; + +template +struct gcd_ct +{ + static const std::size_t value = I1; +}; + +template +struct gcd_ct<0, I1> +{ + static const std::size_t value = I1; +}; + +template +struct lcm_ct +{ + static const std::size_t value = I1 * I2 / gcd_ct::value; +}; + } // namespace dtl } // namespace container } // namespace boost diff --git a/proj/vc7ide/bench_set_adaptive_pool.vcproj b/proj/vc7ide/bench_set_adaptive_pool.vcproj new file mode 100644 index 0000000..fcb8dbe --- /dev/null +++ b/proj/vc7ide/bench_set_adaptive_pool.vcproj @@ -0,0 +1,136 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/proj/vc7ide/container.sln b/proj/vc7ide/container.sln index d4947fb..7693348 100644 --- a/proj/vc7ide/container.sln +++ b/proj/vc7ide/container.sln @@ -331,6 +331,10 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "allocator_traits_test", "al ProjectSection(ProjectDependencies) = postProject EndProjectSection EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bench_set_adaptive_pool", "bench_set_adaptive_pool.vcproj", "{5C2D1813-24CE-A826-4FE5-5732251A3FAF}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject Global GlobalSection(SolutionConfiguration) = preSolution Debug = Debug @@ -669,6 +673,10 @@ Global {5CE11C83-096A-84FE-4FA2-D3A6BA792002}.Debug.Build.0 = Debug|Win32 {5CE11C83-096A-84FE-4FA2-D3A6BA792002}.Release.ActiveCfg = Release|Win32 {5CE11C83-096A-84FE-4FA2-D3A6BA792002}.Release.Build.0 = Release|Win32 + {5C2D1813-24CE-A826-4FE5-5732251A3FAF}.Debug.ActiveCfg = Debug|Win32 + {5C2D1813-24CE-A826-4FE5-5732251A3FAF}.Debug.Build.0 = Debug|Win32 + {5C2D1813-24CE-A826-4FE5-5732251A3FAF}.Release.ActiveCfg = Release|Win32 + 
{5C2D1813-24CE-A826-4FE5-5732251A3FAF}.Release.Build.0 = Release|Win32
 	EndGlobalSection
 	GlobalSection(ExtensibilityGlobals) = postSolution
 	EndGlobalSection
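For reference, the arithmetic that the new gcd_ct, lcm_ct and upper_power_of_2_ct metafunctions added to math_functions.hpp above evaluate at compile time (their exact template signatures are not reproduced here) can be spot-checked with plain constexpr equivalents:

#include <cstddef>

constexpr std::size_t gcd_c(std::size_t a, std::size_t b)
{  return b == 0 ? a : gcd_c(b, a % b);  }            //Euclid's algorithm, as gcd_ct does

constexpr std::size_t lcm_c(std::size_t a, std::size_t b)
{  return a*b/gcd_c(a, b);  }                         //same formula as lcm_ct

constexpr std::size_t upper_pow2(std::size_t n, std::size_t p = 1)
{  return p >= n ? p : upper_pow2(n, p*2);  }         //smallest power of two not below n

static_assert(gcd_c(12, 8) == 4,        "greatest common divisor");
static_assert(lcm_c(12, 8) == 24,       "node sizes are rounded with this");
static_assert(upper_pow2(6160) == 8192, "block sizes are rounded with this");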