From d8296b393300828ea3077c1763f694eea3ad7ec2 Mon Sep 17 00:00:00 2001
From: Peter Dimov
Date: Wed, 9 Apr 2008 19:58:54 +0000
Subject: [PATCH] sp_counted_base_spin.hpp added, enabled by BOOST_SP_USE_SPINLOCK.

[SVN r44137]
---
 include/boost/detail/sp_counted_base.hpp      |   4 +
 include/boost/detail/sp_counted_base_spin.hpp | 131 ++++++++++++++++++
 include/boost/detail/spinlock_pool.hpp        |   4 +-
 3 files changed, 137 insertions(+), 2 deletions(-)
 create mode 100644 include/boost/detail/sp_counted_base_spin.hpp

diff --git a/include/boost/detail/sp_counted_base.hpp b/include/boost/detail/sp_counted_base.hpp
index f925a5d..241b2de 100644
--- a/include/boost/detail/sp_counted_base.hpp
+++ b/include/boost/detail/sp_counted_base.hpp
@@ -23,6 +23,10 @@
 
 # include <boost/detail/sp_counted_base_nt.hpp>
 
+#elif defined( BOOST_SP_USE_SPINLOCK )
+
+# include <boost/detail/sp_counted_base_spin.hpp>
+
 #elif defined( BOOST_SP_USE_PTHREADS )
 
 # include <boost/detail/sp_counted_base_pt.hpp>
diff --git a/include/boost/detail/sp_counted_base_spin.hpp b/include/boost/detail/sp_counted_base_spin.hpp
new file mode 100644
index 0000000..610a468
--- /dev/null
+++ b/include/boost/detail/sp_counted_base_spin.hpp
@@ -0,0 +1,131 @@
+#ifndef BOOST_DETAIL_SP_COUNTED_BASE_SPIN_HPP_INCLUDED
+#define BOOST_DETAIL_SP_COUNTED_BASE_SPIN_HPP_INCLUDED
+
+// MS compatible compilers support #pragma once
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1020)
+# pragma once
+#endif
+
+//
+//  detail/sp_counted_base_spin.hpp - spinlock pool atomic emulation
+//
+//  Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
+//  Copyright 2004-2008 Peter Dimov
+//
+//  Distributed under the Boost Software License, Version 1.0. (See
+//  accompanying file LICENSE_1_0.txt or copy at
+//  http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#include <boost/detail/sp_typeinfo.hpp>
+#include <boost/detail/spinlock_pool.hpp>
+
+namespace boost
+{
+
+namespace detail
+{
+
+inline int atomic_exchange_and_add( int * pw, int dv )
+{
+    spinlock_pool<1>::scoped_lock lock( pw );
+
+    int r = *pw;
+    *pw += dv;
+    return r;
+}
+
+inline void atomic_increment( int * pw )
+{
+    spinlock_pool<1>::scoped_lock lock( pw );
+    ++*pw;
+}
+
+inline int atomic_conditional_increment( int * pw )
+{
+    spinlock_pool<1>::scoped_lock lock( pw );
+
+    int rv = *pw;
+    if( rv != 0 ) ++*pw;
+    return rv;
+}
+
+class sp_counted_base
+{
+private:
+
+    sp_counted_base( sp_counted_base const & );
+    sp_counted_base & operator= ( sp_counted_base const & );
+
+    int use_count_;        // #shared
+    int weak_count_;       // #weak + (#shared != 0)
+
+public:
+
+    sp_counted_base(): use_count_( 1 ), weak_count_( 1 )
+    {
+    }
+
+    virtual ~sp_counted_base() // nothrow
+    {
+    }
+
+    // dispose() is called when use_count_ drops to zero, to release
+    // the resources managed by *this.
+
+    virtual void dispose() = 0; // nothrow
+
+    // destroy() is called when weak_count_ drops to zero.
+
+    virtual void destroy() // nothrow
+    {
+        delete this;
+    }
+
+    virtual void * get_deleter( sp_typeinfo const & ti ) = 0;
+
+    void add_ref_copy()
+    {
+        atomic_increment( &use_count_ );
+    }
+
+    bool add_ref_lock() // true on success
+    {
+        return atomic_conditional_increment( &use_count_ ) != 0;
+    }
+
+    void release() // nothrow
+    {
+        if( atomic_exchange_and_add( &use_count_, -1 ) == 1 )
+        {
+            dispose();
+            weak_release();
+        }
+    }
+
+    void weak_add_ref() // nothrow
+    {
+        atomic_increment( &weak_count_ );
+    }
+
+    void weak_release() // nothrow
+    {
+        if( atomic_exchange_and_add( &weak_count_, -1 ) == 1 )
+        {
+            destroy();
+        }
+    }
+
+    long use_count() const // nothrow
+    {
+        spinlock_pool<1>::scoped_lock lock( &use_count_ );
+        return use_count_;
+    }
+};
+
+} // namespace detail
+
+} // namespace boost
+
+#endif // #ifndef BOOST_DETAIL_SP_COUNTED_BASE_SPIN_HPP_INCLUDED
diff --git a/include/boost/detail/spinlock_pool.hpp b/include/boost/detail/spinlock_pool.hpp
index b50ccb5..92d26cb 100644
--- a/include/boost/detail/spinlock_pool.hpp
+++ b/include/boost/detail/spinlock_pool.hpp
@@ -37,7 +37,7 @@ private:
 
 public:
 
-    static spinlock & spinlock_for( void * pv )
+    static spinlock & spinlock_for( void const * pv )
     {
         size_t i = reinterpret_cast< size_t >( pv ) % 41;
         return pool_[ i ];
@@ -54,7 +54,7 @@ public:
 
     public:
 
-        explicit scoped_lock( void * pv ): sp_( spinlock_for( pv ) )
+        explicit scoped_lock( void const * pv ): sp_( spinlock_for( pv ) )
         {
             sp_.lock();
         }