diff --git a/include/boost/unordered/detail/foa/concurrent_table.hpp b/include/boost/unordered/detail/foa/concurrent_table.hpp
index d51f5279..db09b9ca 100644
--- a/include/boost/unordered/detail/foa/concurrent_table.hpp
+++ b/include/boost/unordered/detail/foa/concurrent_table.hpp
@@ -173,7 +173,15 @@ struct atomic_integral
   void operator|=(Integral m){n.fetch_or(m,std::memory_order_relaxed);}
   void operator&=(Integral m){n.fetch_and(m,std::memory_order_relaxed);}
 
+  /* std::atomic is not copy-assignable, so the implicit operator= is deleted;
+   * provide one (relaxed load/store) so group arrays of atomic_integral can
+   * be copied element-wise when memcpy is not applicable. */
+  atomic_integral& operator=(atomic_integral const& rhs) {
+    if(this!=&rhs){
+      n.store(rhs.n.load(std::memory_order_relaxed),std::memory_order_relaxed);
+    }
+    return *this;
+  }
+
   std::atomic<Integral> n;
 };
 
 /* Group-level concurrency protection. It provides a rw mutex plus an
diff --git a/include/boost/unordered/detail/foa/core.hpp b/include/boost/unordered/detail/foa/core.hpp
index b7c546c1..0c26699b 100644
--- a/include/boost/unordered/detail/foa/core.hpp
+++ b/include/boost/unordered/detail/foa/core.hpp
@@ -1773,9 +1773,7 @@ private:
   {
     if(arrays.elements){
       copy_elements_array_from(x);
-      std::memcpy(
-        arrays.groups,x.arrays.groups,
-        (arrays.groups_size_mask+1)*sizeof(group_type));
+      copy_groups_array_from(x);
       size_=std::size_t(x.size_);
     }
   }
@@ -1833,3 +1831,31 @@ private:
     BOOST_CATCH_END
   }
 
+  /* Dispatch on trivial copyability of group_type: memcpy when legal,
+   * element-wise assignment otherwise (e.g. groups holding std::atomic). */
+  void copy_groups_array_from(const table_core& x) {
+    copy_groups_array_from(x, std::integral_constant<bool,
+#if defined(BOOST_LIBSTDCXX_VERSION)&&BOOST_LIBSTDCXX_VERSION<50000
+      /* pre-5 libstdc++ lacks std::is_trivially_copyable */
+      std::is_trivially_copy_assignable<group_type>::value
+#else
+      std::is_trivially_copyable<group_type>::value
+#endif
+      >{}
+    );
+  }
+
+  void copy_groups_array_from(
+    const table_core& x, std::true_type /* -> memcpy */)
+  {
+    std::memcpy(
+      arrays.groups,x.arrays.groups,
+      (arrays.groups_size_mask+1)*sizeof(group_type));
+  }
+
+  void copy_groups_array_from(
+    const table_core& x, std::false_type /* -> manual */)
+  {
+    for(std::size_t i=0;i<arrays.groups_size_mask+1;++i){
+      arrays.groups[i]=x.arrays.groups[i];
+    }
+  }
+