#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include <stdexcept>
#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset
#include <string>
#include "tbb_stddef.h"
#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#if TBB_USE_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

namespace internal {
    //! Loads the pointer at src with an acquire fence and notifies threading analysis tools.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
    //! Stores src into dst with a release fence and notifies threading analysis tools.
    void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
    //! Loads the pointer at src without a fence, without causing tools to report a race.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
}

//! hash_compare that is default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
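
// Illustrative sketch (not part of the original header): a program can supply its
// own HashCompare in place of tbb_hash_compare, provided it models the same two
// static member functions. The type below is hypothetical:
//
//     #include <cctype>
//     #include <string>
//     struct case_insensitive_hash_compare {
//         static size_t hash( const std::string& s ) {
//             size_t h = 0;
//             for( std::string::size_type i = 0; i < s.size(); ++i )
//                 h = size_t( std::tolower( (unsigned char)s[i] ) ) ^ (h * 31);
//             return h;
//         }
//         static bool equal( const std::string& a, const std::string& b ) {
//             if( a.size() != b.size() ) return false;
//             for( std::string::size_type i = 0; i < a.size(); ++i )
//                 if( std::tolower((unsigned char)a[i]) != std::tolower((unsigned char)b[i]) )
//                     return false;
//             return true;
//         }
//     };
//     // tbb::concurrent_hash_map<std::string,int,case_insensitive_hash_compare> table;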

namespace interface4 {

template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

namespace internal {

//! Type of a hash code.
typedef size_t hashcode_t;
//! Node base type
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in chain
    hash_map_node_base *next;
    mutex_t mutex;
};
//! Incompleteness flag value: marks a bucket whose contents still live in its parent bucket
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
//! Rehashed empty bucket flag
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));

//! base class of concurrent_hash_map
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        //! Mutex type for buckets
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of embedded segments
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! Count of segments in the first block, including the embedded ones
    static size_type const first_block = 8;
    //! Size of the segment pointers table
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = sum of allocated segment sizes - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_t my_table;
    //! Size of container in stored items
    atomic<size_type> my_size; // Must be in a separate cache line from my_mask for performance
    //! Zero segment
    bucket my_embedded_segment[embedded_buckets];

    //! Constructor
    hash_map_base() {
        std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512
            + sizeof(my_size) + sizeof(my_mask)  // 4+4 or 8+8
            + embedded_buckets*sizeof(bucket) ); // n*8 or n*16
        for( size_type i = 0; i < embedded_block; i++ ) // fill the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1; // make mask and size
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
    }

    //! @return segment index of given index in the array
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first array index of given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size, except a fake value for k == 0
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k;
    }
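
    // Worked example (illustrative comment, not in the original source) of the
    // layout implied by the three functions above: segment 0 holds buckets {0,1};
    // for k >= 1, segment k holds bucket indices [2^k, 2^(k+1)). E.g. for bucket 5:
    // segment_index_of(5) = Log2(5) = 2, segment_base(2) = 4, so bucket 5 lives at
    // offset 5-4 = 1 within segment 2, whose segment_size(2) = 4.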

    //! @return true if @arg ptr is valid pointer
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<size_t>(ptr) > size_t(63); // small values are reserved for flags like rehash_req
    }

    //! Initialize buckets
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) {
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req;
        }
    }
    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n; // bucket is under lock and rehashed
    }

    //! Exception safety helper
    struct enable_segment_failsafe {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };
    //! Enable segment
    void enable_segment( segment_index_t k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        cache_aligned_allocator<bucket> alloc;
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) {
            sz = segment_size( k );
            segment_ptr_t ptr = alloc.allocate( sz );
            init_buckets( ptr, sz, is_initial );
#if TBB_USE_THREADING_TOOLS
            itt_store_pointer_with_release_v3( my_table + k, ptr );
#else
            my_table[k] = ptr; // the store to my_mask below has the release fence
#endif
            sz <<= 1; // double it to get entire capacity of the container
        } else { // the first block
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block);
            for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
#if TBB_USE_THREADING_TOOLS
                itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
#else
                my_table[i] = ptr + segment_base(i);
#endif
        }
#if TBB_USE_THREADING_TOOLS
        itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) );
#else
        my_mask = sz - 1;
#endif
        watchdog.my_segment_ptr = 0; // disarm the failsafe: the table now owns the allocation
    }
    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    //! Internal serial rehashing helper
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + segment_base(s) );
            }
    }
    //! Check for mask race
    // Splitting into two functions should help inlining
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
#if TBB_USE_THREADING_TOOLS
        m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        m_now = my_mask;
#endif
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }

    //! Process mask race, check for rehashing collision
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that 'h' has some other bits set beside 'm_old';
            // find the next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether the parent bucket is already rehashed
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
#else
            if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
#endif
                return true;
        }
        return false;
    }
    //! Insert a node and check for load factor. @return segment index to enable refilling
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
        add_to_bucket( b, n );
        // check load factor; grab the right to grow the table if it is becoming dense
        if( sz >= mask ) {
            segment_index_t new_seg = segment_index_of( mask+1 );
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
#if TBB_USE_THREADING_TOOLS
            if( !itt_load_pointer_v3(my_table+new_seg)
#else
            if( !my_table[new_seg]
#endif
              && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                return new_seg; // the caller must allocate this segment
        }
        return 0;
    }

    //! Prepare enough segments for number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), is_initial );
    }
    //! Swap hash_map_bases
    void internal_swap(hash_map_base &table) {
        std::swap(this->my_mask, table.my_mask);
        std::swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            std::swap(this->my_table[i], table.my_table[i]);
    }
};

template<typename Iterator>
class hash_map_range;

//! Meets requirements of a forward iterator for STL
/** Value is either the T or const T type of the container. */
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() {
        size_t k = my_index+1;
        while( my_bucket && k <= my_map->my_mask ) {
            // Following test uses 2's-complement wizardry
            if( k & (k-2) ) // not the beginning of a segment
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface4::concurrent_hash_map;
#else
public: // workaround
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table for current item
    size_t my_index;

    //! Pointer to bucket
    const bucket *my_bucket;

    //! Pointer to node that has current item
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

public:
    //! Construct undefined iterator
    hash_map_iterator() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->item;
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment; returns a pointer to the element referenced before advancing
    Value* operator++(int) {
        Value* result = &operator*();
        operator++();
        return result;
    }
};

template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}

template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}

template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

//! Range class used with concurrent_hash_map
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
        __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
        set_midpoint();
        r.set_midpoint();
    }
    //! Type conversion
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
#if TBB_DEPRECATED
    //! Init range with iterators and grainsize specified
    hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
        my_begin(begin_),
        my_end(end_),
        my_grainsize(grainsize_)
    {
        if(!my_end.my_index && !my_end.my_bucket) // end
            my_end.my_index = my_end.my_map->my_mask + 1;
        set_midpoint();
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
    }
#endif
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}

} // namespace internal

//! Hash multiplier: the golden-ratio constant used for Knuth-style multiplicative hashing
static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;

//! Hasher functions
template<typename T>
inline size_t tbb_hasher( const T& t ) {
    return static_cast<size_t>( t ) * hash_multiplier;
}
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    size_t const h = reinterpret_cast<size_t>( ptr );
    return (h >> 3) ^ h; // fold in the alignment bits
}
template<typename E, typename S, typename A>
inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
    size_t h = 0;
    for( const E* c = s.c_str(); *c; c++ )
        h = static_cast<size_t>(*c) ^ (h * hash_multiplier);
    return h;
}
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
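
// Illustrative sketch (not part of the original header): tbb_hash_compare::hash
// calls tbb_hasher unqualified, so a program may provide an overload for its own
// key type, found through argument-dependent lookup. The struct below is
// hypothetical (it also needs operator== for tbb_hash_compare::equal):
//
//     struct point { int x, y; };  // hypothetical user key type
//     inline bool operator==( const point& a, const point& b ) { return a.x==b.x && a.y==b.y; }
//     inline size_t tbb_hasher( const point& p ) {
//         return tbb::tbb_hasher(p.x) ^ (tbb::tbb_hasher(p.y) << 1);
//     }
//     // tbb::concurrent_hash_map<point,int> then hashes point keys with this overload.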

//! Unordered map from Key to T with concurrent access.
/** Supports concurrent find(), insert() and erase(); access to elements is
    mediated by accessor (write lock) and const_accessor (read lock) objects,
    which keep the element locked until they are released or destroyed. */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void *operator new( size_t, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // match placement-new form above to be called if exception thrown in constructor
        void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }
    //! bucket accessor is to find, rehash, acquire a lock, and access a bucket
    class bucket_accessor : public bucket::scoped_t {
        bool my_is_writer;
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash it, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
#if TBB_USE_THREADING_TOOLS
            // a bucket pending rehash is rehashed by the first thread that gets its write lock
            if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req
#else
            if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req
#endif
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
                my_is_writer = true;
            }
            else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return my_is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }
        bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
    };

    //! Rehash a bucket: move nodes that belong in b_new out of its parent bucket
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit

        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) {
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node pointers can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }
public:

    class accessor;
    //! Combines data access, locking, and garbage collection.
    class const_accessor {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
        void operator=( const accessor & ) const; // Deny access
        const_accessor( const accessor & );       // Deny access
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty.
        bool empty() const {return !my_node;}

        //! Set to null
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // my_lock.release() is called in the scoped lock's destructor
        }
    private:
        node *my_node;
        typename node::scoped_t my_lock;
        hashcode_t my_hash;
    };

    //! Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
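
    // Usage sketch for the accessor classes above (illustrative, not part of the
    // original header). An accessor holds a per-element lock for its lifetime:
    //
    //     tbb::concurrent_hash_map<std::string,int> table;  // hypothetical instance
    //     {
    //         tbb::concurrent_hash_map<std::string,int>::accessor a;
    //         table.insert( a, "apples" );  // element is write-locked while `a` holds it
    //         a->second += 1;               // exclusive update is safe here
    //     }                                 // lock released when `a` is destroyed
    //     {
    //         tbb::concurrent_hash_map<std::string,int>::const_accessor ca;
    //         if( table.find( ca, "apples" ) )
    //             std::printf( "%d\n", ca->second );  // read lock: concurrent readers may proceed
    //     }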

    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Rehashes and optionally resizes the whole table.
    /** Useful for destructive iteration once the table is no longer accessed concurrently. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }
    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }
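
    // Minimal sketch of plugging the range into a TBB parallel algorithm
    // (illustrative, not part of the original header; assumes "tbb/parallel_for.h"
    // is included and `table` is a tbb::concurrent_hash_map<int,int>):
    //
    //     typedef tbb::concurrent_hash_map<int,int> table_t;
    //     struct accumulate_body {                      // hypothetical body functor
    //         void operator()( const table_t::range_type &r ) const {
    //             for( table_t::iterator i = r.begin(); i != r.end(); ++i )
    //                 process( i->second );             // process() is a placeholder
    //         }
    //     };
    //     // tbb::parallel_for( table.range(), accumulate_body() );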

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    iterator end() {return iterator(*this,0,0,0);}
    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    const_iterator end() const {return const_iterator(*this,0,0,0);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }

    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Return true if item was erased by particularly this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/ true );
    }

    //! Erase item by accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/ false );
    }
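
    // Erase usage sketch (illustrative, not part of the original header): erasing
    // through an accessor lets a thread examine the value it is about to remove
    // while still holding the element lock:
    //
    //     tbb::concurrent_hash_map<std::string,int>::accessor a;  // `table` as in the accessor sketch above
    //     if( table.find( a, "apples" ) && a->second == 0 )
    //         table.erase( a );  // the accessor is released by erase()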

protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! delete item by accessor
    bool exclude( const_accessor &item_accessor, bool readonly );

    //! Returns a pair of iterators delimiting the matching item, or a pair of end iterators if no such item exists.
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);

    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Returns a pointer to the item with the given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        hashcode_t m = my_mask;
#endif
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
#if TBB_USE_THREADING_TOOLS
        // a bucket pending rehash is rehashed by the first thread that gets its write lock
        if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req )
#else
        if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req )
#endif
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
    #pragma warning( push )
    #pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    segment_index_t grow_segment;
    bool return_value;
    node *n, *tmp_n = 0;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else  tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b().
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            } else {
exists:         grow_segment = 0;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b().
                return false;
            }
            return_value = true;
            grow_segment = 0;
        }
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
#if TBB_USE_THREADING_TOOLS
                    m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
                    m = my_mask;
#endif
                    goto restart;
                }
            } while( !result->my_lock.try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment )
        enable_segment( grow_segment );
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}
template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    item_accessor.my_node = NULL; // we ought to release the accessor anyway
    hashcode_t const h = item_accessor.my_hash;
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // another thread was the first to erase it
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( readonly ) // need to get an exclusive lock before deleting
        item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here
    item_accessor.my_lock.release();
    delete_node( n ); // only this thread can delete it, due to the write lock on the item
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
restart:
    {//lock scope
        // get bucket
        bucket_accessor b( this, h & m );
search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask could have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: no other thread can acquire the item's mutex anymore, since the node is unlinked
    delete_node( n );
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // first index of the last allocated segment
    __TBB_ASSERT((b&(b-1))==0, NULL);
    bucket *bp = get_bucket( b ); // only the last segment can contain buckets pending rehash
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash the bucket
            hashcode_t h = b; bucket *b_old = bp;
            do { // walk up the parent chain to the root rehashed bucket
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be moved
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // check consistency and collect statistics
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}
template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do { // walk segments from the last allocated one down to the embedded one
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 ); // segment 0 holds two buckets
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block) // each of these segments was allocated individually
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block ) // segments [embedded_block, first_block) share one allocation
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version: copy bucket by bucket
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source bucket is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size;
    }
}

} // namespace interface4

using interface4::tbb_hasher;
using interface4::concurrent_hash_map;

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{ return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{ a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning( pop ) // warning 4127 is back
#endif

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */