Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_hash_map.h
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_concurrent_hash_map_H
18 #define __TBB_concurrent_hash_map_H
19 
20 #define __TBB_concurrent_hash_map_H_include_area
21 #include "internal/_warning_suppress_enable_notice.h"
22 
23 #include "tbb_stddef.h"
24 #include <iterator>
25 #include <utility> // Need std::pair
26 #include <cstring> // Need std::memset
27 #include __TBB_STD_SWAP_HEADER
28 
29 #include "tbb_allocator.h"
30 #include "spin_rw_mutex.h"
31 #include "atomic.h"
32 #include "tbb_exception.h"
33 #include "tbb_profiling.h"
34 #include "aligned_space.h"
35 #include "internal/_tbb_hash_compare_impl.h"
36 #include "internal/_template_helpers.h"
37 #include "internal/_allocator_traits.h"
38 #if __TBB_INITIALIZER_LISTS_PRESENT
39 #include <initializer_list>
40 #endif
41 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
42 #include <typeinfo>
43 #endif
44 #if __TBB_STATISTICS
45 #include <stdio.h>
46 #endif
47 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
48 // Definition of __TBB_CPP11_RVALUE_REF_PRESENT includes __TBB_CPP11_TUPLE_PRESENT
49 // on most platforms; the tuple-present macro was added for logical correctness
50 #include <tuple>
51 #endif
52 
53 namespace tbb {
54 
55 namespace interface5 {
56 
57  template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
58  class concurrent_hash_map;
59 
61  namespace internal {
62  using namespace tbb::internal;
63 
64 
65  //! Type of a hash code.
66  typedef size_t hashcode_t;
67  //! Node base type
68  struct hash_map_node_base : tbb::internal::no_copy {
69  //! Mutex type
70  typedef spin_rw_mutex mutex_t;
71  //! Scoped lock type for mutex
72  typedef mutex_t::scoped_lock scoped_t;
73  //! Next node in chain
74  hash_map_node_base *next;
75  mutex_t mutex;
76  };
77  //! Incompleteness flag value
78  static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
79  //! Rehashed empty bucket flag
80  static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
81  //! base class of concurrent_hash_map
82  class hash_map_base {
83  public:
84  //! Size type
85  typedef size_t size_type;
86  //! Type of a hash code.
87  typedef size_t hashcode_t;
88  //! Segment index type
89  typedef size_t segment_index_t;
90  //! Node base type
91  typedef hash_map_node_base node_base;
92  //! Bucket type
93  struct bucket : tbb::internal::no_copy {
94  //! Mutex type for buckets
95  typedef spin_rw_mutex mutex_t;
96  //! Scoped lock type for mutex
97  typedef mutex_t::scoped_lock scoped_t;
98  mutex_t mutex;
99  node_base *node_list;
100  };
101  //! Count of segments in the first block
102  static size_type const embedded_block = 1;
103  //! Count of buckets in the embedded segments
104  static size_type const embedded_buckets = 1<<embedded_block;
105  //! Count of segments in the first block, including the embedded ones
106  static size_type const first_block = 8; // perfect with bucket size 16, so allocation sizes are powers of 4096
107  //! Size of a pointer / table size
108  static size_type const pointers_per_table = sizeof(segment_index_t) * 8; // one segment per bit
109  //! Segment pointer
110  typedef bucket *segment_ptr_t;
111  //! Segment pointers table type
112  typedef segment_ptr_t segments_table_t[pointers_per_table];
113  //! Hash mask = sum of allocated segment sizes - 1
114  atomic<hashcode_t> my_mask;
115  //! Segment pointers table. Also prevents false sharing between my_mask and my_size
116  segments_table_t my_table;
117  //! Size of container in stored items
118  atomic<size_type> my_size; // It must be in separate cache line from my_mask due to performance effects
119  //! Zero segment
120  bucket my_embedded_segment[embedded_buckets];
121 #if __TBB_STATISTICS
122  atomic<unsigned> my_info_resizes; // concurrent ones
123  mutable atomic<unsigned> my_info_restarts; // race collisions
124  atomic<unsigned> my_info_rehashes; // invocations of rehash_bucket
125 #endif
126  hash_map_base() {
128  std::memset(my_table, 0, sizeof(my_table));
129  my_mask = 0;
130  my_size = 0;
131  std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
132  for( size_type i = 0; i < embedded_block; i++ ) // fill the table
133  my_table[i] = my_embedded_segment + segment_base(i);
134  my_mask = embedded_buckets - 1;
135  __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
136 #if __TBB_STATISTICS
137  my_info_resizes = 0; // concurrent ones
138  my_info_restarts = 0; // race collisions
139  my_info_rehashes = 0; // invocations of rehash_bucket
140 #endif
141  }
142 
143  //! @return segment index of given index in the array
144  static segment_index_t segment_index_of( size_type index ) {
145  return segment_index_t( __TBB_Log2( index|1 ) );
146  }
147 
148  //! @return the first array index of given segment
149  static segment_index_t segment_base( segment_index_t k ) {
150  return (segment_index_t(1)<<k & ~segment_index_t(1));
151  }
152 
153  //! @return segment size except for @arg k == 0
154  static size_type segment_size( segment_index_t k ) {
155  return size_type(1)<<k; // fake value for k==0
156  }
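The three helpers above define the map's two-level addressing: global bucket index i lives in segment segment_index_of(i), at offset i - segment_base(segment_index_of(i)), and segment k > 0 holds segment_size(k) buckets. A minimal standalone sketch of that arithmetic (editor's illustration, not part of this header); log2_floor is a portable stand-in for __TBB_Log2, an assumption, since the real macro usually maps to a compiler intrinsic:

#include <cassert>
#include <cstddef>

static std::size_t log2_floor(std::size_t x) { std::size_t r = 0; while (x >>= 1) ++r; return r; }
static std::size_t segment_index_of(std::size_t index) { return log2_floor(index | 1); }
static std::size_t segment_base(std::size_t k) { return (std::size_t(1) << k) & ~std::size_t(1); }
static std::size_t segment_size(std::size_t k) { return std::size_t(1) << k; } // fake value for k==0

int main() {
    // Embedded segment 0 covers buckets 0..1; segment k>0 covers
    // [segment_base(k), segment_base(k) + segment_size(k)).
    assert(segment_index_of(0) == 0 && segment_index_of(1) == 0);
    assert(segment_index_of(2) == 1 && segment_index_of(3) == 1);
    assert(segment_base(2) == 4 && segment_size(2) == 4 && segment_index_of(7) == 2);
    // get_bucket() subtracts the base to index inside the owning segment:
    std::size_t h = 6, s = segment_index_of(h);
    assert(s == 2 && h - segment_base(s) == 2);
    return 0;
}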
157 
159  static bool is_valid( void *ptr ) {
160  return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
161  }
162 
164  static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
165  if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
166  else for(size_type i = 0; i < sz; i++, ptr++) {
167  *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
168  ptr->node_list = rehash_req;
169  }
170  }
171 
173  static void add_to_bucket( bucket *b, node_base *n ) {
174  __TBB_ASSERT(b->node_list != rehash_req, NULL);
175  n->next = b->node_list;
176  b->node_list = n; // it's under lock and the flag is set
177  }
178 
179  //! Exception safety helper
180  struct enable_segment_failsafe : tbb::internal::no_copy {
181  segment_ptr_t *my_segment_ptr;
182  enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
183  ~enable_segment_failsafe() {
184  if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
185  }
186  };
187 
188  //! Enable segment in the table
189  template<typename Allocator>
190  void enable_segment( segment_index_t k, const Allocator& allocator, bool is_initial = false ) {
191  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
192  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
193  bucket_allocator_type bucket_allocator(allocator);
194  __TBB_ASSERT( k, "Zero segment must be embedded" );
195  enable_segment_failsafe watchdog( my_table, k );
196  size_type sz;
197  __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
198  if( k >= first_block ) {
199  sz = segment_size( k );
200  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
201  init_buckets( ptr, sz, is_initial );
202  itt_hide_store_word( my_table[k], ptr );
203  sz <<= 1;// double it to get entire capacity of the container
204  } else { // the first block
205  __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
206  sz = segment_size( first_block );
207  segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
208  init_buckets( ptr, sz - embedded_buckets, is_initial );
209  ptr -= segment_base(embedded_block);
210  for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
211  itt_hide_store_word( my_table[i], ptr + segment_base(i) );
212  }
213  itt_store_word_with_release( my_mask, sz-1 );
214  watchdog.my_segment_ptr = 0;
215  }
216 
217  template<typename Allocator>
218  void delete_segment(segment_index_t s, const Allocator& allocator) {
219  typedef typename tbb::internal::allocator_rebind<Allocator, bucket>::type bucket_allocator_type;
220  typedef tbb::internal::allocator_traits<bucket_allocator_type> bucket_allocator_traits;
221  bucket_allocator_type bucket_allocator(allocator);
222  segment_ptr_t buckets_ptr = my_table[s];
223  size_type sz = segment_size( s ? s : 1 );
224 
225  if( s >= first_block) // the first segment or the next
226  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
227  else if( s == embedded_block && embedded_block != first_block )
228  bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
229  segment_size(first_block) - embedded_buckets);
230  if( s >= embedded_block ) my_table[s] = 0;
231  }
232 
233  //! Get bucket by (masked) hashcode
234  bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere?
235  segment_index_t s = segment_index_of( h );
236  h -= segment_base(s);
237  segment_ptr_t seg = my_table[s];
238  __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
239  return &seg[h];
240  }
241 
242  // internal serial rehashing helper
243  void mark_rehashed_levels( hashcode_t h ) throw () {
244  segment_index_t s = segment_index_of( h );
245  while( segment_ptr_t seg = my_table[++s] )
246  if( seg[h].node_list == rehash_req ) {
247  seg[h].node_list = empty_rehashed;
248  mark_rehashed_levels( h + ((hashcode_t)1<<s) ); // optimized segment_base(s)
249  }
250  }
251 
252  //! Check for mask race
253  // Splitting into two functions should help inlining
254  inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
255  hashcode_t m_now, m_old = m;
256  m_now = (hashcode_t) itt_load_word_with_acquire( my_mask );
257  if( m_old != m_now )
258  return check_rehashing_collision( h, m_old, m = m_now );
259  return false;
260  }
261 
262  //! Process mask race, check for rehashing collision
263  bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
264  __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m
265  if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
266  // condition above proves that 'h' has some other bits set beside 'm_old'
267  // find next applicable mask after m_old //TODO: look at bsl instruction
268  for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size
269  ;
270  m_old = (m_old<<1) - 1; // get full mask from a bit
271  __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
272  // check whether it is rehashing/ed
273  if( itt_load_word_with_acquire(get_bucket(h & m_old)->node_list) != rehash_req )
274  {
275 #if __TBB_STATISTICS
276  my_info_restarts++; // race collisions
277 #endif
278  return true;
279  }
280  }
281  return false;
282  }
283 
284  //! Insert a node and check for load factor. @return segment index to enable.
285  segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
286  size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
287  add_to_bucket( b, n );
288  // check load factor
289  if( sz >= mask ) { // TODO: add custom load_factor
290  segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of
291  __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
292  static const segment_ptr_t is_allocating = (segment_ptr_t)2;
293  if( !itt_hide_load_word(my_table[new_seg])
294  && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
295  return new_seg; // The value must be processed
296  }
297  return 0;
298  }
299 
300  //! Prepare enough segments for number of buckets
301  template<typename Allocator>
302  void reserve(size_type buckets, const Allocator& allocator) {
303  if( !buckets-- ) return;
304  bool is_initial = !my_size;
305  for( size_type m = my_mask; buckets > m; m = my_mask )
306  enable_segment( segment_index_of( m+1 ), allocator, is_initial );
307  }
308  //! Swap hash_map_bases
309  void internal_swap(hash_map_base &table) {
310  using std::swap;
311  swap(this->my_mask, table.my_mask);
312  swap(this->my_size, table.my_size);
313  for(size_type i = 0; i < embedded_buckets; i++)
314  swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
315  for(size_type i = embedded_block; i < pointers_per_table; i++)
316  swap(this->my_table[i], table.my_table[i]);
317  }
318 
319 #if __TBB_CPP11_RVALUE_REF_PRESENT
320  void internal_move(hash_map_base&& other) {
321  my_mask = other.my_mask;
322  other.my_mask = embedded_buckets - 1;
323  my_size = other.my_size;
324  other.my_size = 0;
325 
326  for(size_type i = 0; i < embedded_buckets; ++i) {
327  my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
328  other.my_embedded_segment[i].node_list = NULL;
329  }
330 
331  for(size_type i = embedded_block; i < pointers_per_table; ++i) {
332  my_table[i] = other.my_table[i];
333  other.my_table[i] = NULL;
334  }
335  }
336 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
337  };
338 
339  template<typename Iterator>
340  class hash_map_range;
341 
342  //! Meets requirements of a forward iterator for STL
343  /** Value is either the T or const T type of the container.
344  @ingroup containers */
345  template<typename Container, typename Value>
346  class hash_map_iterator
347  : public std::iterator<std::forward_iterator_tag,Value>
348  {
349  typedef Container map_type;
350  typedef typename Container::node node;
351  typedef hash_map_base::node_base node_base;
352  typedef hash_map_base::bucket bucket;
353 
354  template<typename C, typename T, typename U>
355  friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
356 
357  template<typename C, typename T, typename U>
358  friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
359 
360  template<typename C, typename T, typename U>
361  friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );
362 
363  template<typename C, typename U>
364  friend class hash_map_iterator;
365 
366  template<typename I>
367  friend class hash_map_range;
368 
369  void advance_to_next_bucket() { // TODO?: refactor to iterator_base class
370  size_t k = my_index+1;
371  __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
372  while( k <= my_map->my_mask ) {
373  // Following test uses 2's-complement wizardry
374  if( k&(k-2) ) // not the beginning of a segment
375  ++my_bucket;
376  else my_bucket = my_map->get_bucket( k );
377  my_node = static_cast<node*>( my_bucket->node_list );
378  if( hash_map_base::is_valid(my_node) ) {
379  my_index = k; return;
380  }
381  ++k;
382  }
383  my_bucket = 0; my_node = 0; my_index = k; // the end
384  }
385 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
386  template<typename Key, typename T, typename HashCompare, typename A>
387  friend class interface5::concurrent_hash_map;
388 
389  public: // workaround
390 #endif
391  const Container *my_map;
393 
394  //! Index in hash table for current item
395  size_t my_index;
396 
397  //! Pointer to bucket
398  const bucket *my_bucket;
399 
400  //! Pointer to node that has current item
401  node *my_node;
402 
403  hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );
404 
405  public:
406  //! Construct undefined iterator
407  hash_map_iterator(): my_map(), my_index(), my_bucket(), my_node() {}
408  hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
409  my_map(other.my_map),
410  my_index(other.my_index),
411  my_bucket(other.my_bucket),
412  my_node(other.my_node)
413  {}
414 
415  hash_map_iterator& operator=( const hash_map_iterator<Container,typename Container::value_type> &other ) {
416  my_map = other.my_map;
417  my_index = other.my_index;
418  my_bucket = other.my_bucket;
419  my_node = other.my_node;
420  return *this;
421  }
422  Value& operator*() const {
423  __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
424  return my_node->value();
425  }
426  Value* operator->() const {return &operator*();}
427  hash_map_iterator& operator++();
428 
429  //! Post increment
430  hash_map_iterator operator++(int) {
431  hash_map_iterator old(*this);
432  operator++();
433  return old;
434  }
435  };
436 
437  template<typename Container, typename Value>
438  hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
439  my_map(&map),
440  my_index(index),
441  my_bucket(b),
442  my_node( static_cast<node*>(n) )
443  {
444  if( b && !hash_map_base::is_valid(n) )
445  advance_to_next_bucket();
446  }
447 
448  template<typename Container, typename Value>
449  hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
450  my_node = static_cast<node*>( my_node->next );
451  if( !my_node ) advance_to_next_bucket();
452  return *this;
453  }
454 
455  template<typename Container, typename T, typename U>
456  bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
457  return i.my_node == j.my_node && i.my_map == j.my_map;
458  }
459 
460  template<typename Container, typename T, typename U>
461  bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
462  return i.my_node != j.my_node || i.my_map != j.my_map;
463  }
464 
466 
467  template<typename Iterator>
468  class hash_map_range {
469  typedef typename Iterator::map_type map_type;
470  Iterator my_begin;
471  Iterator my_end;
472  mutable Iterator my_midpoint;
473  size_t my_grainsize;
474  //! Set my_midpoint to point approximately half way between my_begin and my_end.
475  void set_midpoint() const;
476  template<typename U> friend class hash_map_range;
477  public:
478  //! Type for size of a range
479  typedef std::size_t size_type;
480  typedef typename Iterator::value_type value_type;
481  typedef typename Iterator::reference reference;
482  typedef typename Iterator::difference_type difference_type;
483  typedef Iterator iterator;
484 
485  //! True if range is empty.
486  bool empty() const {return my_begin==my_end;}
487 
488  //! True if range can be partitioned into two subranges.
489  bool is_divisible() const {
490  return my_midpoint!=my_end;
491  }
492  //! Split range.
493  hash_map_range( hash_map_range& r, split ) :
494  my_end(r.my_end),
495  my_grainsize(r.my_grainsize)
496  {
497  r.my_end = my_begin = r.my_midpoint;
498  __TBB_ASSERT( !empty(), "Splitting despite the range not being divisible" );
499  __TBB_ASSERT( !r.empty(), "Splitting despite the range not being divisible" );
500  set_midpoint();
501  r.set_midpoint();
502  }
503  //! type conversion
504  template<typename U>
505  hash_map_range( hash_map_range<U>& r) :
506  my_begin(r.my_begin),
507  my_end(r.my_end),
508  my_midpoint(r.my_midpoint),
509  my_grainsize(r.my_grainsize)
510  {}
511  //! Init range with container and grainsize specified
512  hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
513  my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
514  my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
515  my_grainsize( grainsize_ )
516  {
517  __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
518  set_midpoint();
519  }
520  const Iterator& begin() const {return my_begin;}
521  const Iterator& end() const {return my_end;}
522  //! The grain size for this range.
523  size_type grainsize() const {return my_grainsize;}
524  };
525 
526  template<typename Iterator>
527  void hash_map_range<Iterator>::set_midpoint() const {
528  // Split by groups of nodes
529  size_t m = my_end.my_index-my_begin.my_index;
530  if( m > my_grainsize ) {
531  m = my_begin.my_index + m/2u;
532  hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
533  my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
534  } else {
535  my_midpoint = my_end;
536  }
537  __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
538  "my_begin is after my_midpoint" );
539  __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
540  "my_midpoint is after my_end" );
541  __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
542  "[my_begin, my_midpoint) range should not be empty" );
543  }
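hash_map_range models the TBB Range concept (empty(), is_divisible(), a splitting constructor), so a whole map can be walked with parallel_for. A minimal usage sketch, editor's illustration rather than TBB sample code; it assumes a C++11 compiler and that the table is quiescent, since traversal belongs to the non-thread-safe operations:

#include <string>
#include "tbb/concurrent_hash_map.h"
#include "tbb/parallel_for.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

int total_hits(StringTable &table) {
    tbb::atomic<int> sum;
    sum = 0;
    tbb::parallel_for(table.range(), [&](const StringTable::range_type &r) {
        int local = 0;                       // accumulate per subrange, publish once
        for (StringTable::iterator i = r.begin(); i != r.end(); ++i)
            local += i->second;
        sum += local;                        // tbb::atomic supports fetch-and-add
    });
    return sum;
}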
544 
545  } // internal
547 
548 #if _MSC_VER && !defined(__INTEL_COMPILER)
549  // Suppress "conditional expression is constant" warning.
550  #pragma warning( push )
551  #pragma warning( disable: 4127 )
552 #endif
553 
555 
584 template<typename Key, typename T, typename HashCompare, typename Allocator>
585 class concurrent_hash_map : protected internal::hash_map_base {
586  template<typename Container, typename Value>
587  friend class internal::hash_map_iterator;
588 
589  template<typename I>
590  friend class internal::hash_map_range;
591 
592 public:
593  typedef Key key_type;
594  typedef T mapped_type;
595  typedef std::pair<const Key,T> value_type;
596  typedef hash_map_base::size_type size_type;
597  typedef ptrdiff_t difference_type;
598  typedef value_type *pointer;
599  typedef const value_type *const_pointer;
600  typedef value_type &reference;
601  typedef const value_type &const_reference;
602  typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
603  typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
604  typedef internal::hash_map_range<iterator> range_type;
605  typedef internal::hash_map_range<const_iterator> const_range_type;
606  typedef Allocator allocator_type;
607 
608 protected:
609  friend class const_accessor;
610  class node;
611  typedef typename tbb::internal::allocator_rebind<Allocator, node>::type node_allocator_type;
612  typedef tbb::internal::allocator_traits<node_allocator_type> node_allocator_traits;
613  node_allocator_type my_allocator;
614  HashCompare my_hash_compare;
615 
616  class node : public node_base {
617  tbb::aligned_space<value_type> my_value;
618  public:
619  value_type* storage() { return my_value.begin(); }
620  value_type& value() { return *storage(); }
621  };
622 
623  void delete_node( node_base *n ) {
624  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n)->storage());
625  node_allocator_traits::destroy(my_allocator, static_cast<node*>(n));
626  node_allocator_traits::deallocate(my_allocator, static_cast<node*>(n), 1);
627  }
628 
629  struct node_scoped_guard : tbb::internal::no_copy {
630  node* my_node;
631  node_allocator_type& my_alloc;
632 
633  node_scoped_guard(node* n, node_allocator_type& alloc) : my_node(n), my_alloc(alloc) {}
634  ~node_scoped_guard() {
635  if(my_node) {
636  node_allocator_traits::destroy(my_alloc, my_node);
637  node_allocator_traits::deallocate(my_alloc, my_node, 1);
638  }
639  }
640  void dismiss() { my_node = NULL; }
641  };
642 
643 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
644  template<typename... Args>
645  static node* create_node(node_allocator_type& allocator, Args&&... args)
646 #else
647  template<typename Arg1, typename Arg2>
648  static node* create_node(node_allocator_type& allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
649 #endif
650  {
651  node* node_ptr = node_allocator_traits::allocate(allocator, 1);
652  node_scoped_guard guard(node_ptr, allocator);
653  node_allocator_traits::construct(allocator, node_ptr);
654 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
655  node_allocator_traits::construct(allocator, node_ptr->storage(), std::forward<Args>(args)...);
656 #else
657  node_allocator_traits::construct(allocator, node_ptr->storage(), tbb::internal::forward<Arg1>(arg1), tbb::internal::forward<Arg2>(arg2));
658 #endif
659  guard.dismiss();
660  return node_ptr;
661  }
662 
663  static node* allocate_node_copy_construct(node_allocator_type& allocator, const Key &key, const T * t){
664  return create_node(allocator, key, *t);
665  }
666 
667 #if __TBB_CPP11_RVALUE_REF_PRESENT
668  static node* allocate_node_move_construct(node_allocator_type& allocator, const Key &key, const T * t){
669  return create_node(allocator, key, std::move(*const_cast<T*>(t)));
670  }
671 #endif
672 
673  static node* allocate_node_default_construct(node_allocator_type& allocator, const Key &key, const T * ){
674 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
675  // Emplace construct an empty T object inside the pair
676  return create_node(allocator, std::piecewise_construct,
677  std::forward_as_tuple(key), std::forward_as_tuple());
678 #else
679  // Use of a temporary object is impossible, because create_node takes a non-const reference.
680  // copy-initialization is possible because T is already required to be CopyConstructible.
681  T obj = T();
682  return create_node(allocator, key, tbb::internal::move(obj));
683 #endif
684  }
685 
686  static node* do_not_allocate_node(node_allocator_type& , const Key &, const T * ){
687  __TBB_ASSERT(false,"this dummy function should not be called");
688  return NULL;
689  }
690 
691  node *search_bucket( const key_type &key, bucket *b ) const {
692  node *n = static_cast<node*>( b->node_list );
693  while( is_valid(n) && !my_hash_compare.equal(key, n->value().first) )
694  n = static_cast<node*>( n->next );
695  __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
696  return n;
697  }
698 
700  class bucket_accessor : public bucket::scoped_t {
701  bucket *my_b;
702  public:
703  bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
705  inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
706  my_b = base->get_bucket( h );
707  // TODO: actually, notification is unnecessary here, just hiding double-check
708  if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
709  && try_acquire( my_b->mutex, /*write=*/true ) )
710  {
711  if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
712  }
713  else bucket::scoped_t::acquire( my_b->mutex, writer );
714  __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
715  }
716  //! check whether bucket is locked for write
717  bool is_writer() { return bucket::scoped_t::is_writer; }
718  //! get bucket pointer
719  bucket *operator() () { return my_b; }
720  };
721 
722  // TODO refactor to hash_base
723  void rehash_bucket( bucket *b_new, const hashcode_t h ) {
724  __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
725  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
726  __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
727  hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
728 #if __TBB_STATISTICS
729  my_info_rehashes++; // invocations of rehash_bucket
730 #endif
731 
732  bucket_accessor b_old( this, h & mask );
733 
734  mask = (mask<<1) | 1; // get full mask for new bucket
735  __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
736  restart:
737  for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
738  hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->value().first );
739 #if TBB_USE_ASSERT
740  hashcode_t bmask = h & (mask>>1);
741  bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
742  __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
743 #endif
744  if( (c & mask) == h ) {
745  if( !b_old.is_writer() )
746  if( !b_old.upgrade_to_writer() ) {
747  goto restart; // node ptr can be invalid due to concurrent erase
748  }
749  *p = n->next; // exclude from b_old
750  add_to_bucket( b_new, n );
751  } else p = &n->next; // iterate to next item
752  }
753  }
754 
755  struct call_clear_on_leave {
756  concurrent_hash_map* my_ch_map;
757  call_clear_on_leave( concurrent_hash_map* a_ch_map ) : my_ch_map(a_ch_map) {}
758  void dismiss() {my_ch_map = 0;}
759  ~call_clear_on_leave(){
760  if (my_ch_map){
761  my_ch_map->clear();
762  }
763  }
764  };
765 public:
766 
767  class accessor;
768  //! Combines data access, locking, and garbage collection.
769  class const_accessor : private node::scoped_t /*which derived from no_copy*/ {
770  friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
771  friend class accessor;
772  public:
773  //! Type of value
774  typedef const typename concurrent_hash_map::value_type value_type;
775 
776  //! True if result is empty.
777  bool empty() const { return !my_node; }
778 
779  //! Set to null
780  void release() {
781  if( my_node ) {
782  node::scoped_t::release();
783  my_node = 0;
784  }
785  }
786 
787  //! Return reference to associated value in hash table.
788  const_reference operator*() const {
789  __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
790  return my_node->value();
791  }
792 
793  //! Return pointer to associated value in hash table.
794  const_pointer operator->() const {
795  return &operator*();
796  }
797 
798  //! Create empty result
799  const_accessor() : my_node(NULL) {}
800 
801  //! Destroy result after releasing the underlying reference.
802  ~const_accessor() {
803  my_node = NULL; // scoped lock's release() is called in its destructor
804  }
805  protected:
806  bool is_writer() { return node::scoped_t::is_writer; }
807  node *my_node;
808  hashcode_t my_hash;
809  };
810 
811  //! Allows write access to elements and combines data access, locking, and garbage collection.
812  class accessor: public const_accessor {
813  public:
814  //! Type of value
815  typedef typename concurrent_hash_map::value_type value_type;
816 
817  //! Return reference to associated value in hash table.
818  reference operator*() const {
819  __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
820  return this->my_node->value();
821  }
822 
823  //! Return pointer to associated value in hash table.
824  pointer operator->() const {
825  return &operator*();
826  }
827  };
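The accessor classes above double as scoped locks on the element: const_accessor holds a reader lock, accessor a writer lock, released by release() or the destructor. A minimal usage sketch (editor's illustration, not from the TBB sources):

#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void count_word(StringTable &table, const std::string &word) {
    StringTable::accessor a;          // write lock held while 'a' is attached
    table.insert(a, word);            // creates a value-initialized int (0) if absent
    a->second += 1;                   // exclusive access to the pair
}                                     // lock released in the accessor's destructor

bool lookup_word(const StringTable &table, const std::string &word, int &out) {
    StringTable::const_accessor ca;   // shared (read) lock while attached
    if (!table.find(ca, word))
        return false;
    out = ca->second;
    return true;
}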
828 
829  //! Construct empty table.
830  explicit concurrent_hash_map( const allocator_type &a = allocator_type() )
831  : internal::hash_map_base(), my_allocator(a)
832  {}
833 
834  explicit concurrent_hash_map( const HashCompare& compare, const allocator_type& a = allocator_type() )
835  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
836  {}
837 
838  //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
839  explicit concurrent_hash_map( size_type n, const allocator_type &a = allocator_type() )
840  : internal::hash_map_base(), my_allocator(a)
841  {
842  reserve( n, my_allocator );
843  }
844 
845  concurrent_hash_map( size_type n, const HashCompare& compare, const allocator_type& a = allocator_type() )
846  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
847  {
848  reserve( n, my_allocator );
849  }
850 
851  //! Copy constructor
852  concurrent_hash_map( const concurrent_hash_map &table )
853  : internal::hash_map_base(),
854  my_allocator(node_allocator_traits::select_on_container_copy_construction(table.get_allocator()))
855  {
856  call_clear_on_leave scope_guard(this);
857  internal_copy(table);
858  scope_guard.dismiss();
859  }
860 
861  concurrent_hash_map( const concurrent_hash_map &table, const allocator_type &a)
862  : internal::hash_map_base(), my_allocator(a)
863  {
864  call_clear_on_leave scope_guard(this);
865  internal_copy(table);
866  scope_guard.dismiss();
867  }
868 
869 #if __TBB_CPP11_RVALUE_REF_PRESENT
870  //! Move constructor
871  concurrent_hash_map( concurrent_hash_map &&table )
872  : internal::hash_map_base(), my_allocator(std::move(table.get_allocator()))
873  {
874  internal_move(std::move(table));
875  }
876 
877  //! Move constructor
878  concurrent_hash_map( concurrent_hash_map &&table, const allocator_type &a )
879  : internal::hash_map_base(), my_allocator(a)
880  {
881  if (a == table.get_allocator()){
882  internal_move(std::move(table));
883  }else{
884  call_clear_on_leave scope_guard(this);
885  internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
886  scope_guard.dismiss();
887  }
888  }
889 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
890 
891  //! Construction with copying iteration range and given allocator instance
892  template<typename I>
893  concurrent_hash_map( I first, I last, const allocator_type &a = allocator_type() )
894  : internal::hash_map_base(), my_allocator(a)
895  {
896  call_clear_on_leave scope_guard(this);
897  internal_copy(first, last, std::distance(first, last));
898  scope_guard.dismiss();
899  }
900 
901  template<typename I>
902  concurrent_hash_map( I first, I last, const HashCompare& compare, const allocator_type& a = allocator_type() )
903  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
904  {
905  call_clear_on_leave scope_guard(this);
906  internal_copy(first, last, std::distance(first, last));
907  scope_guard.dismiss();
908  }
909 
910 #if __TBB_INITIALIZER_LISTS_PRESENT
911  concurrent_hash_map( std::initializer_list<value_type> il, const allocator_type &a = allocator_type() )
913  : internal::hash_map_base(), my_allocator(a)
914  {
915  call_clear_on_leave scope_guard(this);
916  internal_copy(il.begin(), il.end(), il.size());
917  scope_guard.dismiss();
918  }
919 
920  concurrent_hash_map( std::initializer_list<value_type> il, const HashCompare& compare, const allocator_type& a = allocator_type() )
921  : internal::hash_map_base(), my_allocator(a), my_hash_compare(compare)
922  {
923  call_clear_on_leave scope_guard(this);
924  internal_copy(il.begin(), il.end(), il.size());
925  scope_guard.dismiss();
926  }
927 
928 #endif //__TBB_INITIALIZER_LISTS_PRESENT
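For illustration, the main construction forms above in use; a sketch (editor's illustration) assuming C++11 for the initializer-list line:

#include <string>
#include <utility>
#include <vector>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void construct_examples() {
    StringTable a;                       // empty table
    StringTable b(1000);                 // ~1000 buckets preallocated; also the initial concurrency level
    std::vector<std::pair<std::string, int> > v(1, std::make_pair(std::string("one"), 1));
    StringTable c(v.begin(), v.end());   // copy an iteration range
    StringTable d(c);                    // copy constructor (not safe vs. concurrent writers to 'c')
    StringTable e = { {"two", 2}, {"three", 3} }; // initializer list (C++11)
}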
929 
930  //! Assignment
931  concurrent_hash_map& operator=( const concurrent_hash_map &table ) {
932  if( this!=&table ) {
933  typedef typename node_allocator_traits::propagate_on_container_copy_assignment pocca_type;
934  clear();
935  tbb::internal::allocator_copy_assignment(my_allocator, table.my_allocator, pocca_type());
936  internal_copy(table);
937  }
938  return *this;
939  }
940 
941 #if __TBB_CPP11_RVALUE_REF_PRESENT
942  //! Move Assignment
943  concurrent_hash_map& operator=( concurrent_hash_map &&table ) {
944  if(this != &table) {
945  typedef typename node_allocator_traits::propagate_on_container_move_assignment pocma_type;
946  internal_move_assign(std::move(table), pocma_type());
947  }
948  return *this;
949  }
950 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
951 
952 #if __TBB_INITIALIZER_LISTS_PRESENT
953  concurrent_hash_map& operator=( std::initializer_list<value_type> il ) {
955  clear();
956  internal_copy(il.begin(), il.end(), il.size());
957  return *this;
958  }
959 #endif //__TBB_INITIALIZER_LISTS_PRESENT
960 
961 
963 
965  void rehash(size_type n = 0);
966 
967  //! Clear table
968  void clear();
969 
970  //! Clear table and destroy it.
971  ~concurrent_hash_map() { clear(); }
972 
973  //------------------------------------------------------------------------
974  // Parallel algorithm support
975  //------------------------------------------------------------------------
976  range_type range( size_type grainsize=1 ) {
977  return range_type( *this, grainsize );
978  }
979  const_range_type range( size_type grainsize=1 ) const {
980  return const_range_type( *this, grainsize );
981  }
982 
983  //------------------------------------------------------------------------
984  // STL support - not thread-safe methods
985  //------------------------------------------------------------------------
986  iterator begin() { return iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
987  iterator end() { return iterator( *this, 0, 0, 0 ); }
988  const_iterator begin() const { return const_iterator( *this, 0, my_embedded_segment, my_embedded_segment->node_list ); }
989  const_iterator end() const { return const_iterator( *this, 0, 0, 0 ); }
990  std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range( key, end() ); }
991  std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range( key, end() ); }
992 
994  size_type size() const { return my_size; }
995 
997  bool empty() const { return my_size == 0; }
998 
1000  size_type max_size() const {return (~size_type(0))/sizeof(node);}
1001 
1003  size_type bucket_count() const { return my_mask+1; }
1004 
1006  allocator_type get_allocator() const { return this->my_allocator; }
1007 
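begin()/end() and the other members in the "STL support" block are explicitly not thread-safe: they may only run while no other thread is modifying the table. A small serial sketch (editor's illustration):

#include <cstdio>
#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void dump(const StringTable &table) {    // serial context only
    for (StringTable::const_iterator i = table.begin(); i != table.end(); ++i)
        std::printf("%s -> %d\n", i->first.c_str(), i->second);
    std::printf("%u items, %u buckets\n",
                unsigned(table.size()), unsigned(table.bucket_count()));
}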
1009  void swap( concurrent_hash_map &table );
1010 
1011  //------------------------------------------------------------------------
1012  // concurrent map operations
1013  //------------------------------------------------------------------------
1014 
1016  size_type count( const Key &key ) const {
1017  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false, &do_not_allocate_node );
1018  }
1019 
1021 
1022  bool find( const_accessor &result, const Key &key ) const {
1023  result.release();
1024  return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false, &do_not_allocate_node );
1025  }
1026 
1028 
1029  bool find( accessor &result, const Key &key ) {
1030  result.release();
1031  return lookup(/*insert*/false, key, NULL, &result, /*write=*/true, &do_not_allocate_node );
1032  }
1033 
1035 
1036  bool insert( const_accessor &result, const Key &key ) {
1037  result.release();
1038  return lookup(/*insert*/true, key, NULL, &result, /*write=*/false, &allocate_node_default_construct );
1039  }
1040 
1042 
1043  bool insert( accessor &result, const Key &key ) {
1044  result.release();
1045  return lookup(/*insert*/true, key, NULL, &result, /*write=*/true, &allocate_node_default_construct );
1046  }
1047 
1049 
1050  bool insert( const_accessor &result, const value_type &value ) {
1051  result.release();
1052  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false, &allocate_node_copy_construct );
1053  }
1054 
1056 
1057  bool insert( accessor &result, const value_type &value ) {
1058  result.release();
1059  return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true, &allocate_node_copy_construct );
1060  }
1061 
1063 
1064  bool insert( const value_type &value ) {
1065  return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false, &allocate_node_copy_construct );
1066  }
1067 
1068 #if __TBB_CPP11_RVALUE_REF_PRESENT
1069 
1071  bool insert( const_accessor &result, value_type && value ) {
1072  return generic_move_insert(result, std::move(value));
1073  }
1074 
1076 
1077  bool insert( accessor &result, value_type && value ) {
1078  return generic_move_insert(result, std::move(value));
1079  }
1080 
1082 
1083  bool insert( value_type && value ) {
1084  return generic_move_insert(accessor_not_used(), std::move(value));
1085  }
1086 
1087 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1088 
1090  template<typename... Args>
1091  bool emplace( const_accessor &result, Args&&... args ) {
1092  return generic_emplace(result, std::forward<Args>(args)...);
1093  }
1094 
1096 
1097  template<typename... Args>
1098  bool emplace( accessor &result, Args&&... args ) {
1099  return generic_emplace(result, std::forward<Args>(args)...);
1100  }
1101 
1103 
1104  template<typename... Args>
1105  bool emplace( Args&&... args ) {
1106  return generic_emplace(accessor_not_used(), std::forward<Args>(args)...);
1107  }
1108 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1109 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1110 
1112  template<typename I>
1113  void insert( I first, I last ) {
1114  for ( ; first != last; ++first )
1115  insert( *first );
1116  }
1117 
1118 #if __TBB_INITIALIZER_LISTS_PRESENT
1119  void insert( std::initializer_list<value_type> il ) {
1121  insert( il.begin(), il.end() );
1122  }
1123 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1124 
1126 
1127  bool erase( const Key& key );
1128 
1130 
1131  bool erase( const_accessor& item_accessor ) {
1132  return exclude( item_accessor );
1133  }
1134 
1136 
1137  bool erase( accessor& item_accessor ) {
1138  return exclude( item_accessor );
1139  }
1140 
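Putting the concurrent operations together: the insert overloads return true only when the call created the item, and erase(key) returns false when the key was absent. A usage sketch (editor's illustration, not from the TBB sources):

#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void insert_find_erase(StringTable &table) {
    bool is_new = table.insert(StringTable::value_type("apple", 1)); // no accessor: fire and forget

    {
        StringTable::accessor a;
        table.insert(a, std::string("apple"));  // find-or-create under a write lock
        if (!is_new) a->second += 1;
    }                                           // write lock released here

    table.erase(std::string("apple"));          // by key; true if something was removed

    StringTable::accessor a;
    if (table.find(a, std::string("banana")))   // erase exactly the item we hold
        table.erase(a);                         // releases 'a' as a side effect
}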
1141 protected:
1143  bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key &, const T * ), node *tmp_n = 0 ) ;
1144 
1145  struct accessor_not_used { void release(){}};
1146  friend const_accessor* accessor_location( accessor_not_used const& ){ return NULL;}
1147  friend const_accessor* accessor_location( const_accessor & a ) { return &a;}
1148 
1149  friend bool is_write_access_needed( accessor const& ) { return true;}
1150  friend bool is_write_access_needed( const_accessor const& ) { return false;}
1151  friend bool is_write_access_needed( accessor_not_used const& ) { return false;}
1152 
1153 #if __TBB_CPP11_RVALUE_REF_PRESENT
1154  template<typename Accessor>
1155  bool generic_move_insert( Accessor && result, value_type && value ) {
1156  result.release();
1157  return lookup(/*insert*/true, value.first, &value.second, accessor_location(result), is_write_access_needed(result), &allocate_node_move_construct );
1158  }
1159 
1160 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1161  template<typename Accessor, typename... Args>
1162  bool generic_emplace( Accessor && result, Args &&... args ) {
1163  result.release();
1164  node * node_ptr = create_node(my_allocator, std::forward<Args>(args)...);
1165  return lookup(/*insert*/true, node_ptr->value().first, NULL, accessor_location(result), is_write_access_needed(result), &do_not_allocate_node, node_ptr );
1166  }
1167 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1168 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1169 
1171  bool exclude( const_accessor &item_accessor );
1172 
1174  template<typename I>
1175  std::pair<I, I> internal_equal_range( const Key& key, I end ) const;
1176 
1178  void internal_copy( const concurrent_hash_map& source );
1179 
1180  template<typename I>
1181  void internal_copy( I first, I last, size_type reserve_size );
1182 
1183 #if __TBB_CPP11_RVALUE_REF_PRESENT
1184  // A compile-time dispatch to allow move assignment of containers with non-movable value_type if POCMA is true_type
1185  void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_true_type) {
1186  tbb::internal::allocator_move_assignment(my_allocator, other.my_allocator, tbb::internal::traits_true_type());
1187  internal_move(std::move(other));
1188  }
1189 
1190  void internal_move_assign(concurrent_hash_map&& other, tbb::internal::traits_false_type) {
1191  if (this->my_allocator == other.my_allocator) {
1192  internal_move(std::move(other));
1193  } else {
1194  //do per element move
1195  internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1196  }
1197  }
1198 #endif
1199 
1201 
1202  //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
1203  const_pointer internal_fast_find( const Key& key ) const {
1204  hashcode_t h = my_hash_compare.hash( key );
1205  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1206  node *n;
1207  restart:
1208  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1209  bucket *b = get_bucket( h & m );
1210  // TODO: actually, notification is unnecessary here, just hiding double-check
1211  if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req )
1212  {
1213  bucket::scoped_t lock;
1214  if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
1215  if( b->node_list == internal::rehash_req)
1216  const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); //recursive rehashing
1217  }
1218  else lock.acquire( b->mutex, /*write=*/false );
1219  __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
1220  }
1221  n = search_bucket( key, b );
1222  if( n )
1223  return n->storage();
1224  else if( check_mask_race( h, m ) )
1225  goto restart;
1226  return 0;
1227  }
1228 };
1229 
1230 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1231 namespace internal {
1232 using namespace tbb::internal;
1233 
1234 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1235 using hash_map_t = Map<
1236  Key, T,
1237  std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1238  pack_element_t<0, Args...>, tbb_hash_compare<Key> >,
1239  std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1240  pack_element_t<sizeof...(Args)-1, Args...>, tbb_allocator<std::pair<const Key, T> > >
1241 >;
1242 }
1243 
1244 // Deduction guide for the constructor from two iterators and hash_compare/ allocator
1245 template<typename I, typename... Args>
1246 concurrent_hash_map(I, I, Args...)
1247 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1248 
1249 // Deduction guide for the constructor from an initializer_list and hash_compare/ allocator
1250 // Deduction guide for an initializer_list, hash_compare and allocator is implicit
1251 template<typename Key, typename T, typename CompareOrAllocator>
1252 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1253 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1254 
1255 #endif /* __TBB_CPP17_DEDUCTION_GUIDES_PRESENT */
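With a C++17 compiler (so that __TBB_CPP17_DEDUCTION_GUIDES_PRESENT is set), the guides above let Key and T be deduced from an iterator range; a sketch (editor's illustration):

#include <string>
#include <utility>
#include <vector>
#include "tbb/concurrent_hash_map.h"

void ctad_example() {
    std::vector<std::pair<std::string, int> > v{ {"one", 1}, {"two", 2} };
    // Deduces Key = std::string and T = int; HashCompare and the allocator
    // fall back to the defaults named in the guide above.
    tbb::concurrent_hash_map m(v.begin(), v.end());
    m.insert(std::make_pair(std::string("three"), 3));
}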
1256 
1257 template<typename Key, typename T, typename HashCompare, typename A>
1258 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type& , const Key&, const T*), node *tmp_n ) {
1259  __TBB_ASSERT( !result || !result->my_node, NULL );
1260  bool return_value;
1261  hashcode_t const h = my_hash_compare.hash( key );
1262  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1263  segment_index_t grow_segment = 0;
1264  node *n;
1265  restart:
1266  {//lock scope
1267  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1268  return_value = false;
1269  // get bucket
1270  bucket_accessor b( this, h & m );
1271 
1272  // find a node
1273  n = search_bucket( key, b() );
1274  if( op_insert ) {
1275  // [opt] insert a key
1276  if( !n ) {
1277  if( !tmp_n ) {
1278  tmp_n = allocate_node(my_allocator, key, t);
1279  }
1280  if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion
1281  // Rerun search_list, in case another thread inserted the item during the upgrade.
1282  n = search_bucket( key, b() );
1283  if( is_valid(n) ) { // unfortunately, it did
1284  b.downgrade_to_reader();
1285  goto exists;
1286  }
1287  }
1288  if( check_mask_race(h, m) )
1289  goto restart; // b.release() is done in ~b().
1290  // insert and set flag to grow the container
1291  grow_segment = insert_new_node( b(), n = tmp_n, m );
1292  tmp_n = 0;
1293  return_value = true;
1294  }
1295  } else { // find or count
1296  if( !n ) {
1297  if( check_mask_race( h, m ) )
1298  goto restart; // b.release() is done in ~b(). TODO: replace by continue
1299  return false;
1300  }
1301  return_value = true;
1302  }
1303  exists:
1304  if( !result ) goto check_growth;
1305  // TODO: the following seems as generic/regular operation
1306  // acquire the item
1307  if( !result->try_acquire( n->mutex, write ) ) {
1308  for( tbb::internal::atomic_backoff backoff(true);; ) {
1309  if( result->try_acquire( n->mutex, write ) ) break;
1310  if( !backoff.bounded_pause() ) {
1311  // the wait takes really long, restart the operation
1312  b.release();
1313  __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1314  __TBB_Yield();
1315  m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1316  goto restart;
1317  }
1318  }
1319  }
1320  }//lock scope
1321  result->my_node = n;
1322  result->my_hash = h;
1323 check_growth:
1324  // [opt] grow the container
1325  if( grow_segment ) {
1326 #if __TBB_STATISTICS
1327  my_info_resizes++; // concurrent ones
1328 #endif
1329  enable_segment( grow_segment, my_allocator );
1330  }
1331  if( tmp_n ) // if op_insert only
1332  delete_node( tmp_n );
1333  return return_value;
1334 }
1335 
1336 template<typename Key, typename T, typename HashCompare, typename A>
1337 template<typename I>
1338 std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
1339  hashcode_t h = my_hash_compare.hash( key );
1340  hashcode_t m = my_mask;
1341  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1342  h &= m;
1343  bucket *b = get_bucket( h );
1344  while( b->node_list == internal::rehash_req ) {
1345  m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1346  b = get_bucket( h &= m );
1347  }
1348  node *n = search_bucket( key, b );
1349  if( !n )
1350  return std::make_pair(end_, end_);
1351  iterator lower(*this, h, b, n), upper(lower);
1352  return std::make_pair(lower, ++upper);
1353 }
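Since keys are unique, the range returned by equal_range() holds zero or one element; like the other iterator-based methods it is meant for serial use. A sketch (editor's illustration):

#include <string>
#include <utility>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

bool contains_serial(const StringTable &table, const std::string &key) {
    std::pair<StringTable::const_iterator, StringTable::const_iterator> r =
        table.equal_range(key);
    return r.first != r.second;   // at most one element for unique keys
}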
1354 
1355 template<typename Key, typename T, typename HashCompare, typename A>
1356 bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor ) {
1357  __TBB_ASSERT( item_accessor.my_node, NULL );
1358  node_base *const n = item_accessor.my_node;
1359  hashcode_t const h = item_accessor.my_hash;
1360  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1361  do {
1362  // get bucket
1363  bucket_accessor b( this, h & m, /*writer=*/true );
1364  node_base **p = &b()->node_list;
1365  while( *p && *p != n )
1366  p = &(*p)->next;
1367  if( !*p ) { // someone else was first
1368  if( check_mask_race( h, m ) )
1369  continue;
1370  item_accessor.release();
1371  return false;
1372  }
1373  __TBB_ASSERT( *p == n, NULL );
1374  *p = n->next; // remove from container
1375  my_size--;
1376  break;
1377  } while(true);
1378  if( !item_accessor.is_writer() ) // need to get exclusive lock
1379  item_accessor.upgrade_to_writer(); // return value means nothing here
1380  item_accessor.release();
1381  delete_node( n ); // Only one thread can delete it
1382  return true;
1383 }
1384 
1385 template<typename Key, typename T, typename HashCompare, typename A>
1386 bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
1387  node_base *n;
1388  hashcode_t const h = my_hash_compare.hash( key );
1389  hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask );
1390 restart:
1391  {//lock scope
1392  // get bucket
1393  bucket_accessor b( this, h & m );
1394  search:
1395  node_base **p = &b()->node_list;
1396  n = *p;
1397  while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->value().first ) ) {
1398  p = &n->next;
1399  n = *p;
1400  }
1401  if( !n ) { // not found, but mask could be changed
1402  if( check_mask_race( h, m ) )
1403  goto restart;
1404  return false;
1405  }
1406  else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1407  if( check_mask_race( h, m ) ) // contended upgrade, check mask
1408  goto restart;
1409  goto search;
1410  }
1411  *p = n->next;
1412  my_size--;
1413  }
1414  {
1415  typename node::scoped_t item_locker( n->mutex, /*write=*/true );
1416  }
1417  // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
1418  delete_node( n ); // Only one thread can delete it due to write lock on the bucket
1419  return true;
1420 }
1421 
1422 template<typename Key, typename T, typename HashCompare, typename A>
1423 void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
1424  typedef typename node_allocator_traits::propagate_on_container_swap pocs_type;
1425  if (this != &table && (pocs_type::value || my_allocator == table.my_allocator)) {
1426  using std::swap;
1427  tbb::internal::allocator_swap(this->my_allocator, table.my_allocator, pocs_type());
1428  swap(this->my_hash_compare, table.my_hash_compare);
1429  internal_swap(table);
1430  }
1431 }
1432 
1433 template<typename Key, typename T, typename HashCompare, typename A>
1434 void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
1435  reserve( sz, my_allocator ); // TODO: add reduction of number of buckets as well
1436  hashcode_t mask = my_mask;
1437  hashcode_t b = (mask+1)>>1; // size or first index of the last segment
1438  __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
1439  bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
1440  for(; b <= mask; b++, bp++ ) {
1441  node_base *n = bp->node_list;
1442  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1443  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1444  if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one
1445  hashcode_t h = b; bucket *b_old = bp;
1446  do {
1447  __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1448  hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
1449  b_old = get_bucket( h &= m );
1450  } while( b_old->node_list == internal::rehash_req );
1451  // now h is the index of the root rehashed bucket b_old
1452  mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
1453  for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1454  hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->value().first );
1455  if( (c & mask) != h ) { // should be rehashed
1456  *p = q->next; // exclude from b_old
1457  bucket *b_new = get_bucket( c & mask );
1458  __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
1459  add_to_bucket( b_new, q );
1460  } else p = &q->next; // iterate to next item
1461  }
1462  }
1463  }
1464 #if TBB_USE_PERFORMANCE_WARNINGS
1465  int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1466  static bool reported = false;
1467 #endif
1468 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1469  for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing
1470  if( b & (b-2) ) ++bp; // not the beginning of a segment
1471  else bp = get_bucket( b );
1472  node_base *n = bp->node_list;
1473  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1474  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
1475 #if TBB_USE_PERFORMANCE_WARNINGS
1476  if( n == internal::empty_rehashed ) empty_buckets++;
1477  else if( n->next ) overpopulated_buckets++;
1478 #endif
1479 #if TBB_USE_ASSERT
1480  for( ; is_valid(n); n = n->next ) {
1481  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first ) & mask;
1482  __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1483  }
1484 #endif
1485  }
1486 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1487 #if TBB_USE_PERFORMANCE_WARNINGS
1488  if( buckets > current_size) empty_buckets -= buckets - current_size;
1489  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1490  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1492  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1494  typeid(*this).name(),
1495 #else
1496  "concurrent_hash_map",
1497 #endif
1498  current_size, empty_buckets, overpopulated_buckets );
1499  reported = true;
1500  }
1501 #endif
1502 }
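rehash() runs serially and forces all lazily deferred per-bucket rehashing to complete (buckets still marked rehash_req), optionally growing the table first; it is useful between a concurrent insertion phase and a lookup-heavy phase. A sketch (editor's illustration):

#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void settle(StringTable &table) {
    // Must not run concurrently with other operations on 'table'.
    table.rehash();                      // finish deferred rehashing only
    // table.rehash(2 * table.size());   // or also reserve ~2x buckets first
}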
1503 
1504 template<typename Key, typename T, typename HashCompare, typename A>
1505 void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
1506  hashcode_t m = my_mask;
1507  __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1508 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1509 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1510  int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics
1511  static bool reported = false;
1512 #endif
1513  bucket *bp = 0;
1514  // check consistency
1515  for( segment_index_t b = 0; b <= m; b++ ) {
1516  if( b & (b-2) ) ++bp; // not the beginning of a segment
1517  else bp = get_bucket( b );
1518  node_base *n = bp->node_list;
1519  __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
1520  __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1521 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1522  if( n == internal::empty_rehashed ) empty_buckets++;
1523  else if( n == internal::rehash_req ) buckets--;
1524  else if( n->next ) overpopulated_buckets++;
1525 #endif
1526 #if __TBB_EXTRA_DEBUG
1527  for(; is_valid(n); n = n->next ) {
1528  hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->value().first );
1529  h &= m;
1530  __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
1531  }
1532 #endif
1533  }
1534 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1535 #if __TBB_STATISTICS
1536  printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1537  " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1538  current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1539  unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1540  my_info_resizes = 0; // concurrent ones
1541  my_info_restarts = 0; // race collisions
1542  my_info_rehashes = 0; // invocations of rehash_bucket
1543 #endif
1544  if( buckets > current_size) empty_buckets -= buckets - current_size;
1545  else overpopulated_buckets -= current_size - buckets; // TODO: load_factor?
1546  if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1548  "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1550  typeid(*this).name(),
1551 #else
1552  "concurrent_hash_map",
1553 #endif
1554  current_size, empty_buckets, overpopulated_buckets );
1555  reported = true;
1556  }
1557 #endif
1558 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1559  my_size = 0;
1560  segment_index_t s = segment_index_of( m );
1561  __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1562  do {
1563  __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1564  segment_ptr_t buckets_ptr = my_table[s];
1565  size_type sz = segment_size( s ? s : 1 );
1566  for( segment_index_t i = 0; i < sz; i++ )
1567  for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1568  buckets_ptr[i].node_list = n->next;
1569  delete_node( n );
1570  }
1571  delete_segment(s, my_allocator);
1572  } while(s-- > 0);
1573  my_mask = embedded_buckets - 1;
1574 }
1575 
1576 template<typename Key, typename T, typename HashCompare, typename A>
1577 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
1578  hashcode_t mask = source.my_mask;
1579  if( my_mask == mask ) { // optimized version
1580  reserve( source.my_size, my_allocator ); // TODO: load_factor?
1581  bucket *dst = 0, *src = 0;
1582  bool rehash_required = false;
1583  for( hashcode_t k = 0; k <= mask; k++ ) {
1584  if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
1585  else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1586  __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
1587  node *n = static_cast<node*>( src->node_list );
1588  if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
1589  rehash_required = true;
1590  dst->node_list = internal::rehash_req;
1591  } else for(; n; n = static_cast<node*>( n->next ) ) {
1592  node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1593  add_to_bucket( dst, node_ptr);
1594  ++my_size; // TODO: replace by non-atomic op
1595  }
1596  }
1597  if( rehash_required ) rehash();
1598  } else internal_copy( source.begin(), source.end(), source.my_size );
1599 }
1600 
1601 template<typename Key, typename T, typename HashCompare, typename A>
1602 template<typename I>
1603 void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last, size_type reserve_size) {
1604  reserve( reserve_size, my_allocator ); // TODO: load_factor?
1605  hashcode_t m = my_mask;
1606  for(; first != last; ++first) {
1607  hashcode_t h = my_hash_compare.hash( (*first).first );
1608  bucket *b = get_bucket( h & m );
1609  __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
1610  node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1611  add_to_bucket( b, node_ptr );
1612  ++my_size; // TODO: replace by non-atomic op
1613  }
1614 }
1615 
1616 } // namespace interface5
1617 
1618 using interface5::concurrent_hash_map;
1619 
1620 
1621 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1622 bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
1623  if(a.size() != b.size()) return false;
1624  typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
1625  typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
1626  for(; i != i_end; ++i) {
1627  j = b.equal_range(i->first).first;
1628  if( j == j_end || !(i->second == j->second) ) return false;
1629  }
1630  return true;
1631 }
1632 
1633 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1634 bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
1635 { return !(a == b); }
1636 
1637 template<typename Key, typename T, typename HashCompare, typename A>
1638 void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
1639 { a.swap( b ); }
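The free operator== compares element-wise through equal_range(), and the free swap() forwards to the member; both are serial-context operations. A sketch (editor's illustration):

#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string, int> StringTable;

void compare_then_swap(StringTable &a, StringTable &b) {
    if (a == b)       // serial: walks 'a' and probes 'b'
        return;
    swap(a, b);       // found by ADL in namespace tbb; invalidates iterators
}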
1640 
1641 #if _MSC_VER && !defined(__INTEL_COMPILER)
1642  #pragma warning( pop )
1643 #endif // warning 4127 is back
1644 
1645 } // namespace tbb
1646 
1647 #include "internal/_warning_suppress_disable_notice.h"
1648 #undef __TBB_concurrent_hash_map_H_include_area
1649 
1650 #endif /* __TBB_concurrent_hash_map_H */
tbb::interface5::concurrent_hash_map::emplace
bool emplace(accessor &result, Args &&... args)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
Definition: concurrent_hash_map.h:1098
tbb::interface5::internal::hash_map_base::bucket::mutex_t
spin_rw_mutex mutex_t
Mutex type for buckets.
Definition: concurrent_hash_map.h:95
tbb::interface5::internal::hash_map_base::my_size
atomic< size_type > my_size
Size of container in stored items.
Definition: concurrent_hash_map.h:118
tbb::interface5::internal::hash_map_base::bucket::node_list
node_base * node_list
Definition: concurrent_hash_map.h:99
tbb::internal::allocator_traits::deallocate
static void deallocate(Alloc &a, pointer p, size_type n)
Definition: _allocator_traits.h:106
tbb::interface5::internal::hash_map_base::my_table
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
Definition: concurrent_hash_map.h:116
tbb::internal::__TBB_load_with_acquire
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:709
tbb::acquire
@ acquire
Acquire.
Definition: atomic.h:57
tbb::interface5::concurrent_hash_map::rehash_bucket
void rehash_bucket(bucket *b_new, const hashcode_t h)
Definition: concurrent_hash_map.h:723
tbb::interface5::internal::hash_map_range::difference_type
Iterator::difference_type difference_type
Definition: concurrent_hash_map.h:482
tbb::interface5::concurrent_hash_map::allocate_node_move_construct
static node * allocate_node_move_construct(node_allocator_type &allocator, const Key &key, const T *t)
Definition: concurrent_hash_map.h:668
tbb::interface5::concurrent_hash_map::insert
bool insert(value_type &&value)
Insert item by copying if there is no such key present already.
Definition: concurrent_hash_map.h:1083
tbb::interface5::concurrent_hash_map::~concurrent_hash_map
~concurrent_hash_map()
Clear table and destroy it.
Definition: concurrent_hash_map.h:971
internal
Definition: _flow_graph_async_msg_impl.h:24
tbb::interface5::internal::hash_map_base::hashcode_t
size_t hashcode_t
Type of a hash code.
Definition: concurrent_hash_map.h:87
tbb::interface5::internal::hash_map_iterator::operator++
hash_map_iterator & operator++()
Definition: concurrent_hash_map.h:449
tbb::interface5::concurrent_hash_map::insert
bool insert(accessor &result, value_type &&value)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
Definition: concurrent_hash_map.h:1077
tbb::interface5::concurrent_hash_map::node
Definition: concurrent_hash_map.h:616
tbb::interface5::concurrent_hash_map::const_accessor::empty
bool empty() const
True if result is empty.
Definition: concurrent_hash_map.h:777
tbb::interface5::internal::hash_map_base::init_buckets
static void init_buckets(segment_ptr_t ptr, size_type sz, bool is_initial)
Initialize buckets.
Definition: concurrent_hash_map.h:164
tbb::interface5::internal::hash_map_base::insert_new_node
segment_index_t insert_new_node(bucket *b, node_base *n, hashcode_t mask)
Insert a node and check for load factor.
Definition: concurrent_hash_map.h:285
tbb::interface5::concurrent_hash_map::allocate_node_copy_construct
static node * allocate_node_copy_construct(node_allocator_type &allocator, const Key &key, const T *t)
Definition: concurrent_hash_map.h:663
tbb::interface5::concurrent_hash_map::iterator
internal::hash_map_iterator< concurrent_hash_map, value_type > iterator
Definition: concurrent_hash_map.h:602
tbb::interface5::internal::hash_map_base::my_mask
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
Definition: concurrent_hash_map.h:114
tbb::interface5::concurrent_hash_map::node::value
value_type & value()
Definition: concurrent_hash_map.h:620
tbb::interface5::concurrent_hash_map::node_scoped_guard::my_alloc
node_allocator_type & my_alloc
Definition: concurrent_hash_map.h:631
tbb::interface5::concurrent_hash_map::key_type
Key key_type
Definition: concurrent_hash_map.h:593
tbb::interface5::concurrent_hash_map::const_accessor
Combines data access, locking, and garbage collection.
Definition: concurrent_hash_map.h:769
tbb::interface5::concurrent_hash_map::find
bool find(const_accessor &result, const Key &key) const
Find item and acquire a read lock on the item.
Definition: concurrent_hash_map.h:1022
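
A read-side sketch, reusing the illustrative table_t alias from the emplace sketch above; the element stays read-locked only while the const_accessor is alive.

    int read_answer(const table_t &table) {
        table_t::const_accessor ca;
        if (table.find(ca, "answer"))        // acquires a read lock on success
            return ca->second;               // lock is released when 'ca' goes out of scope
        return -1;                           // hypothetical "not found" value
    }
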
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(concurrent_hash_map &&table, const allocator_type &a)
Move constructor.
Definition: concurrent_hash_map.h:878
tbb::interface5::internal::hash_map_range::my_end
Iterator my_end
Definition: concurrent_hash_map.h:471
tbb::interface5::internal::hash_map_range::my_begin
Iterator my_begin
Definition: concurrent_hash_map.h:470
tbb::interface5::concurrent_hash_map::clear
void clear()
Clear table.
Definition: concurrent_hash_map.h:1505
tbb::internal::allocator_copy_assignment
void allocator_copy_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
Definition: _allocator_traits.h:49
tbb::interface5::internal::hash_map_base::segment_ptr_t
bucket * segment_ptr_t
Segment pointer.
Definition: concurrent_hash_map.h:110
__TBB_ASSERT
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
tbb::interface5::concurrent_hash_map::end
iterator end()
Definition: concurrent_hash_map.h:987
__TBB_FORWARDING_REF
#define __TBB_FORWARDING_REF(A)
Definition: tbb_stddef.h:517
tbb::interface5::concurrent_hash_map::insert
bool insert(accessor &result, const Key &key)
Insert item (if not already present) and acquire a write lock on the item.
Definition: concurrent_hash_map.h:1043
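
Because this overload creates the pair (key, T()) when the key is new, it supports an insert-or-update idiom; a sketch reusing the illustrative table_t:

    void add_delta(table_t &table, const std::string &key, int delta) {
        table_t::accessor a;
        table.insert(a, key);                // true if a fresh (key, T()) pair was created
        a->second += delta;                  // safe either way: write lock is held
    }
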
tbb::interface5::concurrent_hash_map::const_accessor::const_accessor
const_accessor()
Create empty result.
Definition: concurrent_hash_map.h:799
tbb::internal::allocator_traits::construct
static void construct(Alloc &, PT *p)
Definition: _allocator_traits.h:111
tbb::interface5::internal::hash_map_iterator::operator->
Value * operator->() const
Definition: concurrent_hash_map.h:426
tbb::internal::itt_store_word_with_release
void itt_store_word_with_release(tbb::atomic< T > &dst, U src)
Definition: tbb_profiling.h:157
tbb::internal::itt_load_word_with_acquire
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
Definition: tbb_profiling.h:168
tbb::internal::allocator_move_assignment
void allocator_move_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
Definition: _allocator_traits.h:59
tbb::interface5::concurrent_hash_map::bucket_accessor::my_b
bucket * my_b
Definition: concurrent_hash_map.h:701
tbb::interface5::internal::hash_map_base
base class of concurrent_hash_map
Definition: concurrent_hash_map.h:82
tbb::interface5::concurrent_hash_map::operator=
concurrent_hash_map & operator=(const concurrent_hash_map &table)
Assignment.
Definition: concurrent_hash_map.h:931
tbb::internal::allocator_traits::destroy
static void destroy(Alloc &, T *p)
Definition: _allocator_traits.h:133
tbb::interface5::concurrent_hash_map::const_accessor::operator*
const_reference operator*() const
Return reference to associated value in hash table.
Definition: concurrent_hash_map.h:788
tbb::interface5::concurrent_hash_map::bucket_accessor
Bucket accessor: used to find a bucket, optionally rehash it, acquire a lock, and access it.
Definition: concurrent_hash_map.h:700
tbb::interface5::concurrent_hash_map::insert
bool insert(accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
Definition: concurrent_hash_map.h:1057
tbb::interface5::concurrent_hash_map::search_bucket
node * search_bucket(const key_type &key, bucket *b) const
Definition: concurrent_hash_map.h:691
tbb::interface5::internal::hash_map_node_base::mutex_t
spin_rw_mutex mutex_t
Mutex type.
Definition: concurrent_hash_map.h:70
tbb::interface5::internal::hash_map_base::enable_segment_failsafe::enable_segment_failsafe
enable_segment_failsafe(segments_table_t &table, segment_index_t k)
Definition: concurrent_hash_map.h:182
tbb::interface5::internal::hash_map_node_base::mutex
mutex_t mutex
Definition: concurrent_hash_map.h:75
tbb::interface5::internal::hash_map_iterator
Meets requirements of a forward iterator for STL.
Definition: concurrent_hash_map.h:346
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(size_type n, const HashCompare &compare, const allocator_type &a=allocator_type())
Definition: concurrent_hash_map.h:845
tbb::interface5::concurrent_hash_map::const_accessor::value_type
typedef const concurrent_hash_map::value_type value_type
Type of value.
Definition: concurrent_hash_map.h:774
tbb::interface5::concurrent_hash_map::node_scoped_guard::my_node
node * my_node
Definition: concurrent_hash_map.h:630
tbb::interface5::concurrent_hash_map::reference
value_type & reference
Definition: concurrent_hash_map.h:600
tbb::interface5::concurrent_hash_map::lookup
bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
Insert or find item and optionally acquire a lock on the item.
Definition: concurrent_hash_map.h:1258
tbb
The tbb namespace.
Definition: serial/tbb/parallel_for.h:46
tbb::internal::first
Container::iterator first(Container &c)
Definition: _range_iterator.h:46
tbb::interface5::internal::hash_map_iterator::operator++
hash_map_iterator operator++(int)
Post increment.
Definition: concurrent_hash_map.h:430
tbb::interface5::concurrent_hash_map::const_range_type
internal::hash_map_range< const_iterator > const_range_type
Definition: concurrent_hash_map.h:605
tbb::interface5::internal::hash_map_base::reserve
void reserve(size_type buckets, const Allocator &allocator)
Prepare enough segments for number of buckets.
Definition: concurrent_hash_map.h:302
tbb::interface5::concurrent_hash_map::value_type
std::pair< const Key, T > value_type
Definition: concurrent_hash_map.h:595
tbb::interface5::internal::hash_map_iterator::operator*
Value & operator*() const
Definition: concurrent_hash_map.h:422
tbb::internal::allocator_swap
void allocator_swap(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
Definition: _allocator_traits.h:69
tbb::interface5::concurrent_hash_map::internal_move_assign
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_false_type)
Definition: concurrent_hash_map.h:1190
tbb::interface5::concurrent_hash_map::accessor::operator*
reference operator*() const
Return reference to associated value in hash table.
Definition: concurrent_hash_map.h:818
tbb::internal::last
Container::iterator last(Container &c)
Definition: _range_iterator.h:52
tbb::interface5::concurrent_hash_map::erase
bool erase(const_accessor &item_accessor)
Erase item by const_accessor.
Definition: concurrent_hash_map.h:1131
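
A sketch of erasing an element through an accessor already held, reusing the illustrative table_t; a successful erase releases the accessor.

    bool erase_if_negative(table_t &table, const std::string &key) {
        table_t::accessor a;
        if (table.find(a, key) && a->second < 0)
            return table.erase(a);           // removes the element the accessor points to
        return false;
    }
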
tbb::interface5::internal::empty_rehashed
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
Definition: concurrent_hash_map.h:80
tbb::internal::runtime_warning
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
Definition: tbb_assert_impl.h:85
spin_rw_mutex.h
tbb::operator!=
bool operator!=(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
Definition: cache_aligned_allocator.h:132
tbb::interface5::concurrent_hash_map::internal_equal_range
std::pair< I, I > internal_equal_range(const Key &key, I end) const
Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
Definition: concurrent_hash_map.h:1338
tbb::spin_rw_mutex_v3::scoped_lock
The scoped locking pattern.
Definition: spin_rw_mutex.h:86
tbb::interface5::concurrent_hash_map::max_size
size_type max_size() const
Upper bound on size.
Definition: concurrent_hash_map.h:1000
tbb::interface5::concurrent_hash_map::insert
bool insert(const_accessor &result, value_type &&value)
Insert item by moving if there is no such key present already and acquire a read lock on the item.
Definition: concurrent_hash_map.h:1071
tbb::internal::allocator_type
Class for determining type of std::allocator<T>::value_type.
Definition: tbb_stddef.h:471
tbb::interface5::concurrent_hash_map::end
const_iterator end() const
Definition: concurrent_hash_map.h:989
tbb::interface5::internal::hash_map_base::enable_segment
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.
Definition: concurrent_hash_map.h:190
tbb::interface5::concurrent_hash_map::const_accessor::operator->
const_pointer operator->() const
Return pointer to associated value in hash table.
Definition: concurrent_hash_map.h:794
tbb::interface5::internal::hash_map_base::enable_segment_failsafe
Exception safety helper.
Definition: concurrent_hash_map.h:180
tbb::interface5::concurrent_hash_map::get_allocator
allocator_type get_allocator() const
return allocator object
Definition: concurrent_hash_map.h:1006
tbb::interface5::concurrent_hash_map::const_iterator
internal::hash_map_iterator< concurrent_hash_map, const value_type > const_iterator
Definition: concurrent_hash_map.h:603
tbb::interface5::concurrent_hash_map::call_clear_on_leave::my_ch_map
concurrent_hash_map * my_ch_map
Definition: concurrent_hash_map.h:756
tbb::interface5::concurrent_hash_map::insert
bool insert(const value_type &value)
Insert item by copying if there is no such key present already.
Definition: concurrent_hash_map.h:1064
tbb::interface5::concurrent_hash_map::insert
bool insert(const_accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a read lock on the item.
Definition: concurrent_hash_map.h:1050
tbb::interface5::internal::hash_map_node_base
Node base type.
Definition: concurrent_hash_map.h:68
tbb::interface5::concurrent_hash_map::accessor::operator->
pointer operator->() const
Return pointer to associated value in hash table.
Definition: concurrent_hash_map.h:824
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(const HashCompare &compare, const allocator_type &a=allocator_type())
Definition: concurrent_hash_map.h:834
tbb::interface5::internal::hash_map_base::get_bucket
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
Definition: concurrent_hash_map.h:234
tbb::interface5::internal::hash_map_iterator::advance_to_next_bucket
void advance_to_next_bucket()
Definition: concurrent_hash_map.h:369
tbb::interface5::internal::hash_map_base::check_mask_race
bool check_mask_race(const hashcode_t h, hashcode_t &m) const
Check for mask race.
Definition: concurrent_hash_map.h:254
tbb::interface5::internal::hash_map_base::mark_rehashed_levels
void mark_rehashed_levels(hashcode_t h)
Definition: concurrent_hash_map.h:243
tbb::interface5::internal::hash_map_range::hash_map_range
hash_map_range(hash_map_range &r, split)
Split range.
Definition: concurrent_hash_map.h:493
tbb::interface5::concurrent_hash_map::equal_range
std::pair< const_iterator, const_iterator > equal_range(const Key &key) const
Definition: concurrent_hash_map.h:991
tbb::interface5::concurrent_hash_map::emplace
bool emplace(Args &&... args)
Insert item constructed in place from args if there is no such key present already.
Definition: concurrent_hash_map.h:1105
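
The accessor-free form, reusing the illustrative table_t; no lock is retained after the call returns.

    bool put_pi(table_t &table) {
        return table.emplace("pi", 3);       // false if the key already existed
    }
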
tbb::internal::allocator_traits::allocate
static pointer allocate(Alloc &a, size_type n)
Definition: _allocator_traits.h:102
tbb::split
Dummy type that distinguishes splitting constructor from copy constructor.
Definition: tbb_stddef.h:416
tbb::interface5::concurrent_hash_map::node_scoped_guard::~node_scoped_guard
~node_scoped_guard()
Definition: concurrent_hash_map.h:634
tbb_profiling.h
_template_helpers.h
tbb::interface5::concurrent_hash_map::my_allocator
node_allocator_type my_allocator
Definition: concurrent_hash_map.h:613
tbb::interface5::concurrent_hash_map::erase
bool erase(accessor &item_accessor)
Erase item by accessor.
Definition: concurrent_hash_map.h:1137
tbb::operator-
tick_count::interval_t operator-(const tick_count &t1, const tick_count &t0)
Definition: tick_count.h:126
tbb::interface5::internal::operator!=
bool operator!=(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
Definition: concurrent_hash_map.h:461
tbb::interface5::internal::hash_map_base::my_embedded_segment
bucket my_embedded_segment[embedded_buckets]
Zero segment.
Definition: concurrent_hash_map.h:120
tbb::interface5::concurrent_hash_map::bucket_accessor::operator()
bucket * operator()()
get bucket pointer
Definition: concurrent_hash_map.h:719
_tbb_hash_compare_impl.h
aligned_space.h
tbb::interface5::concurrent_hash_map::const_accessor::my_hash
hashcode_t my_hash
Definition: concurrent_hash_map.h:808
tbb::interface5::concurrent_hash_map::accessor_not_used
Definition: concurrent_hash_map.h:1145
tbb::interface5::internal::hash_map_range::empty
bool empty() const
True if range is empty.
Definition: concurrent_hash_map.h:486
tbb::interface5::internal::hash_map_range::hash_map_range
hash_map_range(hash_map_range< U > &r)
type conversion
Definition: concurrent_hash_map.h:505
tbb::interface5::concurrent_hash_map::bucket_accessor::bucket_accessor
bucket_accessor(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
Definition: concurrent_hash_map.h:703
tbb::interface5::concurrent_hash_map::range
const_range_type range(size_type grainsize=1) const
Definition: concurrent_hash_map.h:979
tbb::interface5::concurrent_hash_map::allocate_node_default_construct
static node * allocate_node_default_construct(node_allocator_type &allocator, const Key &key, const T *)
Definition: concurrent_hash_map.h:673
tbb::interface5::concurrent_hash_map::size
size_type size() const
Number of items in table.
Definition: concurrent_hash_map.h:994
tbb::interface5::internal::hash_map_base::segment_size
static size_type segment_size(segment_index_t k)
Definition: concurrent_hash_map.h:154
tbb::interface5::internal::hash_map_base::internal_move
void internal_move(hash_map_base &&other)
Definition: concurrent_hash_map.h:320
tbb::interface5::concurrent_hash_map::internal_copy
void internal_copy(const concurrent_hash_map &source)
Copy "source" to *this, where *this must start out empty.
Definition: concurrent_hash_map.h:1577
tbb::interface5::internal::hash_map_base::check_rehashing_collision
bool check_rehashing_collision(const hashcode_t h, hashcode_t m_old, hashcode_t m) const
Process mask race, check for rehashing collision.
Definition: concurrent_hash_map.h:263
tbb::interface5::internal::hash_map_iterator::map_type
Container map_type
Definition: concurrent_hash_map.h:349
tbb::interface5::concurrent_hash_map::insert
void insert(I first, I last)
Insert range [first, last)
Definition: concurrent_hash_map.h:1113
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(I first, I last, const allocator_type &a=allocator_type())
Construction with copying iteration range and given allocator instance.
Definition: concurrent_hash_map.h:893
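
A construction sketch from an existing sequence; std::vector and the helper name are illustrative.

    #include <vector>
    #include <utility>

    table_t make_table(const std::vector<std::pair<std::string, int> > &src) {
        return table_t(src.begin(), src.end());  // copies [first, last) into the new map
    }
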
tbb::interface5::concurrent_hash_map::begin
const_iterator begin() const
Definition: concurrent_hash_map.h:988
tbb::interface5::concurrent_hash_map::node::storage
value_type * storage()
Definition: concurrent_hash_map.h:619
tbb::interface5::concurrent_hash_map::call_clear_on_leave::dismiss
void dismiss()
Definition: concurrent_hash_map.h:758
tbb::interface5::concurrent_hash_map::bucket_accessor::is_writer
bool is_writer()
check whether bucket is locked for write
Definition: concurrent_hash_map.h:717
tbb::interface5::concurrent_hash_map::do_not_allocate_node
static node * do_not_allocate_node(node_allocator_type &, const Key &, const T *)
Definition: concurrent_hash_map.h:686
tbb::interface5::concurrent_hash_map::difference_type
ptrdiff_t difference_type
Definition: concurrent_hash_map.h:597
tbb::interface5::internal::hash_map_iterator::my_bucket
const bucket * my_bucket
Pointer to bucket.
Definition: concurrent_hash_map.h:398
tbb::interface5::concurrent_hash_map::range_type
internal::hash_map_range< iterator > range_type
Definition: concurrent_hash_map.h:604
tbb::interface5::internal::hash_map_iterator::hash_map_iterator
hash_map_iterator()
Construct undefined iterator.
Definition: concurrent_hash_map.h:407
tbb::interface5::internal::hash_map_iterator::my_map
const Container * my_map
concurrent_hash_map over which we are iterating.
Definition: concurrent_hash_map.h:392
tbb::interface5::internal::hash_map_node_base::scoped_t
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
Definition: concurrent_hash_map.h:72
tbb::operator==
bool operator==(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
Definition: cache_aligned_allocator.h:129
tbb::interface5::concurrent_hash_map::find
bool find(accessor &result, const Key &key)
Find item and acquire a write lock on the item.
Definition: concurrent_hash_map.h:1029
tbb_exception.h
tbb::interface5::internal::hash_map_base::segment_base
static segment_index_t segment_base(segment_index_t k)
Definition: concurrent_hash_map.h:149
tbb::interface5::concurrent_hash_map::const_reference
typedef const value_type & const_reference
Definition: concurrent_hash_map.h:601
tbb::move
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:319
tbb::interface5::internal::rehash_req
static hash_map_node_base *const rehash_req
Incompleteness flag value.
Definition: concurrent_hash_map.h:78
tbb::internal::__TBB_store_with_release
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:713
tbb::interface5::internal::hash_map_range::begin
const Iterator & begin() const
Definition: concurrent_hash_map.h:520
tbb::tbb_allocator
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
Definition: tbb_allocator.h:58
tbb::interface5::internal::hash_map_base::internal_swap
void internal_swap(hash_map_base &table)
Swap hash_map_bases.
Definition: concurrent_hash_map.h:309
tbb::interface5::concurrent_hash_map::is_write_access_needed
friend bool is_write_access_needed(accessor const &)
Definition: concurrent_hash_map.h:1149
tbb::interface5::concurrent_hash_map::const_pointer
typedef const value_type * const_pointer
Definition: concurrent_hash_map.h:599
tbb::interface5::internal::hash_map_range::reference
Iterator::reference reference
Definition: concurrent_hash_map.h:481
tbb::interface5::concurrent_hash_map::internal_move_assign
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_true_type)
Definition: concurrent_hash_map.h:1185
tbb::interface5::internal::hash_map_iterator::node_base
hash_map_base::node_base node_base
Definition: concurrent_hash_map.h:351
tbb::interface5::concurrent_hash_map::node_scoped_guard::node_scoped_guard
node_scoped_guard(node *n, node_allocator_type &alloc)
Definition: concurrent_hash_map.h:633
atomic.h
__TBB_Yield
#define __TBB_Yield()
Definition: ibm_aix51.h:44
tbb::interface5::internal::hash_map_base::enable_segment_failsafe::my_segment_ptr
segment_ptr_t * my_segment_ptr
Definition: concurrent_hash_map.h:181
__TBB_USE_OPTIONAL_RTTI
#define __TBB_USE_OPTIONAL_RTTI
Definition: tbb_config.h:125
tbb::internal::atomic_backoff
Class that implements exponential backoff.
Definition: tbb_machine.h:345
tbb::interface5::internal::hash_map_range::is_divisible
bool is_divisible() const
True if range can be partitioned into two subranges.
Definition: concurrent_hash_map.h:489
tbb::interface5::concurrent_hash_map::call_clear_on_leave::~call_clear_on_leave
~call_clear_on_leave()
Definition: concurrent_hash_map.h:759
tbb::interface5::concurrent_hash_map::is_write_access_needed
friend bool is_write_access_needed(accessor_not_used const &)
Definition: concurrent_hash_map.h:1151
tbb::interface5::internal::hash_map_iterator::bucket
hash_map_base::bucket bucket
Definition: concurrent_hash_map.h:352
tbb::interface5::concurrent_hash_map::size_type
hash_map_base::size_type size_type
Definition: concurrent_hash_map.h:596
tbb::interface5::concurrent_hash_map::call_clear_on_leave::call_clear_on_leave
call_clear_on_leave(concurrent_hash_map *a_ch_map)
Definition: concurrent_hash_map.h:757
tbb::interface5::internal::hash_map_iterator::hash_map_iterator
hash_map_iterator(const hash_map_iterator< Container, typename Container::value_type > &other)
Definition: concurrent_hash_map.h:408
tbb::interface5::concurrent_hash_map::pointer
value_type * pointer
Definition: concurrent_hash_map.h:598
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(size_type n, const allocator_type &a=allocator_type())
Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
Definition: concurrent_hash_map.h:839
tbb::interface5::concurrent_hash_map::bucket_count
size_type bucket_count() const
Returns the current number of buckets.
Definition: concurrent_hash_map.h:1003
tbb::interface5::concurrent_hash_map::generic_move_insert
bool generic_move_insert(Accessor &&result, value_type &&value)
Definition: concurrent_hash_map.h:1155
tbb::internal::no_copy
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
tbb::interface5::concurrent_hash_map::node_scoped_guard::dismiss
void dismiss()
Definition: concurrent_hash_map.h:640
_warning_suppress_disable_notice.h
tbb::interface5::concurrent_hash_map::allocator_type
Allocator allocator_type
Definition: concurrent_hash_map.h:606
tbb::interface5::internal::hashcode_t
size_t hashcode_t
Type of a hash code.
Definition: concurrent_hash_map.h:66
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(I first, I last, const HashCompare &compare, const allocator_type &a=allocator_type())
Definition: concurrent_hash_map.h:902
tbb::interface5::internal::hash_map_range::value_type
Iterator::value_type value_type
Definition: concurrent_hash_map.h:480
tbb::interface5::concurrent_hash_map::const_accessor
friend class const_accessor
Definition: concurrent_hash_map.h:609
tbb::interface5::concurrent_hash_map::node::my_value
tbb::aligned_space< value_type > my_value
Definition: concurrent_hash_map.h:617
tbb::internal::bool_constant
Definition: tbb_stddef.h:486
tbb::interface5::concurrent_hash_map::node_allocator_type
tbb::internal::allocator_rebind< Allocator, node >::type node_allocator_type
Definition: concurrent_hash_map.h:610
_allocator_traits.h
tbb::interface5::internal::hash_map_base::is_valid
static bool is_valid(void *ptr)
Definition: concurrent_hash_map.h:159
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(const concurrent_hash_map &table, const allocator_type &a)
Definition: concurrent_hash_map.h:861
tbb::interface5::internal::hash_map_base::segment_index_of
static segment_index_t segment_index_of(size_type index)
Definition: concurrent_hash_map.h:144
tbb::interface5::concurrent_hash_map::erase
bool erase(const Key &key)
Erase item.
Definition: concurrent_hash_map.h:1386
tbb::interface5::concurrent_hash_map::call_clear_on_leave
Definition: concurrent_hash_map.h:755
tbb::tbb_hash_compare
hash_compare that is default argument for concurrent_hash_map
Definition: _tbb_hash_compare_impl.h:99
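
A sketch of a user-supplied HashCompare; hash() and equal() are the whole contract, and the case-insensitive policy below is purely illustrative.

    #include <cctype>
    #include <cstddef>
    #include <string>

    struct CaseInsensitiveHashCompare {      // hypothetical replacement for tbb_hash_compare
        static size_t hash(const std::string &s) {
            size_t h = 0;
            for (size_t i = 0; i < s.size(); ++i)
                h = h * 31 + (size_t)std::tolower((unsigned char)s[i]);
            return h;
        }
        static bool equal(const std::string &a, const std::string &b) {
            if (a.size() != b.size()) return false;
            for (size_t i = 0; i < a.size(); ++i)
                if (std::tolower((unsigned char)a[i]) != std::tolower((unsigned char)b[i]))
                    return false;
            return true;
        }
    };

    typedef tbb::concurrent_hash_map<std::string, int, CaseInsensitiveHashCompare> ci_table_t;
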
tbb::internal::itt_hide_store_word
void itt_hide_store_word(T &dst, T src)
Definition: tbb_profiling.h:210
tbb::internal::allocator_traits
Definition: _allocator_traits.h:82
tbb::internal::allocator_traits::propagate_on_container_move_assignment
tbb::internal::false_type propagate_on_container_move_assignment
Definition: _allocator_traits.h:86
tbb::interface5::internal::hash_map_range::my_grainsize
size_t my_grainsize
Definition: concurrent_hash_map.h:473
__TBB_Log2
intptr_t __TBB_Log2(uintptr_t x)
Definition: tbb_machine.h:860
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(std::initializer_list< value_type > il, const HashCompare &compare, const allocator_type &a=allocator_type())
Definition: concurrent_hash_map.h:920
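
A construction sketch using an initializer list with an explicit comparator (initializer-list support assumed; the contents are illustrative):

    table_t make_defaults() {
        return table_t({ {"one", 1}, {"two", 2} },
                       tbb::tbb_hash_compare<std::string>());
    }
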
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(const concurrent_hash_map &table)
Copy constructor.
Definition: concurrent_hash_map.h:852
tbb::interface5::internal::hash_map_range::end
const Iterator & end() const
Definition: concurrent_hash_map.h:521
tbb::interface5::internal::hash_map_iterator::my_index
size_t my_index
Index in hash table for current item.
Definition: concurrent_hash_map.h:395
tbb::interface5::concurrent_hash_map::exclude
bool exclude(const_accessor &item_accessor)
delete item by accessor
Definition: concurrent_hash_map.h:1356
tbb::interface5::concurrent_hash_map::count
size_type count(const Key &key) const
Return count of items (0 or 1)
Definition: concurrent_hash_map.h:1016
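
count() answers a pure existence query without retaining an accessor; a one-line sketch reusing the illustrative table_t:

    bool contains(const table_t &table, const std::string &key) {
        return table.count(key) != 0;        // 0 or 1 for this map (keys are unique)
    }
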
tbb::interface5::internal::hash_map_iterator::my_node
node * my_node
Pointer to node that has current item.
Definition: concurrent_hash_map.h:401
tbb::interface5::concurrent_hash_map::const_accessor::~const_accessor
~const_accessor()
Destroy result after releasing the underlying reference.
Definition: concurrent_hash_map.h:802
tbb::interface5::concurrent_hash_map::concurrent_hash_map
concurrent_hash_map(const allocator_type &a=allocator_type())
Construct empty table.
Definition: concurrent_hash_map.h:830
tbb::interface5::concurrent_hash_map::bucket_accessor::acquire
void acquire(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
find a bucket by masked hashcode, optionally rehash, and acquire the lock
Definition: concurrent_hash_map.h:705
tbb::spin_rw_mutex_v3
Fast, unfair, spinning reader-writer lock with backoff and writer-preference.
Definition: spin_rw_mutex.h:38
tbb::interface5::concurrent_hash_map::mapped_type
T mapped_type
Definition: concurrent_hash_map.h:594
tbb::interface5::concurrent_hash_map::accessor_location
friend const_accessor * accessor_location(accessor_not_used const &)
Definition: concurrent_hash_map.h:1146
tbb::interface5::concurrent_hash_map::accessor_not_used::release
void release()
Definition: concurrent_hash_map.h:1145
tbb::interface5::concurrent_hash_map::create_node
static node * create_node(node_allocator_type &allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
Definition: concurrent_hash_map.h:648
tbb::interface5::internal::hash_map_iterator::hash_map_iterator
friend class hash_map_iterator
Definition: concurrent_hash_map.h:364
tbb::interface5::concurrent_hash_map::is_write_access_needed
friend bool is_write_access_needed(const_accessor const &)
Definition: concurrent_hash_map.h:1150
tbb::interface5::concurrent_hash_map::accessor_location
friend const_accessor * accessor_location(const_accessor &a)
Definition: concurrent_hash_map.h:1147
tbb::interface5::concurrent_hash_map::node_scoped_guard
Definition: concurrent_hash_map.h:629
tbb::interface5::internal::hash_map_base::bucket::mutex
mutex_t mutex
Definition: concurrent_hash_map.h:98
tbb::interface5::internal::hash_map_range::hash_map_range
hash_map_range(const map_type &map, size_type grainsize_=1)
Init range with container and grainsize specified.
Definition: concurrent_hash_map.h:512
tbb::interface5::internal::hash_map_base::size_type
size_t size_type
Size type.
Definition: concurrent_hash_map.h:85
tbb::internal::as_atomic
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
tbb::interface5::internal::hash_map_range
Range class used with concurrent_hash_map.
Definition: concurrent_hash_map.h:340
tbb::interface5::concurrent_hash_map::equal_range
std::pair< iterator, iterator > equal_range(const Key &key)
Definition: concurrent_hash_map.h:990
tbb::interface5::concurrent_hash_map::delete_node
void delete_node(node_base *n)
Definition: concurrent_hash_map.h:623
tbb::interface5::internal::hash_map_range::map_type
Iterator::map_type map_type
Definition: concurrent_hash_map.h:469
tbb::interface5::concurrent_hash_map::range
range_type range(size_type grainsize=1)
Definition: concurrent_hash_map.h:976
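
A parallel-traversal sketch over the map's range with tbb::parallel_for; it assumes no concurrent insertion or erasure is in flight during traversal.

    #include "tbb/parallel_for.h"

    void double_all(table_t &table) {
        tbb::parallel_for(table.range(), [](const table_t::range_type &r) {
            for (table_t::range_type::iterator it = r.begin(); it != r.end(); ++it)
                it->second *= 2;             // iterators do not lock elements
        });
    }
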
tbb::interface5::concurrent_hash_map::insert
bool insert(const_accessor &result, const Key &key)
Insert item (if not already present) and acquire a read lock on the item.
Definition: concurrent_hash_map.h:1036
tbb::interface5::concurrent_hash_map::swap
void swap(concurrent_hash_map &table)
swap two instances. Iterators are invalidated
Definition: concurrent_hash_map.h:1423
tbb::interface5::concurrent_hash_map::my_hash_compare
HashCompare my_hash_compare
Definition: concurrent_hash_map.h:614
tbb::interface5::concurrent_hash_map::internal_fast_find
const_pointer internal_fast_find(const Key &key) const
Fast find when no concurrent erasure is used. For internal use inside TBB only!
Definition: concurrent_hash_map.h:1203
tbb::interface5::internal::hash_map_base::add_to_bucket
static void add_to_bucket(bucket *b, node_base *n)
Add node.
Definition: concurrent_hash_map.h:173
tbb::interface5::concurrent_hash_map::node_allocator_traits
tbb::internal::allocator_traits< node_allocator_type > node_allocator_traits
Definition: concurrent_hash_map.h:612
tbb::internal
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:65
tbb_stddef.h
tbb::interface5::concurrent_hash_map::emplace
bool emplace(const_accessor &result, Args &&... args)
Insert item constructed in place from args if there is no such key present already, and acquire a read lock on the item.
Definition: concurrent_hash_map.h:1091
_warning_suppress_enable_notice.h
tbb::interface5::internal::hash_map_iterator::node
Container::node node
Definition: concurrent_hash_map.h:350
tbb::internal::itt_hide_load_word
T itt_hide_load_word(const T &src)
Definition: tbb_profiling.h:222
tbb::interface5::internal::hash_map_base::bucket::scoped_t
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
Definition: concurrent_hash_map.h:97
tbb::interface5::internal::hash_map_base::bucket
Bucket type.
Definition: concurrent_hash_map.h:93
tbb::interface5::internal::hash_map_range::set_midpoint
void set_midpoint() const
Set my_midpoint to point approximately half way between my_begin and my_end.
Definition: concurrent_hash_map.h:527
tbb::interface5::concurrent_hash_map::rehash
void rehash(size_type n=0)
Rehashes and optionally resizes the whole table.
Definition: concurrent_hash_map.h:1434
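
A pre-sizing sketch: growing the bucket array before a bulk load avoids rehashing during concurrent insertion (the figure is illustrative, and rehash itself must not run concurrently with other operations).

    void prepare_for_bulk_load(table_t &table) {
        table.rehash(1u << 20);              // hypothetical expected element count
    }
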
tbb::interface5::internal::hash_map_range::grainsize
size_type grainsize() const
The grain size for this range.
Definition: concurrent_hash_map.h:523
tbb::interface5::concurrent_hash_map
Unordered map from Key to T.
Definition: concurrent_hash_map.h:58
tbb::interface5::concurrent_hash_map::generic_emplace
bool generic_emplace(Accessor &&result, Args &&... args)
Definition: concurrent_hash_map.h:1162
tbb::swap
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
Definition: concurrent_hash_map.h:1638
tbb::release
@ release
Release.
Definition: atomic.h:59
tbb::interface5::internal::hash_map_base::node_base
hash_map_node_base node_base
Node base type.
Definition: concurrent_hash_map.h:91
tbb_allocator.h
tbb::interface5::internal::operator==
bool operator==(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
Definition: concurrent_hash_map.h:456
tbb::interface5::internal::hash_map_base::delete_segment
void delete_segment(segment_index_t s, const Allocator &allocator)
Definition: concurrent_hash_map.h:218
tbb::interface5::concurrent_hash_map::accessor::value_type
concurrent_hash_map::value_type value_type
Type of value.
Definition: concurrent_hash_map.h:815
tbb::interface5::internal::hash_map_range::my_midpoint
Iterator my_midpoint
Definition: concurrent_hash_map.h:472
tbb::interface5::concurrent_hash_map::empty
bool empty() const
True if size()==0.
Definition: concurrent_hash_map.h:997
tbb::interface5::concurrent_hash_map::const_accessor::release
void release()
Set to null.
Definition: concurrent_hash_map.h:780
tbb::interface5::internal::hash_map_base::segment_index_t
size_t segment_index_t
Segment index type.
Definition: concurrent_hash_map.h:89
tbb::internal::allocator_rebind::type
allocator_traits< Alloc >::template rebind_alloc< T >::other type
Definition: _allocator_traits.h:149
tbb::interface5::internal::hash_map_iterator::operator=
hash_map_iterator & operator=(const hash_map_iterator< Container, typename Container::value_type > &other)
Definition: concurrent_hash_map.h:415
tbb::interface5::concurrent_hash_map::begin
iterator begin()
Definition: concurrent_hash_map.h:986
tbb::interface5::concurrent_hash_map::const_accessor::is_writer
bool is_writer()
Definition: concurrent_hash_map.h:806
tbb::interface5::concurrent_hash_map::accessor
Allows write access to elements and combines data access, locking, and garbage collection.
Definition: concurrent_hash_map.h:812
tbb::interface5::internal::hash_map_node_base::next
hash_map_node_base * next
Next node in chain.
Definition: concurrent_hash_map.h:74
tbb::interface5::concurrent_hash_map::const_accessor::my_node
node * my_node
Definition: concurrent_hash_map.h:807
tbb::interface5::internal::hash_map_range::iterator
Iterator iterator
Definition: concurrent_hash_map.h:483
tbb::interface5::internal::hash_map_range::size_type
std::size_t size_type
Type for size of a range.
Definition: concurrent_hash_map.h:479
tbb::interface5::internal::hash_map_base::enable_segment_failsafe::~enable_segment_failsafe
~enable_segment_failsafe()
Definition: concurrent_hash_map.h:183
