Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
17 #ifndef __TBB_concurrent_hash_map_H
18 #define __TBB_concurrent_hash_map_H
20 #define __TBB_concurrent_hash_map_H_include_area
27 #include __TBB_STD_SWAP_HEADER
38 #if __TBB_INITIALIZER_LISTS_PRESENT
39 #include <initializer_list>
41 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
47 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
55 namespace interface5 {
57 template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<const Key, T> > >
104 static size_type const embedded_buckets = 1<<embedded_block;
120 bucket my_embedded_segment[embedded_buckets];
122 atomic<unsigned> my_info_resizes;
123 mutable atomic<unsigned> my_info_restarts;
124 atomic<unsigned> my_info_rehashes;
128 std::memset(my_table, 0, sizeof(my_table));
131 std::memset(my_embedded_segment, 0, sizeof(my_embedded_segment));
132 for( size_type i = 0; i < embedded_block; i++ )
133 my_table[i] = my_embedded_segment + segment_base(i);
134 my_mask = embedded_buckets - 1;
135 __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
138 my_info_restarts = 0;
139 my_info_rehashes = 0;
160 return reinterpret_cast<uintptr_t>(ptr) > uintptr_t(63);
165 if( is_initial ) std::memset( static_cast<void*>(ptr), 0, sz*sizeof(bucket) );
166 else for( size_type i = 0; i < sz; i++, ptr++) {
167 *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
184 if( my_segment_ptr ) *my_segment_ptr = 0;
189 template<typename Allocator>
193 bucket_allocator_type bucket_allocator(allocator);
197 __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
198 if( k >= first_block ) {
199 sz = segment_size( k );
200 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz);
201 init_buckets( ptr, sz, is_initial );
205 __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
206 sz = segment_size( first_block );
207 segment_ptr_t ptr = bucket_allocator_traits::allocate(bucket_allocator, sz - embedded_buckets);
208 init_buckets( ptr, sz - embedded_buckets, is_initial );
209 ptr -= segment_base(embedded_block);
217 template<typename Allocator>
221 bucket_allocator_type bucket_allocator(allocator);
225 if( s >= first_block)
226 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr, sz);
227 else if( s == embedded_block && embedded_block != first_block )
228 bucket_allocator_traits::deallocate(bucket_allocator, buckets_ptr,
229 segment_size(first_block) - embedded_buckets);
230 if( s >= embedded_block ) my_table[s] = 0;
236 h -= segment_base(s);
238 __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
258 return check_rehashing_collision( h, m_old, m = m_now );
265 if( (h & m_old) != (h & m) ) {
268 for( ++m_old; !(h & m_old); m_old <<= 1 )
270 m_old = (m_old<<1) - 1;
287 add_to_bucket( b, n );
291 __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
294 && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL )
301 template<typename Allocator>
303 if( !buckets-- ) return;
304 bool is_initial = !my_size;
305 for( size_type m = my_mask; buckets > m; m = my_mask )
306 enable_segment( segment_index_of( m+1 ), allocator, is_initial );
313 for( size_type i = 0; i < embedded_buckets; i++)
315 for( size_type i = embedded_block; i < pointers_per_table; i++)
319 #if __TBB_CPP11_RVALUE_REF_PRESENT
321 my_mask = other.my_mask;
322 other.my_mask = embedded_buckets - 1;
323 my_size = other.my_size;
326 for( size_type i = 0; i < embedded_buckets; ++i) {
327 my_embedded_segment[i].node_list = other.my_embedded_segment[i].node_list;
328 other.my_embedded_segment[i].node_list = NULL;
331 for( size_type i = embedded_block; i < pointers_per_table; ++i) {
332 my_table[i] = other.my_table[i];
333 other.my_table[i] = NULL;
336 #endif // __TBB_CPP11_RVALUE_REF_PRESENT
339 template<typename Iterator>
345 template<typename Container, typename Value>
347 : public std::iterator<std::forward_iterator_tag,Value>
350 typedef typename Container::node node;
354 template<typename C, typename T, typename U>
357 template<typename C, typename T, typename U>
360 template<typename C, typename T, typename U>
363 template<typename C, typename U>
370 size_t k = my_index+1;
371 __TBB_ASSERT( my_bucket, "advancing an invalid iterator?");
372 while( k <= my_map->my_mask ) {
376 else my_bucket = my_map->get_bucket( k );
377 my_node = static_cast<node*>( my_bucket->node_list );
379 my_index = k; return;
383 my_bucket = 0; my_node = 0; my_index = k;
385 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
386 template<typename Key, typename T, typename HashCompare, typename A>
391 const Container *my_map;
409 my_map(other.my_map),
410 my_index(other.my_index),
411 my_bucket(other.my_bucket),
412 my_node(other.my_node)
424 return my_node->value();
437 template<typename Container, typename Value>
442 my_node( static_cast<node*>(n) )
448 template<typename Container, typename Value>
450 my_node = static_cast<node*>( my_node->next );
451 if( !my_node ) advance_to_next_bucket();
455 template<typename Container, typename T, typename U>
460 template<typename Container, typename T, typename U>
467 template<typename Iterator>
468 class hash_map_range {
497 r.my_end = my_begin = r.my_midpoint;
499 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
513 my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
514 my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
517 __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
526 template<typename Iterator>
529 size_t m = my_end.my_index-my_begin.my_index;
530 if( m > my_grainsize ) {
531 m = my_begin.my_index + m/2u;
533 my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
535 my_midpoint = my_end;
537 __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, "my_begin is after my_midpoint" );
539 __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, "my_midpoint is after my_end" );
541 __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
542 "[my_begin, my_midpoint) range should not be empty" );
548 #if _MSC_VER && !defined(__INTEL_COMPILER)
550 #pragma warning( push )
551 #pragma warning( disable: 4127 )
584 template<typename Key, typename T, typename HashCompare, typename Allocator>
586 template<typename Container, typename Value>
616 class node : public node_base {
643 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
644 template<typename... Args>
647 template<typename Arg1, typename Arg2>
654 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
667 #if __TBB_CPP11_RVALUE_REF_PRESENT
674 #if __TBB_CPP11_RVALUE_REF_PRESENT && __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_TUPLE_PRESENT
676 return create_node(allocator, std::piecewise_construct,
677 std::forward_as_tuple(key), std::forward_as_tuple());
687 __TBB_ASSERT(false, "this dummy function should not be called");
692 node *n = static_cast<node*>( b->node_list );
694 n = static_cast<node*>( n->next );
706 my_b = base->get_bucket( h );
709 && try_acquire( my_b->mutex, true ) )
717 bool is_writer() { return bucket::scoped_t::is_writer; }
724 __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
725 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
741 bmask = bmask==0? 1 : ( 1u<<( __TBB_Log2( bmask )+1 ) ) - 1;
742 __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
744 if( (c & mask) == h ) {
746 if( !b_old.upgrade_to_writer() ) {
750 add_to_bucket( b_new, n );
869 #if __TBB_CPP11_RVALUE_REF_PRESENT
881 if (a == table.get_allocator()){
885 internal_copy(std::make_move_iterator(table.begin()), std::make_move_iterator(table.end()), table.size());
889 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
910 #if __TBB_INITIALIZER_LISTS_PRESENT
915 call_clear_on_leave scope_guard(this);
917 scope_guard.dismiss();
928 #endif //__TBB_INITIALIZER_LISTS_PRESENT
941 #if __TBB_CPP11_RVALUE_REF_PRESENT
950 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
952 #if __TBB_INITIALIZER_LISTS_PRESENT
959 #endif //__TBB_INITIALIZER_LISTS_PRESENT
997 bool empty() const { return my_size == 0; }
1068 #if __TBB_CPP11_RVALUE_REF_PRESENT
1087 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1090 template<typename... Args>
1097 template<typename... Args>
1104 template<typename... Args>
1108 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1109 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1112 template<typename I>
1118 #if __TBB_INITIALIZER_LISTS_PRESENT
1119 void insert( std::initializer_list<value_type> il ) {
1121 insert( il.begin(), il.end() );
1123 #endif //__TBB_INITIALIZER_LISTS_PRESENT
1132 return exclude( item_accessor );
1138 return exclude( item_accessor );
1153 #if __TBB_CPP11_RVALUE_REF_PRESENT
1154 template<typename Accessor>
1160 #if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1161 template<typename Accessor, typename... Args>
1167 #endif //__TBB_CPP11_VARIADIC_TEMPLATES_PRESENT
1168 #endif //__TBB_CPP11_RVALUE_REF_PRESENT
1174 template<typename I>
1180 template<typename I>
1183 #if __TBB_CPP11_RVALUE_REF_PRESENT
1191 if (this->my_allocator == other.my_allocator) {
1195 internal_copy(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), other.size());
1208 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1209 bucket *b = get_bucket( h & m );
1213 bucket::scoped_t lock;
1214 if( lock.try_acquire( b->mutex, true ) ) {
1218 else lock.acquire( b->mutex, false );
1224 else if( check_mask_race( h, m ) )
1230 #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT
1234 template<template<typename...> typename Map, typename Key, typename T, typename... Args>
1235 using hash_map_t = Map<
1237 std::conditional_t< (sizeof...(Args)>0) && !is_allocator_v< pack_element_t<0, Args...> >,
1239 std::conditional_t< (sizeof...(Args)>0) && is_allocator_v< pack_element_t<sizeof...(Args)-1, Args...> >,
1245 template<typename I, typename... Args>
1246 concurrent_hash_map(I, I, Args...)
1247 -> internal::hash_map_t<concurrent_hash_map, internal::iterator_key_t<I>,internal::iterator_mapped_t<I>, Args...>;
1251 template<typename Key, typename T, typename CompareOrAllocator>
1252 concurrent_hash_map(std::initializer_list<std::pair<const Key, T>>, CompareOrAllocator)
1253 -> internal::hash_map_t<concurrent_hash_map, Key, T, CompareOrAllocator>;
1257 template<typename Key, typename T, typename HashCompare, typename A>
1258 bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node* (*allocate_node)(node_allocator_type&, const Key&, const T*), node *tmp_n ) {
1263 segment_index_t grow_segment = 0;
1267 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1268 return_value = false;
1273 n = search_bucket( key, b() );
1278 tmp_n = allocate_node(my_allocator, key, t);
1280 if( !b.is_writer() && !b.upgrade_to_writer() ) {
1282 n = search_bucket( key, b() );
1284 b.downgrade_to_reader();
1288 if( check_mask_race( h, m) )
1291 grow_segment = insert_new_node( b(), n = tmp_n, m );
1293 return_value = true;
1297 if( check_mask_race( h, m ) )
1301 return_value = true;
1304 if( !result ) goto check_growth;
1307 if( !result->try_acquire( n->mutex, write ) ) {
1309 if( result->try_acquire( n->mutex, write ) ) break;
1310 if( !backoff.bounded_pause() ) {
1313 __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
1325 if( grow_segment ) {
1326 #if __TBB_STATISTICS
1329 enable_segment( grow_segment, my_allocator );
1332 delete_node( tmp_n );
1333 return return_value;
1336 template<typename Key, typename T, typename HashCompare, typename A>
1337 template<typename I>
1341 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1343 bucket *b = get_bucket( h );
1346 b = get_bucket( h &= m );
1348 node *n = search_bucket( key, b );
1350 return std::make_pair(end_, end_);
1351 iterator lower(*this, h, b, n), upper(lower);
1352 return std::make_pair(lower, ++upper);
1355 template<typename Key, typename T, typename HashCompare, typename A>
1358 node_base *const n = item_accessor.my_node;
1364 node_base **p = &b()->node_list;
1365 while( *p && *p != n )
1368 if( check_mask_race( h, m ) )
1379 item_accessor.upgrade_to_writer();
1385 template<typename Key, typename T, typename HashCompare, typename A>
1395 node_base **p = &b()->node_list;
1397 while( is_valid(n) && !my_hash_compare.equal( key, static_cast<node*>(n)->value().first ) ) {
1402 if( check_mask_race( h, m ) )
1406 else if( !b.is_writer() && !b.upgrade_to_writer() ) {
1407 if( check_mask_race( h, m ) )
1415 typename node::scoped_t item_locker( n->mutex, true );
1422 template<typename Key, typename T, typename HashCompare, typename A>
1429 internal_swap(table);
1433 template<typename Key, typename T, typename HashCompare, typename A>
1435 reserve( sz, my_allocator );
1439 bucket *bp = get_bucket( b );
1440 for(; b <= mask; b++, bp++ ) {
1441 node_base *n = bp->node_list;
1443 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1447 __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
1449 b_old = get_bucket( h &= m );
1452 mark_rehashed_levels( h );
1453 for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
1455 if( (c & mask) != h ) {
1457 bucket *b_new = get_bucket( c & mask );
1459 add_to_bucket( b_new, q );
1460 } else p = &q->next;
1464 #if TBB_USE_PERFORMANCE_WARNINGS
1465 int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0;
1466 static bool reported = false;
1468 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1469 for( b = 0; b <= mask; b++ ) {
1470 if( b & (b-2) ) ++bp;
1471 else bp = get_bucket( b );
1472 node_base *n = bp->node_list;
1473 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
1475 #if TBB_USE_PERFORMANCE_WARNINGS
1477 else if( n->next ) overpopulated_buckets++;
1480 for( ; is_valid(n); n = n->next ) {
1482 __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
1486 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
1487 #if TBB_USE_PERFORMANCE_WARNINGS
1488 if( buckets > current_size) empty_buckets -= buckets - current_size;
1489 else overpopulated_buckets -= current_size - buckets;
1490 if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1492 "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1494 typeid(*this).name(),
1496 "concurrent_hash_map",
1498 current_size, empty_buckets, overpopulated_buckets );
1504 template<typename Key, typename T, typename HashCompare, typename A>
1507 __TBB_ASSERT((m&(m+1))==0, "data structure is invalid");
1508 #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1509 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1510 int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0;
1511 static bool reported = false;
1515 for( segment_index_t b = 0; b <= m; b++ ) {
1516 if( b & (b-2) ) ++bp;
1517 else bp = get_bucket( b );
1518 node_base *n = bp->node_list;
1520 __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
1521 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1524 else if( n->next ) overpopulated_buckets++;
1526 #if __TBB_EXTRA_DEBUG
1527 for(; is_valid(n); n = n->next ) {
1534 #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1535 #if __TBB_STATISTICS
1536 printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d"
1537 " concurrent: resizes=%u rehashes=%u restarts=%u\n",
1538 current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets,
1539 unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) );
1540 my_info_resizes = 0;
1541 my_info_restarts = 0;
1542 my_info_rehashes = 0;
1544 if( buckets > current_size) empty_buckets -= buckets - current_size;
1545 else overpopulated_buckets -= current_size - buckets;
1546 if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
1548 "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
1550 typeid(*this).name(),
1552 "concurrent_hash_map",
1554 current_size, empty_buckets, overpopulated_buckets );
1558 #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
1560 segment_index_t s = segment_index_of( m );
1561 __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
1563 __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
1564 segment_ptr_t buckets_ptr = my_table[s];
1566 for( segment_index_t i = 0; i < sz; i++ )
1567 for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
1568 buckets_ptr[i].node_list = n->next;
1571 delete_segment( s, my_allocator);
1573 my_mask = embedded_buckets - 1;
1576 template<typename Key, typename T, typename HashCompare, typename A>
1579 if( my_mask == mask ) {
1580 reserve( source.my_size, my_allocator );
1581 bucket *dst = 0, *src = 0;
1582 bool rehash_required = false;
1584 if( k & (k-2) ) ++dst,src++;
1585 else { dst = get_bucket( k ); src = source.get_bucket( k ); }
1587 node *n = static_cast<node*>( src->node_list );
1589 rehash_required = true;
1591 } else for(; n; n = static_cast<node*>( n->next ) ) {
1592 node* node_ptr = create_node(my_allocator, n->value().first, n->value().second);
1593 add_to_bucket( dst, node_ptr);
1597 if( rehash_required ) rehash();
1598 } else internal_copy( source.begin(), source.end(), source.my_size );
1601 template<typename Key, typename T, typename HashCompare, typename A>
1602 template<typename I>
1604 reserve( reserve_size, my_allocator );
1607 hashcode_t h = my_hash_compare.hash( (*first).first );
1608 bucket *b = get_bucket( h & m );
1610 node* node_ptr = create_node(my_allocator, (*first).first, (*first).second);
1611 add_to_bucket( b, node_ptr );
1621 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1623 if(a.size() != b.size()) return false;
1626 for(; i != i_end; ++i) {
1628 if( j == j_end || !(i->second == j->second) ) return false;
1633 template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
1635 { return !(a == b); }
1637 template<typename Key, typename T, typename HashCompare, typename A>
1641 #if _MSC_VER && !defined(__INTEL_COMPILER)
1642 #pragma warning( pop )
1643 #endif // warning 4127 is back
1648 #undef __TBB_concurrent_hash_map_H_include_area
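For orientation, here is a minimal, self-contained usage sketch of the public interface documented below. It is illustrative only: the alias table_t, the std::string/int key/value choice, and the sample keys are assumptions, not part of this header.

    #include <string>
    #include <iostream>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string, int> table_t;

    int main() {
        table_t table;
        // Plain insert: copies the value_type if the key is not present yet.
        table.insert( table_t::value_type("apple", 1) );
        // Accessor-based update: the accessor holds a write lock on the element.
        {
            table_t::accessor a;
            table.insert( a, std::string("banana") );  // value default-constructed if the key is new
            a->second = 2;
        }                                              // write lock released here
        std::cout << "size = " << table.size() << std::endl;
        return 0;
    }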
bool emplace(accessor &result, Args &&... args)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
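As an illustration (assuming the C++11 feature guards seen in the listing are satisfied, and reusing the table_t/table names from the sketch above):

    table_t::accessor a;
    if( table.emplace( a, std::string("pear"), 7 ) ) {
        // Key was absent: the element was constructed in place and is now write-locked.
    } else {
        a->second += 7;  // Key existed: the speculatively built element was discarded.
    }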
spin_rw_mutex mutex_t
Mutex type for buckets.
atomic< size_type > my_size
Size of container in stored items.
static void deallocate(Alloc &a, pointer p, size_type n)
segments_table_t my_table
Segment pointers table. Also prevents false sharing between my_mask and my_size.
T __TBB_load_with_acquire(const volatile T &location)
void rehash_bucket(bucket *b_new, const hashcode_t h)
Iterator::difference_type difference_type
static node * allocate_node_move_construct(node_allocator_type &allocator, const Key &key, const T *t)
bool insert(value_type &&value)
Insert item by copying if there is no such key present already.
~concurrent_hash_map()
Clear table and destroy it.
size_t hashcode_t
Type of a hash code.
hash_map_iterator & operator++()
bool insert(accessor &result, value_type &&value)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
bool empty() const
True if result is empty.
static void init_buckets(segment_ptr_t ptr, size_type sz, bool is_initial)
Initialize buckets.
segment_index_t insert_new_node(bucket *b, node_base *n, hashcode_t mask)
Insert a node and check for load factor.
static node * allocate_node_copy_construct(node_allocator_type &allocator, const Key &key, const T *t)
internal::hash_map_iterator< concurrent_hash_map, value_type > iterator
atomic< hashcode_t > my_mask
Hash mask = sum of allocated segment sizes - 1.
node_allocator_type & my_alloc
Combines data access, locking, and garbage collection.
bool find(const_accessor &result, const Key &key) const
Find item and acquire a read lock on the item.
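A typical read-only lookup might look like this (sketch; table_t and table as in the example above):

    table_t::const_accessor ca;              // holds a read (shared) lock while alive
    if( table.find( ca, std::string("apple") ) ) {
        int v = ca->second;                  // safe: writers to this element are blocked
        (void)v;
    }                                        // read lock released when ca is destroyed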
concurrent_hash_map(concurrent_hash_map &&table, const allocator_type &a)
Move constructor.
void allocator_copy_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
bucket * segment_ptr_t
Segment pointer.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
#define __TBB_FORWARDING_REF(A)
bool insert(accessor &result, const Key &key)
Insert item (if not already present) and acquire a write lock on the item.
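For example, an insert-or-update counter pattern (sketch only, reusing the assumed table_t alias):

    table_t::accessor a;
    if( table.insert( a, std::string("word") ) )
        a->second = 1;      // key was absent; the value was default-constructed
    else
        ++a->second;        // key already present; update it under the write lock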
const_accessor()
Create empty result.
static void construct(Alloc &, PT *p)
Value * operator->() const
void itt_store_word_with_release(tbb::atomic< T > &dst, U src)
T itt_load_word_with_acquire(const tbb::atomic< T > &src)
void allocator_move_assignment(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
base class of concurrent_hash_map
concurrent_hash_map & operator=(const concurrent_hash_map &table)
Assignment.
static void destroy(Alloc &, T *p)
const_reference operator*() const
Return reference to associated value in hash table.
bucket accessor is to find, rehash, acquire a lock, and access a bucket
bool insert(accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a write lock on the item.
node * search_bucket(const key_type &key, bucket *b) const
spin_rw_mutex mutex_t
Mutex type.
enable_segment_failsafe(segments_table_t &table, segment_index_t k)
Meets requirements of a forward iterator for STL.
concurrent_hash_map(size_type n, const HashCompare &compare, const allocator_type &a=allocator_type())
const typedef concurrent_hash_map::value_type value_type
Type of value.
bool lookup(bool op_insert, const Key &key, const T *t, const_accessor *result, bool write, node *(*allocate_node)(node_allocator_type &, const Key &, const T *), node *tmp_n=0)
Insert or find item and optionally acquire a lock on the item.
Container::iterator first(Container &c)
hash_map_iterator operator++(int)
Post increment.
internal::hash_map_range< const_iterator > const_range_type
void reserve(size_type buckets, const Allocator &allocator)
Prepare enough segments for number of buckets.
std::pair< const Key, T > value_type
Value & operator*() const
void allocator_swap(MyAlloc &my_allocator, OtherAlloc &other_allocator, traits_true_type)
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_false_type)
reference operator*() const
Return reference to associated value in hash table.
Container::iterator last(Container &c)
bool erase(const_accessor &item_accessor)
Erase item by const_accessor.
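Both erase overloads in use (sketch; names assumed as above):

    table_t::accessor a;
    if( table.find( a, std::string("banana") ) )
        table.erase( a );                              // erase the element the accessor points to
    bool removed = table.erase( std::string("apple") ); // by key; false if the key was not present
    (void)removed;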
static hash_map_node_base *const empty_rehashed
Rehashed empty bucket flag.
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
bool operator!=(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
std::pair< I, I > internal_equal_range(const Key &key, I end) const
Returns an iterator for an item defined by the key, or for the next item after it (if upper==true)
The scoped locking pattern.
size_type max_size() const
Upper bound on size.
bool insert(const_accessor &result, value_type &&value)
Insert item by copying if there is no such key present already and acquire a read lock on the item.
Class for determining type of std::allocator<T>::value_type.
const_iterator end() const
void enable_segment(segment_index_t k, const Allocator &allocator, bool is_initial=false)
Enable segment.
const_pointer operator->() const
Return pointer to associated value in hash table.
allocator_type get_allocator() const
return allocator object
internal::hash_map_iterator< concurrent_hash_map, const value_type > const_iterator
concurrent_hash_map * my_ch_map
bool insert(const value_type &value)
Insert item by copying if there is no such key present already.
bool insert(const_accessor &result, const value_type &value)
Insert item by copying if there is no such key present already and acquire a read lock on the item.
pointer operator->() const
Return pointer to associated value in hash table.
concurrent_hash_map(const HashCompare &compare, const allocator_type &a=allocator_type())
bucket * get_bucket(hashcode_t h) const
Get bucket by (masked) hashcode.
void advance_to_next_bucket()
bool check_mask_race(const hashcode_t h, hashcode_t &m) const
Check for mask race.
void mark_rehashed_levels(hashcode_t h)
hash_map_range(hash_map_range &r, split)
Split range.
std::pair< const_iterator, const_iterator > equal_range(const Key &key) const
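Because keys are unique, the returned range holds at most one element, and equal_range is intended for serial inspection rather than concurrent traversal. A sketch (names assumed as above):

    std::pair<table_t::iterator, table_t::iterator> r = table.equal_range( std::string("apple") );
    for( table_t::iterator it = r.first; it != r.second; ++it ) {
        // at most one iteration: the element with this key, if present
    }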
bool emplace(Args &&... args)
Insert item by copying if there is no such key present already.
static pointer allocate(Alloc &a, size_type n)
Dummy type that distinguishes splitting constructor from copy constructor.
node_allocator_type my_allocator
bool erase(accessor &item_accessor)
Erase item by accessor.
tick_count::interval_t operator-(const tick_count &t1, const tick_count &t0)
bool operator!=(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
bucket my_embedded_segment[embedded_buckets]
Zero segment.
bucket * operator()()
get bucket pointer
bool empty() const
True if range is empty.
hash_map_range(hash_map_range< U > &r)
type conversion
bucket_accessor(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
const_range_type range(size_type grainsize=1) const
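The range types make the table usable with tbb::parallel_for. A possible traversal (sketch only; assumes tbb/parallel_for.h, a C++11 lambda, and that no other thread inserts or erases concurrently):

    #include "tbb/parallel_for.h"

    tbb::parallel_for( table.range(),
        []( const table_t::range_type &r ) {
            for( table_t::iterator it = r.begin(); it != r.end(); ++it )
                it->second *= 2;             // visit each element of this chunk
        } );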
static node * allocate_node_default_construct(node_allocator_type &allocator, const Key &key, const T *)
size_type size() const
Number of items in table.
static size_type segment_size(segment_index_t k)
void internal_move(hash_map_base &&other)
void internal_copy(const concurrent_hash_map &source)
Copy "source" to *this, where *this must start out empty.
bool check_rehashing_collision(const hashcode_t h, hashcode_t m_old, hashcode_t m) const
Process mask race, check for rehashing collision.
void insert(I first, I last)
Insert range [first, last)
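For example, loading from any forward range of value_type pairs (sketch; the source array is illustrative):

    table_t::value_type src[] = { table_t::value_type("fig", 3),
                                  table_t::value_type("plum", 5) };
    table.insert( src, src + 2 );    // inserts each pair, skipping keys already present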
concurrent_hash_map(I first, I last, const allocator_type &a=allocator_type())
Construction with copying iteration range and given allocator instance.
const_iterator begin() const
bool is_writer()
check whether bucket is locked for write
static node * do_not_allocate_node(node_allocator_type &, const Key &, const T *)
ptrdiff_t difference_type
const bucket * my_bucket
Pointer to bucket.
internal::hash_map_range< iterator > range_type
hash_map_iterator()
Construct undefined iterator.
const Container * my_map
concurrent_hash_map over which we are iterating.
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
bool operator==(const cache_aligned_allocator< T > &, const cache_aligned_allocator< U > &)
bool find(accessor &result, const Key &key)
Find item and acquire a write lock on the item.
static segment_index_t segment_base(segment_index_t k)
const typedef value_type & const_reference
void move(tbb_thread &t1, tbb_thread &t2)
static hash_map_node_base *const rehash_req
Incompleteness flag value.
void __TBB_store_with_release(volatile T &location, V value)
const Iterator & begin() const
Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5.
void internal_swap(hash_map_base &table)
Swap hash_map_bases.
friend bool is_write_access_needed(accessor const &)
const typedef value_type * const_pointer
Iterator::reference reference
void internal_move_assign(concurrent_hash_map &&other, tbb::internal::traits_true_type)
hash_map_base::node_base node_base
node_scoped_guard(node *n, node_allocator_type &alloc)
segment_ptr_t * my_segment_ptr
#define __TBB_USE_OPTIONAL_RTTI
Class that implements exponential backoff.
bool is_divisible() const
True if range can be partitioned into two subranges.
friend bool is_write_access_needed(accessor_not_used const &)
hash_map_base::bucket bucket
hash_map_base::size_type size_type
call_clear_on_leave(concurrent_hash_map *a_ch_map)
hash_map_iterator(const hash_map_iterator< Container, typename Container::value_type > &other)
concurrent_hash_map(size_type n, const allocator_type &a=allocator_type())
Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
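Preallocating buckets can avoid rehashes during an initial bulk load, e.g. (sketch; the count is arbitrary):

    table_t preloaded( 100000 );     // roughly the expected number of elements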
size_type bucket_count() const
Returns the current number of buckets.
bool generic_move_insert(Accessor &&result, value_type &&value)
Base class for types that should not be copied or assigned.
size_t hashcode_t
Type of a hash code.
concurrent_hash_map(I first, I last, const HashCompare &compare, const allocator_type &a=allocator_type())
Iterator::value_type value_type
friend class const_accessor
tbb::aligned_space< value_type > my_value
tbb::internal::allocator_rebind< Allocator, node >::type node_allocator_type
static bool is_valid(void *ptr)
concurrent_hash_map(const concurrent_hash_map &table, const allocator_type &a)
static segment_index_t segment_index_of(size_type index)
bool erase(const Key &key)
Erase item.
hash_compare that is the default argument for concurrent_hash_map.
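A user-supplied HashCompare must provide hash() and equal() callable as shown below; the concrete hash formula here is only an example, not taken from this header:

    struct StringHashCompare {
        static size_t hash( const std::string &s ) {
            size_t h = 0;
            for( std::string::const_iterator i = s.begin(); i != s.end(); ++i )
                h = h*31 + static_cast<unsigned char>(*i);
            return h;
        }
        static bool equal( const std::string &a, const std::string &b ) { return a == b; }
    };
    typedef tbb::concurrent_hash_map<std::string, int, StringHashCompare> custom_table_t;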
void itt_hide_store_word(T &dst, T src)
tbb::internal::false_type propagate_on_container_move_assignment
intptr_t __TBB_Log2(uintptr_t x)
concurrent_hash_map(std::initializer_list< value_type > il, const HashCompare &compare, const allocator_type &a=allocator_type())
concurrent_hash_map(const concurrent_hash_map &table)
Copy constructor.
const Iterator & end() const
size_t my_index
Index in hash table for current item.
bool exclude(const_accessor &item_accessor)
delete item by accessor
size_type count(const Key &key) const
Return count of items (0 or 1)
node * my_node
Pointer to node that has current item.
~const_accessor()
Destroy result after releasing the underlying reference.
concurrent_hash_map(const allocator_type &a=allocator_type())
Construct empty table.
void acquire(concurrent_hash_map *base, const hashcode_t h, bool writer=false)
find a bucket by masked hashcode, optionally rehash, and acquire the lock
Fast, unfair, spinning reader-writer lock with backoff and writer-preference.
friend const_accessor * accessor_location(accessor_not_used const &)
static node * create_node(node_allocator_type &allocator, __TBB_FORWARDING_REF(Arg1) arg1, __TBB_FORWARDING_REF(Arg2) arg2)
friend class hash_map_iterator
friend bool is_write_access_needed(const_accessor const &)
friend const_accessor * accessor_location(const_accessor &a)
hash_map_range(const map_type &map, size_type grainsize_=1)
Init range with container and grainsize specified.
size_t size_type
Size type.
atomic< T > & as_atomic(T &t)
Range class used with concurrent_hash_map.
std::pair< iterator, iterator > equal_range(const Key &key)
void delete_node(node_base *n)
Iterator::map_type map_type
range_type range(size_type grainsize=1)
bool insert(const_accessor &result, const Key &key)
Insert item (if not already present) and acquire a read lock on the item.
void swap(concurrent_hash_map &table)
swap two instances. Iterators are invalidated
HashCompare my_hash_compare
const_pointer internal_fast_find(const Key &key) const
Fast find when no concurrent erasure is used. For internal use inside TBB only!
static void add_to_bucket(bucket *b, node_base *n)
Add node.
tbb::internal::allocator_traits< node_allocator_type > node_allocator_traits
Identifiers declared inside namespace internal should never be used directly by client code.
bool emplace(const_accessor &result, Args &&... args)
Insert item by copying if there is no such key present already and acquire a read lock on the item.
T itt_hide_load_word(const T &src)
mutex_t::scoped_lock scoped_t
Scoped lock type for mutex.
void set_midpoint() const
Set my_midpoint to point approximately half way between my_begin and my_end.
void rehash(size_type n=0)
Rehashes and optionally resizes the whole table.
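The assertions in the listing suggest rehash() is meant to run without concurrent operations on the same table, for example after a bulk load (sketch; names assumed as above):

    table.rehash();            // rehash buckets that are still marked as pending
    table.rehash( 1 << 16 );   // also asks for capacity of at least 65536 buckets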
size_type grainsize() const
The grain size for this range.
Unordered map from Key to T.
bool generic_emplace(Accessor &&result, Args &&... args)
void swap(concurrent_hash_map< Key, T, HashCompare, A > &a, concurrent_hash_map< Key, T, HashCompare, A > &b)
hash_map_node_base node_base
Node base type.
bool operator==(const hash_map_iterator< Container, T > &i, const hash_map_iterator< Container, U > &j)
void delete_segment(segment_index_t s, const Allocator &allocator)
concurrent_hash_map::value_type value_type
Type of value.
bool empty() const
True if size()==0.
void release()
Set to null.
size_t segment_index_t
Segment index type.
allocator_traits< Alloc >::template rebind_alloc< T >::other type
hash_map_iterator & operator=(const hash_map_iterator< Container, typename Container::value_type > &other)
Allows write access to elements and combines data access, locking, and garbage collection.
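Because the accessor is effectively a scoped lock on one element, it is worth keeping its lifetime short (sketch, names as in the earlier examples):

    {
        table_t::accessor a;
        if( table.find( a, std::string("word") ) )
            a->second += 10;   // exclusive access while 'a' is alive
        a.release();           // or simply let the destructor release the lock
    }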
hash_map_node_base * next
Next node in chain.
std::size_t size_type
Type for size of a range.
~enable_segment_failsafe()
Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
registered trademarks or trademarks of Intel Corporation or its
subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.