/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP
#define SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP

#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "utilities/globalCounter.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/tableStatistics.hpp"

// A mostly concurrent hash table where the read-side is wait-free, inserts are
// done with CAS and deletes mutually exclude each other on a per-bucket basis.
// VALUE is the type kept inside each Node and CONFIG contains hash and
// allocation methods. A LOOKUP_FUNC and a callback functor (FOUND_FUNC) need
// to be provided for get and insert.
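//
// A minimal usage sketch (illustrative only). MyValue and MyConfig are
// hypothetical; only the Value typedef and the allocate_node/free_node
// signatures are visible from this header, while the remaining requirements
// on CONFIG (for example a static get_hash() used for bucket selection) are
// defined by the implementation in concurrentHashTable.inline.hpp and can be
// seen in existing users such as SymbolTable.
//
//   struct MyValue {
//     uintx _hash;
//     int   _data;
//   };
//
//   struct MyConfig {
//     typedef MyValue Value;
//     // Assumed signature, mirroring existing users:
//     static uintx get_hash(const Value& value, bool* is_dead) {
//       *is_dead = false;
//       return value._hash;
//     }
//     static void* allocate_node(void* context, size_t size, const Value& value) {
//       return AllocateHeap(size, mtInternal);
//     }
//     static void free_node(void* context, void* memory, Value& value) {
//       FreeHeap(memory);
//     }
//   };
//
//   typedef ConcurrentHashTable<MyConfig, mtInternal> MyTable;
//   MyTable* table = new MyTable();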

class Thread;
class Mutex;

template <typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable : public CHeapObj<F> {
  typedef typename CONFIG::Value VALUE;
 private:
  // _stats_rate is null if statistics are not enabled.
  TableRateStatistics* _stats_rate;
  inline void safe_stats_add() {
    if (_stats_rate != nullptr) {
      _stats_rate->add();
    }
  }
  inline void safe_stats_remove() {
    if (_stats_rate != nullptr) {
      _stats_rate->remove();
    }
  }
  // Calculate statistics. Item sizes are calculated with VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  TableStatistics statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f);

  // This is the internal node structure.
  // Only constructed with placement new from memory allocated with MEMFLAGS of
  // the InternalTable or user-defined memory.
  class Node {
   private:
    Node * volatile _next;
    VALUE _value;
   public:
    Node(const VALUE& value, Node* next = nullptr)
      : _next(next), _value(value) {
      assert((((uintptr_t)this) & ((uintptr_t)0x3)) == 0,
             "Must be 4 byte aligned.");
    }

    Node* next() const;
    void set_next(Node* node)         { _next = node; }
    Node* const volatile * next_ptr() { return &_next; }

    VALUE* value()                    { return &_value; }

    // Creates a node.
    static Node* create_node(void* context, const VALUE& value, Node* next = nullptr) {
      return new (CONFIG::allocate_node(context, sizeof(Node), value)) Node(value, next);
    }
    // Destroys a node.
    static void destroy_node(void* context, Node* node) {
      CONFIG::free_node(context, (void*)node, node->_value);
    }

    void print_on(outputStream* st) const {}
    void print_value_on(outputStream* st) const {}

    static bool is_dynamic_sized_value_compatible() {
      // To support dynamically sized Value types, where part of the payload is
      // allocated beyond the end of the object, it must be that the _value
      // field ends where the Node object ends. (No end padding).
      return offset_of(Node, _value) + sizeof(_value) == sizeof(Node);
    }
  };

  // Only constructed with placement new from an array allocated with MEMFLAGS
  // of InternalTable.
  class Bucket {
   private:

    // The state embedded in the two low bits of the first pointer is a
    // spinlock with 3 states: unlocked, locked, redirect. You must never
    // busy-spin on trylock() or call lock() without holding _resize_lock;
    // that would deadlock. Redirect can only be installed by the owner and
    // is the final state of a bucket.
    // The only two valid flows are:
    // unlocked -> locked -> unlocked
    // unlocked -> locked -> redirect
    // The locked state only applies to updaters.
    // Readers only check for redirect.
    Node * volatile _first;

    static const uintptr_t STATE_LOCK_BIT     = 0x1;
    static const uintptr_t STATE_REDIRECT_BIT = 0x2;
    static const uintptr_t STATE_MASK         = 0x3;

    // Get the first pointer unmasked.
    Node* first_raw() const;

    // Methods to manipulate the embedded state.
    static bool is_state(Node* node, uintptr_t bits) {
      return (bits & (uintptr_t)node) == bits;
    }

    static Node* set_state(Node* n, uintptr_t bits) {
      return (Node*)(bits | (uintptr_t)n);
    }

    static uintptr_t get_state(Node* node) {
      return (((uintptr_t)node) & STATE_MASK);
    }

    static Node* clear_state(Node* node) {
      return (Node*)(((uintptr_t)node) & (~(STATE_MASK)));
    }

    static Node* clear_set_state(Node* node, Node* state) {
      return (Node*)(((uintptr_t)clear_state(node)) ^ get_state(state));
    }
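
    // Worked example (illustrative, assuming a 4-byte aligned Node at address
    // 0x1000): set_state(node, STATE_LOCK_BIT) yields 0x1001, get_state() on
    // that value returns 0x1, and clear_state() recovers the plain 0x1000.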

   public:
    // A bucket is only one pointer with the embedded state.
    Bucket() : _first(nullptr) {}

    // Get the first pointer unmasked.
    Node* first() const;

    // Get a pointer to the const first pointer. Do not dereference this
    // pointer; the pointer pointed to _may_ contain an embedded state. Such
    // a pointer should only be used as input to release_assign_node_ptr.
    Node* const volatile * first_ptr() { return &_first; }

    // This is the only place where a pointer to a Node pointer that potentially
    // is _first should be changed. Otherwise we destroy the embedded state. We
    // only give out a pointer to a const Node pointer to avoid accidental
    // assignment, thus here we must cast the const part away. Method is not
    // static due to an assert.
    void release_assign_node_ptr(Node* const volatile * dst, Node* node) const;

    // This method assigns this bucket's last Node's next pointer to the given Node.
    void release_assign_last_node_next(Node* node);

    // Setting the first pointer must be done with CAS.
    bool cas_first(Node *node, Node* expect);

    // Returns true if this bucket is redirecting to a new table.
    // Redirect is a terminal state and will never change.
    bool have_redirect() const;

    // Return true if this bucket is locked for updates.
    bool is_locked() const;

    // Return true if this bucket was locked.
    bool trylock();

    // The bucket might be invalid, due to a concurrent resize. The lock()
    // method does not respect that and can deadlock if the caller does not
    // hold _resize_lock.
    void lock();

    // Unlocks this bucket.
    void unlock();

    // Installs redirect in this bucket.
    // Prior to doing so you must have successfully locked this bucket.
    void redirect();
  };

  // The backing storage table holding the buckets and its size and mask-bits.
  // Table is always a power of two for two reasons:
  // - Re-size can only change the size into half or double
  //   (any pow 2 would also be possible).
  // - Use masking of hash for bucket index.
  class InternalTable : public CHeapObj<F> {
   private:
    Bucket* _buckets;        // Bucket array.
   public:
    const size_t _log2_size; // Size in log2.
    const size_t _size;      // Size, 2^_log2_size (number of buckets).

    // The mask used on hash for selecting bucket.
    // The masked value is guaranteed to be inside the buckets array.
    const size_t _hash_mask;

    // Create a backing table.
    InternalTable(size_t log2_size);
    ~InternalTable();

    Bucket* get_buckets() { return _buckets; }
    Bucket* get_bucket(size_t idx) { return &_buckets[idx]; }

    size_t get_mem_size() {
      return sizeof(*this) + _size * sizeof(Bucket);
    }
  };

  // For materializing a supplied value.
  class LazyValueRetrieve {
   private:
    const VALUE& _val;
   public:
    LazyValueRetrieve(const VALUE& val) : _val(val) {}
    const VALUE& operator()() { return _val; }
  };

  void* _context;

  InternalTable* _table;      // Active table.
  InternalTable* _new_table;  // Table we are resizing to.

  const size_t _log2_size_limit;  // The biggest size.
  const size_t _log2_start_size;  // Start size.
  const size_t _grow_hint;        // Number of linked items

  volatile bool _size_limit_reached;

  // We serialize resizers and other bulk operations which do not support
  // concurrent resize with this lock.
  Mutex* _resize_lock;
  // Since we need to drop the mutex for safepoints, but stop other threads
  // from taking the mutex after a safepoint, this field holds the actual lock
  // state. After acquiring the mutex you must check if this is already
  // locked. If so you must drop the mutex until the real lock holder grabs
  // the mutex.
  volatile Thread* _resize_lock_owner;

  // Return true if lock mutex/state succeeded.
  bool try_resize_lock(Thread* locker);
  // Returns when both mutex and state are properly locked.
  void lock_resize_lock(Thread* locker);
  // Unlocks mutex and state.
  void unlock_resize_lock(Thread* locker);

  // This method sets the _invisible_epoch and does a write_synchronize.
  // Subsequent calls check the state of _invisible_epoch and determine if the
  // write_synchronize can be avoided. If not, it sets the _invisible_epoch
  // again and does a write_synchronize.
  void write_synchonize_on_visible_epoch(Thread* thread);
  // To be able to avoid write_synchronize in resize and other bulk operations,
  // this field keeps track of whether a version of the hash-table has ever
  // been seen. We use the working thread pointer as a tag for debugging. The
  // _invisible_epoch can only be used by the owner of _resize_lock.
  volatile Thread* _invisible_epoch;

  // Scoped critical section, which also handles the invisible epochs.
  // An invisible epoch/version does not need a write_synchronize().
  class ScopedCS: public StackObj {
   protected:
    Thread* _thread;
    ConcurrentHashTable<CONFIG, F>* _cht;
    GlobalCounter::CSContext _cs_context;
   public:
    ScopedCS(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht);
    ~ScopedCS();
  };


  // When doing deletes, we need to store the pointers until the next
  // visible epoch.  In the normal case (good hash function and
  // reasonable sizing), we can save these pointers on the stack
  // (there should not be more than a few entries per bucket). But if
  // the hash function is bad and/or the sizing of the table is bad,
  // we cannot use a fixed-size stack buffer alone. We use a heap
  // buffer as fall-back when the stack is not enough, and then we
  // have to pay for a dynamic allocation.  `StackBufferSize` is the
  // size of the optimistic stack buffer that will almost always be
  // used.
  static const size_t StackBufferSize = 256;

  // Simple getters and setters for the internal table.
  InternalTable* get_table() const;
  InternalTable* get_new_table() const;
  InternalTable* set_table_from_new();

  // Destroys all nodes.
  void free_nodes();

  // Mask away high bits of hash.
  static size_t bucket_idx_hash(InternalTable* table, const uintx hash) {
    return ((size_t)hash) & table->_hash_mask;
  }

  // Returns the bucket for the hash in the given internal table.
  Bucket* get_bucket_in(InternalTable* table, const uintx hash) const {
    size_t bucket_index = bucket_idx_hash(table, hash);
    return table->get_bucket(bucket_index);
  }

  // Returns the correct bucket for reading and handles resizing.
  Bucket* get_bucket(const uintx hash) const;

  // Returns the correct bucket for updates and handles resizing.
  Bucket* get_bucket_locked(Thread* thread, const uintx hash);

  // Finds a node.
  template <typename LOOKUP_FUNC>
  Node* get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
                 bool* have_dead, size_t* loops = nullptr) const;

  // Methods for shrinking.
  bool internal_shrink_prolog(Thread* thread, size_t log2_size);
  void internal_shrink_epilog(Thread* thread);
  void internal_shrink_range(Thread* thread, size_t start, size_t stop);
  bool internal_shrink(Thread* thread, size_t size_limit_log2);
  void internal_reset(size_t log2_size);

  // Methods for growing.
  bool unzip_bucket(Thread* thread, InternalTable* old_table,
                    InternalTable* new_table, size_t even_index,
                    size_t odd_index);
  bool internal_grow_prolog(Thread* thread, size_t log2_size);
  void internal_grow_epilog(Thread* thread);
  void internal_grow_range(Thread* thread, size_t start, size_t stop);
  bool internal_grow(Thread* thread, size_t log2_size);

  // Get a value.
  template <typename LOOKUP_FUNC>
  VALUE* internal_get(Thread* thread, LOOKUP_FUNC& lookup_f,
                      bool* grow_hint = nullptr);

  // Insert and get current value.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool internal_insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                           FOUND_FUNC& foundf, bool* grow_hint, bool* clean_hint);

  // Returns true if an item matching LOOKUP_FUNC is removed.
  // Calls DELETE_FUNC before destroying the node.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f,
                       DELETE_FUNC& delete_f);

  // Visits nodes with FUNC.
  template <typename FUNC>
  static bool visit_nodes(Bucket* bucket, FUNC& visitor_f);

  // With the current algorithm we cannot guarantee that we only visit nodes
  // once during shrink/grow. To keep it simple, the caller must hold
  // _resize_lock.
  template <typename FUNC>
  void do_scan_locked(Thread* thread, FUNC& scan_f);

  // Check for dead items in a bucket.
  template <typename EVALUATE_FUNC>
  size_t delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
                            size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, F>& ndel_heap);

  // Check for dead items in this table. During shrink/grow we cannot guarantee
  // that we only visit nodes once. To keep it simple, the caller must hold
  // _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked(Thread* thread, EVALUATE_FUNC& eval_f,
                             DELETE_FUNC& del_f) {
    do_bulk_delete_locked_for(thread, 0, _table->_size, eval_f, del_f);
  }

  // To have prefetching for a VALUE that is a pointer during
  // do_bulk_delete_locked, we have these helper classes: one for the
  // non-pointer case without prefetch and one for the pointer case with
  // prefetch.
  template <bool b, typename EVALUATE_FUNC>
  struct HaveDeletables {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };
  template<typename EVALUATE_FUNC>
  struct HaveDeletables<true, EVALUATE_FUNC> {
    static bool have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f,
                               Bucket* prefetch_bucket);
  };

  // Check for dead items in a range of this table. During shrink/grow we
  // cannot guarantee that we only visit nodes once. To keep it simple, the
  // caller must hold _resize_lock.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void do_bulk_delete_locked_for(Thread* thread, size_t start_idx,
                                 size_t stop_idx, EVALUATE_FUNC& eval_f,
                                 DELETE_FUNC& del_f, bool is_mt = false);

  // Method to delete one item.
  template <typename LOOKUP_FUNC>
  void delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f);

 public:
  // Default sizes
  static const size_t DEFAULT_MAX_SIZE_LOG2 = 21;
  static const size_t DEFAULT_START_SIZE_LOG2 = 13;
  static const size_t DEFAULT_GROW_HINT = 4; // Chain length
  static const bool DEFAULT_ENABLE_STATISTICS = false;
  static const Mutex::Rank DEFAULT_MUTEX_RANK = static_cast<Mutex::Rank>(static_cast<int>(Mutex::nosafepoint) - 2);
  ConcurrentHashTable(size_t log2size = DEFAULT_START_SIZE_LOG2,
                      size_t log2size_limit = DEFAULT_MAX_SIZE_LOG2,
                      size_t grow_hint = DEFAULT_GROW_HINT,
                      bool enable_statistics = DEFAULT_ENABLE_STATISTICS,
                      Mutex::Rank rank = DEFAULT_MUTEX_RANK,
                      void* context = nullptr);

  explicit ConcurrentHashTable(Mutex::Rank rank, void* context, size_t log2size = DEFAULT_START_SIZE_LOG2, bool enable_statistics = DEFAULT_ENABLE_STATISTICS) :
    ConcurrentHashTable(log2size, DEFAULT_MAX_SIZE_LOG2, DEFAULT_GROW_HINT, enable_statistics, rank, context) {}

  ~ConcurrentHashTable();

  size_t get_mem_size(Thread* thread);

  size_t get_size_log2(Thread* thread);
  static size_t get_node_size() { return sizeof(Node); }
  static size_t get_dynamic_node_size(size_t value_size);
  bool is_max_size_reached() { return _size_limit_reached; }

  // This means no paused bucket resize operation is going to resume
  // on this table.
  bool is_safepoint_safe() { return _resize_lock_owner == nullptr; }

  // Re-size operations.
  bool shrink(Thread* thread, size_t size_limit_log2 = 0);
  bool grow(Thread* thread, size_t size_limit_log2 = 0);
  // Unsafe reset and resize the table. This method assumes that we
  // want to clear and maybe resize the internal table without the
  // overhead of clearing individual items in the table.
  void unsafe_reset(size_t size_log2 = 0);

  // All callbacks for get are under critical sections. Other callbacks may be
  // under a critical section or may have locked parts of the table. Calling
  // any methods on the table during a callback is not supported. Only
  // MultiGetHandle supports multiple gets.

  // get returns true if an item matching LOOKUP_FUNC is found, in which case
  // FOUND_FUNC is called on it.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& foundf,
           bool* grow_hint = nullptr);
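
  // A hedged usage sketch of get() (illustrative only; MyValue and MyLookup
  // are hypothetical, and the exact lookup-functor interface is defined by
  // the implementation in concurrentHashTable.inline.hpp -- see existing
  // users such as SymbolTable for authoritative examples). The lookup functor
  // is assumed to provide a hash and an equality predicate over stored values:
  //
  //   class MyLookup : public StackObj {
  //     uintx _hash;
  //    public:
  //     MyLookup(uintx hash) : _hash(hash) {}
  //     uintx get_hash() const { return _hash; }                            // assumed
  //     bool equals(const MyValue* value) { return value->_hash == _hash; } // assumed
  //     bool is_dead(const MyValue* value) { return false; }                // assumed
  //   };
  //
  //   MyLookup lookup(hash);
  //   auto found = [](MyValue* value) { /* use value inside the critical section */ };
  //   bool hit = table->get(Thread::current(), lookup, found);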

  // Returns true if the item was inserted; duplicates are found with
  // LOOKUP_FUNC.
  template <typename LOOKUP_FUNC>
  bool insert(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
              bool* grow_hint = nullptr, bool* clean_hint = nullptr) {
    struct NOP {
      void operator()(...) const {}
    } nop;
    return internal_insert_get(thread, lookup_f, value, nop, grow_hint, clean_hint);
  }

  // Returns true if the item was inserted. Duplicates are found with
  // LOOKUP_FUNC; FOUND_FUNC is then called.
  template <typename LOOKUP_FUNC, typename FOUND_FUNC>
  bool insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, VALUE& value, FOUND_FUNC& foundf,
                  bool* grow_hint = nullptr, bool* clean_hint = nullptr) {
    return internal_insert_get(thread, lookup_f, value, foundf, grow_hint, clean_hint);
  }

  // This does a fast unsafe insert and can thus only be used when there is no
  // risk of duplicates and no other thread uses this table.
  bool unsafe_insert(const VALUE& value);

  // Returns true if an item matching LOOKUP_FUNC was deleted;
  // DELETE_FUNC is called prior to its destruction.
  template <typename LOOKUP_FUNC, typename DELETE_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& del_f) {
    return internal_remove(thread, lookup_f, del_f);
  }

  // Same without DELETE_FUNC.
  template <typename LOOKUP_FUNC>
  bool remove(Thread* thread, LOOKUP_FUNC& lookup_f) {
    struct {
      void operator()(VALUE*) {}
    } ignore_del_f;
    return internal_remove(thread, lookup_f, ignore_del_f);
  }

  // Visits all items with SCAN_FUNC if there is no concurrent resize; takes
  // the resize lock to avoid concurrent resizes. Otherwise returns false.
  template <typename SCAN_FUNC>
  bool try_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits all items with SCAN_FUNC when the resize lock is obtained.
  template <typename SCAN_FUNC>
  void do_scan(Thread* thread, SCAN_FUNC& scan_f);

  // Visits nodes for buckets in range [start_idx, stop_idx) with FUNC.
  template <typename FUNC>
  static bool do_scan_for_range(FUNC& scan_f, size_t start_idx, size_t stop_idx, InternalTable *table);

  // Visits all items with SCAN_FUNC without any protection.
  // It assumes no other thread accesses this table during the safepoint.
  // Must be called by the VM thread.
  template <typename SCAN_FUNC>
  void do_safepoint_scan(SCAN_FUNC& scan_f);
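
  // A hedged sketch of a scan functor (illustrative; MyValue is hypothetical).
  // The convention that the functor receives a pointer to the stored value
  // and returns false to stop the scan is an assumption taken from the
  // implementation in concurrentHashTable.inline.hpp and its existing users:
  //
  //   struct MyScan {
  //     bool operator()(MyValue* value) {
  //       // visit *value; return true to continue scanning
  //       return true;
  //     }
  //   } my_scan;
  //   if (!table->try_scan(Thread::current(), my_scan)) {
  //     // the resize lock could not be taken; retry or use do_scan()
  //   }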

  // Destroys items matching EVALUATE_FUNC if the resize lock is obtained;
  // DELETE_FUNC is called before an item is destroyed. Otherwise returns
  // false.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  bool try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f,
                       DELETE_FUNC& del_f);

  // Destroys items matching EVALUATE_FUNC when the resize lock is
  // successfully obtained; DELETE_FUNC is called before an item is destroyed.
  template <typename EVALUATE_FUNC, typename DELETE_FUNC>
  void bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f);
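
  // A hedged sketch of a bulk delete (illustrative; MyValue and its _data
  // member are hypothetical). EVALUATE_FUNC selects the items to remove
  // (true means delete) and DELETE_FUNC is called on each selected item
  // before its node is destroyed, as described above:
  //
  //   struct SelectStale {
  //     bool operator()(MyValue* value) { return value->_data == 0; }
  //   } select_stale;
  //   struct OnDelete {
  //     void operator()(MyValue* value) { /* release resources held by value */ }
  //   } on_delete;
  //   table->bulk_delete(Thread::current(), select_stale, on_delete);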

  // Gets statistics if available; if not, returns the old one. Item sizes are
  // calculated with VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  TableStatistics statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old);

  // Writes statistics to the outputStream. Item sizes are calculated with
  // VALUE_SIZE_FUNC.
  template <typename VALUE_SIZE_FUNC>
  void statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st,
                     const char* table_name);

  // Moves all nodes from this table to to_cht with a new hash code.
  // Must be done at a safepoint.
  void rehash_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht);

  // Scoped multi getter.
  class MultiGetHandle : private ScopedCS {
   public:
    MultiGetHandle(Thread* thread, ConcurrentHashTable<CONFIG, F>* cht)
      : ScopedCS(thread, cht) {}
    // In the MultiGetHandle scope you can look up items matching LOOKUP_FUNC.
    // The VALUEs are safe as long as you never save the VALUEs outside the
    // scope, e.g. after ~MultiGetHandle().
    template <typename LOOKUP_FUNC>
    VALUE* get(LOOKUP_FUNC& lookup_f, bool* grow_hint = nullptr);
  };
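
  // A hedged sketch of MultiGetHandle usage (illustrative; lookup functors as
  // in the get() sketch above). The returned VALUE pointers are only safe
  // inside the scope of the handle:
  //
  //   {
  //     MultiGetHandle mgh(Thread::current(), table);
  //     MyValue* a = mgh.get(lookup_a);
  //     MyValue* b = mgh.get(lookup_b);
  //     // use a and b here; do not keep them past this scope
  //   }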

 private:
  class BucketsOperation;

 public:
  class BulkDeleteTask;
  class GrowTask;
  class ScanTask;
};

#endif // SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP