1 /*
  2  * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "logging/log.hpp"
 26 #include "runtime/interfaceSupport.inline.hpp"
 27 #include "runtime/javaThread.hpp"
 28 #include "runtime/mutexLocker.hpp"
 29 #include "runtime/objectMonitorTable.hpp"
 30 #include "runtime/safepoint.hpp"
 31 #include "runtime/thread.hpp"
 32 #include "runtime/timerTrace.hpp"
 33 #include "runtime/trimNativeHeap.hpp"
 34 #include "utilities/debug.hpp"
 35 #include "utilities/globalDefinitions.hpp"
 36 
 37 // -----------------------------------------------------------------------------
 38 // Theory of operations -- Object Monitor Table:
 39 //
 40 // The OMT (Object Monitor Table) is a concurrent hash table specifically
 41 // designed so that it (in the normal case) can be searched from C2 generated
 42 // code.
 43 //
 44 // In its simplest form it consists of:
 45 //  1. An array of pointers.
 46 //  2. The size (capacity) of the array, which is always a power of two.
 47 //
 48 // When you want to find a monitor associated with an object, you extract the
 49 // hash value of the object. Then calculate an index by taking the hash value
 50 // and bit-wise AND it with the capacity mask (i.e., size-1) of the OMT. Now
 51 // use that index into the OMT's array of pointers. If the pointer is non
 52 // null, check if it's a monitor pointer that is associated with the object.
 53 // If so you're done. If the pointer is non null, but associated with another
 54 // object, you start looking at (index+1), (index+2) and so on, until you
 55 // either find the correct monitor, or a null pointer. Finding a null pointer
 56 // means that the monitor is simply not in the OMT.
 57 //
 58 // If the size of the pointer array is significantly larger than the number of
 59 // pointers in it, the chance of finding the monitor at the hash index
// (without any further linear searching) is quite high. It is also
// straightforward to generate C2 code for this, which for the fast path
// doesn't contain any branching at all. See: C2_MacroAssembler::fast_lock().
 63 //
 64 // When the number of monitors (pointers in the array) reaches above the
 65 // allowed limit (defined by the GROW_LOAD_FACTOR symbol) we need to grow the
 66 // table.
 67 //
 68 // A simple description of how growing the OMT is done, is to say that we
 69 // allocate a new table (twice as large as the old one), and then copy all the
 70 // old monitor pointers from the old table to the new.
 71 //
 72 // But since the OMT is a concurrent hash table and things need to work for
 73 // other clients of the OMT while we grow it, it gets a bit more
 74 // complicated.
 75 //
 76 // The new and (potentially several) old table(s) may exist at the same
 77 // time. The newest is always called the "current", and the older ones are
 78 // singly linked using a "prev" pointer.
 79 //
 80 // As soon as we have allocated and linked in the new "current" OMT, all new
 81 // monitor pointers will be added to the new table. Effectively making the
 82 // atomic switch from "old current" to "new current" a linearization point.
 83 //
 84 // After that we start to go through all the indexes in the old table. If the
 85 // index is empty (the pointer is null) we put a "tombstone" into that index,
 86 // which will prevent any future concurrent insert from ending up in that
 87 // index.
 88 //
 89 // If the index contains a monitor pointer, we insert that monitor pointer
 90 // into the OMT which can be considered as one generation newer. If the index
 91 // contains a "removed" pointer, we just ignore it.
 92 //
 93 // We use special pointer values for "tombstone" and "removed". Any pointer
 94 // that is not null, not a tombstone and not removed, is considered to be a
 95 // pointer to a monitor.
 96 //
 97 // When all the monitor pointers from an old OMT have been transferred to the
 98 // new OMT, the old table is unlinked.
 99 //
100 // This copying from an old OMT to one generation newer OMT, will continue
101 // until all the monitor pointers from old OMTs have been transferred to the
102 // newest "current" OMT.
103 //
104 // The memory for old, unlinked OMTs will be freed after a thread-local
105 // handshake with all Java threads.
106 //
107 // Searching the OMT for a monitor pointer while there are several generations
108 // of the OMT, will start from the oldest OMT.
109 //
110 // A note about the GROW_LOAD_FACTOR: In order to guarantee that the add and
111 // search algorithms can't loop forever, we must make sure that there is at
112 // least one null pointer in the array to stop them from dead looping.
113 // Furthermore, when we grow the OMT, we must make sure that the new "current"
114 // can accommodate all the monitors from all older OMTs, while still being so
115 // sparsely populated that the C2 generated code will likely find what it's
116 // searching for at the hash index, without needing further linear searching.
117 // The grow load factor is set to 12.5%, which satisfies the above
118 // requirements. Don't change it for fun, it might backfire.
119 // -----------------------------------------------------------------------------
120 
121 Atomic<ObjectMonitorTable::Table*> ObjectMonitorTable::_curr;
122 
// One generation of the Object Monitor Table: a fixed-capacity hash table
// using open addressing with linear probing. Generations are linked through
// _prev while rebuilding is in progress; the newest generation is always
// ObjectMonitorTable::_curr. See "Theory of operations" above.
class ObjectMonitorTable::Table : public CHeapObj<mtObjectMonitor> {
  friend class ObjectMonitorTable;

  // Padding keeps the read-mostly fields and the frequently updated
  // _items_count on separate cache lines to avoid false sharing.
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
  const size_t _capacity_mask;       // One less than its power-of-two capacity
  Atomic<Table*> _prev;              // Set while growing/rebuilding
  Atomic<Entry>* _buckets;           // The payload
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(_capacity_mask) + sizeof(_prev) + sizeof(_buckets));
  Atomic<size_t> _items_count;       // Number of entries claimed in this table
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(_items_count));

  // Encode a monitor pointer as a table entry. Real monitor pointers always
  // compare above the special sentinel values (empty, tombstone, removed).
  static Entry as_entry(ObjectMonitor* monitor) {
    Entry entry = static_cast<Entry>((uintptr_t)monitor);
    assert(entry >= Entry::below_is_special, "Must be! (entry: " PTR_FORMAT ")", intptr_t(entry));
    return entry;
  }

  // Decode an entry back to a monitor pointer; must not be a sentinel.
  static ObjectMonitor* as_monitor(Entry entry) {
    assert(entry >= Entry::below_is_special, "Must be! (entry: " PTR_FORMAT ")", intptr_t(entry));
    return reinterpret_cast<ObjectMonitor*>(entry);
  }

  // Sentinel: unused bucket; terminates probe sequences.
  static Entry empty() {
    return Entry::empty;
  }

  // Sentinel: bucket that must never accept inserts in this (old) table;
  // forces concurrent inserters into a newer generation.
  static Entry tombstone() {
    return Entry::tombstone;
  }

  // Sentinel: bucket whose monitor has been removed; skipped by searches.
  static Entry removed() {
    return Entry::removed;
  }

  // Make sure we leave space for previous versions to relocate too.
  // Atomically claims room for one more item; returns false once the
  // table has reached its grow threshold.
  bool try_inc_items_count() {
    for (;;) {
      size_t population = _items_count.load_relaxed();
      if (should_grow(population)) {
        return false;
      }
      if (_items_count.compare_set(population, population + 1, memory_order_relaxed)) {
        return true;
      }
    }
  }

  // Fraction of buckets a population of count would occupy.
  double get_load_factor(size_t count) {
    return (double)count / (double)capacity();
  }

  void inc_items_count() {
    _items_count.add_then_fetch(1u, memory_order_relaxed);
  }

  void dec_items_count() {
    _items_count.sub_then_fetch(1u, memory_order_relaxed);
  }

public:
  // capacity must be a power of two; all buckets start out empty.
  Table(size_t capacity, Table* prev)
    : _capacity_mask(capacity - 1),
      _prev(prev),
      _buckets(NEW_C_HEAP_ARRAY(Atomic<Entry>, capacity, mtObjectMonitor)),
      _items_count(0)
  {
    for (size_t i = 0; i < capacity; ++i) {
      ::new (_buckets + i) Atomic<Entry>(empty());
    }
  }

  ~Table() {
    FREE_C_HEAP_ARRAY(Atomic<Entry>, _buckets);
  }

  Table* prev() {
    return _prev.load_relaxed();
  }

  size_t capacity() {
    return _capacity_mask + 1;
  }

  // True when population exceeds the GROW_LOAD_FACTOR threshold.
  bool should_grow(size_t population) {
    return get_load_factor(population) > GROW_LOAD_FACTOR;
  }

  bool should_grow() {
    return should_grow(_items_count.load_relaxed());
  }

  // Sum of the item counts of this table and all older generations.
  size_t total_items() {
    size_t current_items = _items_count.load_relaxed();
    Table* prev = _prev.load_relaxed();
    if (prev != nullptr) {
      return prev->total_items() + current_items;
    }
    return current_items;
  }

  // Find the monitor associated with obj, or null if it is not present.
  // Older generations are searched first (recursively through _prev).
  ObjectMonitor* get(oop obj, intptr_t hash) {
    // Acquire tombstones and relocations in case prev transitioned to null
    Table* prev = _prev.load_acquire();
    if (prev != nullptr) {
      ObjectMonitor* result = prev->get(obj, hash);
      if (result != nullptr) {
        return result;
      }
    }

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == tombstone() || entry == empty()) {
        // Not found
        break;
      }

      if (entry != removed() && as_monitor(entry)->object_peek() == obj) {
        // Found matching monitor.
        return as_monitor(entry);
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }

    // Rebuilding could have started by now, but if a monitor has been inserted
    // in a newer table, it was inserted after the get linearization point.
    return nullptr;
  }

  // Walk the probe sequence for obj in this (old) table, placing a tombstone
  // in the first empty slot to block concurrent inserts here. Returns the
  // existing monitor for obj if one is found, otherwise null.
  ObjectMonitor* prepare_insert(oop obj, intptr_t hash) {
    // Acquire any tombstones and relocations if prev transitioned to null.
    Table* prev = _prev.load_acquire();
    if (prev != nullptr) {
      ObjectMonitor* result = prev->prepare_insert(obj, hash);
      if (result != nullptr) {
        return result;
      }
    }

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Found an empty slot to install the new monitor in.
        // To avoid concurrent inserts succeeding, place a tombstone here.
        Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_relaxed);
        if (result == entry) {
          // Success! Nobody will try to insert here again, except reinsert from rebuilding.
          return nullptr;
        }
        entry = result;
      }

      if (entry == tombstone()) {
        // Can't insert into this table.
        return nullptr;
      }

      if (entry != removed() && as_monitor(entry)->object_peek() == obj) {
        // Found matching monitor.
        return as_monitor(entry);
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }
  }

  // Insert new_monitor for obj, or return the monitor already associated
  // with obj. Returns null when this table is too loaded to accept the
  // insert, in which case the caller must grow the table and retry.
  ObjectMonitor* get_set(oop obj, Entry new_monitor, intptr_t hash) {
    // Acquire any tombstones and relocations if prev transitioned to null.
    Table* prev = _prev.load_acquire();
    if (prev != nullptr) {
      // Sprinkle tombstones in previous tables to force concurrent inserters
      // to the latest table. We only really want to try inserting in the
      // latest table.
      ObjectMonitor* result = prev->prepare_insert(obj, hash);
      if (result != nullptr) {
        return result;
      }
    }

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Empty slot to install the new monitor
        if (try_inc_items_count()) {
          // Succeeded in claiming an item.
          Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel);
          if (result == entry) {
            // Success - already incremented.
            return as_monitor(new_monitor);
          }

          // Something else was installed in place.
          dec_items_count();
          entry = result;
        } else {
          // Out of allowance; leave space for rebuilding to succeed.
          // To avoid concurrent inserts succeeding, place a tombstone here.
          Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_acq_rel);
          if (result == entry) {
            // Success; nobody will try to insert here again, except reinsert from rebuilding.
            return nullptr;
          }
          entry = result;
        }
      }

      if (entry == tombstone()) {
        // Can't insert into this table.
        return nullptr;
      }

      if (entry != removed() && as_monitor(entry)->object_peek() == obj) {
        // Found matching monitor.
        return as_monitor(entry);
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }
  }

  // Replace the entry for obj with the removed() sentinel in this table,
  // then recursively in all older generations (newest-first ordering).
  void remove(oop obj, Entry old_monitor, intptr_t hash) {
    assert(old_monitor >= Entry::below_is_special,
           "Must be! (old_monitor: " PTR_FORMAT ")", intptr_t(old_monitor));

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // The monitor does not exist in this table.
        break;
      }

      if (entry == tombstone()) {
        // Stop searching at tombstones.
        break;
      }

      if (entry == old_monitor) {
        // Found matching entry; remove it
        bool result = bucket.compare_set(entry, removed(), memory_order_relaxed);
        assert(result, "should not fail");
        break;
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }

    // Old versions are removed after newer versions to ensure that observing
    // the monitor removed and then doing a subsequent lookup results in there
    // still not being a monitor, instead of flickering back to being there.
    // Only the deflation thread rebuilds and unlinks tables, so we do not need
    // any concurrency safe prev read below.
    if (_prev.load_relaxed() != nullptr) {
      _prev.load_relaxed()->remove(obj, old_monitor, hash);
    }
  }

  // Relocate a live monitor into this table during rebuilding. Unlike
  // get_set, reinsertion always succeeds: it may take over tombstones and
  // flip removed entries back. Called only while rebuilding (see rebuild()).
  void reinsert(oop obj, Entry new_monitor) {
    intptr_t hash = obj->mark().hash();

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Empty slot to install the new monitor.
        Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel);
        if (result == entry) {
          // Success - unconditionally increment.
          inc_items_count();
          return;
        }

        // Another monitor was installed.
        entry = result;
      }

      if (entry == tombstone()) {
        // A concurrent inserter did not get enough allowance in the table.
        // But reinsert always succeeds - we will take the spot.
        Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel);
        if (result == entry) {
          // Success - unconditionally increment.
          inc_items_count();
          return;
        }

        // Another monitor was installed.
        entry = result;
      }

      if (entry == removed()) {
        // A removed entry can be flipped back with reinsertion.
        Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_release);
        if (result == entry) {
          // Success - but don't increment; the initial entry did that for us.
          return;
        }

        // Another monitor was installed.
        entry = result;
      }

      assert(entry != empty(), "invariant");
      assert(entry != tombstone(), "invariant");
      assert(entry != removed(), "invariant");
      assert(as_monitor(entry)->object_peek() != obj, "invariant");
      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }
  }

  // Relocate all entries from the previous generation(s) into this table,
  // oldest generation first, then unlink the previous table. Driven by the
  // deflation thread (see the assert on is_being_async_deflated below).
  void rebuild() {
    Table* prev = _prev.load_relaxed();
    if (prev == nullptr) {
      // Base case for recursion - no previous version.
      return;
    }

    // Finish rebuilding up to prev as target so we can use prev as source.
    prev->rebuild();

    JavaThread* current = JavaThread::current();

    // Relocate entries from prev.
    for (size_t index = 0; index <= prev->_capacity_mask; index++) {
      if ((index & 127) == 0) {
        // Poll for safepoints to improve time to safepoint
        ThreadBlockInVM tbivm(current);
      }

      Atomic<Entry>& bucket = prev->_buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Empty slot; put a tombstone there.
        Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_acq_rel);
        if (result == empty()) {
          // Success; move to next entry.
          continue;
        }

        // Concurrent insert; relocate.
        entry = result;
      }

      if (entry != tombstone() && entry != removed()) {
        // A monitor
        ObjectMonitor* monitor = as_monitor(entry);
        oop obj = monitor->object_peek();
        if (obj != nullptr) {
          // In the current implementation the deflation thread drives
          // the rebuilding, and it will already have removed any entry
          // it has deflated. The assert is only here to make sure.
          assert(!monitor->is_being_async_deflated(), "Should be");
          // Re-insert still live monitor.
          reinsert(obj, entry);
        }
      }
    }

    // Unlink this table, releasing the tombstones and relocations.
    _prev.release_store(nullptr);
  }
};
515 
516 void ObjectMonitorTable::create() {
517   _curr.store_relaxed(new Table(128, nullptr));
518 }
519 
520 ObjectMonitor* ObjectMonitorTable::monitor_get(oop obj) {
521   const intptr_t hash = obj->mark().hash();
522   Table* curr = _curr.load_acquire();
523   ObjectMonitor* monitor = curr->get(obj, hash);
524   return monitor;
525 }
526 
// Attempt to install a table of twice the capacity of curr as the new
// current table. Returns a new table to try inserting into: either the one
// we installed, or whatever table another thread installed in the meantime.
ObjectMonitorTable::Table* ObjectMonitorTable::grow_table(Table* curr) {
  Table* result;
  Table* new_table = _curr.load_acquire();
  if (new_table != curr) {
    // Table changed; no need to try further
    return new_table;
  }

  {
    // Use MonitorDeflation_lock to only allow one inflating thread to
    // attempt to allocate the new table.
    MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);

    // Re-check under the lock: another inflating thread may have grown the
    // table while we were waiting.
    new_table = _curr.load_acquire();
    if (new_table != curr) {
      // Table changed; no need to try further
      return new_table;
    }

    new_table = new Table(curr->capacity() << 1, curr);
    // CAS rather than a plain store: the deflation thread's rebuild may
    // install its own table concurrently (see ObjectMonitorTable::rebuild,
    // which does not take MonitorDeflation_lock).
    result = _curr.compare_exchange(curr, new_table, memory_order_acq_rel);
    if (result == curr) {
      log_info(monitorinflation)("Growing object monitor table (capacity: %zu)",
                                 new_table->capacity());
      // Since we grew the table (we have a new current) we need to
      // notify the deflation thread to rebuild the table (to get rid of
      // old currents).
      ObjectSynchronizer::set_is_async_deflation_requested(true);
      ml.notify_all();
      return new_table;
    }
  }

  // Somebody else started rebuilding; restart in their new table.
  // Our unused table was never published, so it is safe to delete directly.
  delete new_table;

  return result;
}
566 
567 ObjectMonitor* ObjectMonitorTable::monitor_put_get(ObjectMonitor* monitor, oop obj) {
568   const intptr_t hash = obj->mark().hash();
569   Table* curr =  _curr.load_acquire();
570 
571   for (;;) {
572     // Curr is the latest table and is reasonably loaded.
573     ObjectMonitor* result = curr->get_set(obj, curr->as_entry(monitor), hash);
574     if (result != nullptr) {
575       return result;
576     }
577     // The table's limit was reached, we need to grow it.
578     curr = grow_table(curr);
579   }
580 }
581 
582 void ObjectMonitorTable::remove_monitor_entry(ObjectMonitor* monitor) {
583   oop obj = monitor->object_peek();
584   if (obj == nullptr) {
585     // Defer removal until subsequent rebuilding.
586     return;
587   }
588   const intptr_t hash = obj->mark().hash();
589   Table* curr =  _curr.load_acquire();
590   curr->remove(obj, curr->as_entry(monitor), hash);
591   assert(monitor_get(obj) != monitor, "should have been removed");
592 }
593 
// Before handshake; rebuild and unlink tables. Called by the deflation
// thread. Unlinked old tables are appended to delete_list; their memory is
// reclaimed by destroy() after a handshake with all Java threads.
void ObjectMonitorTable::rebuild(GrowableArray<Table*>* delete_list) {
  Table* new_table;
  {
    // Concurrent inserts while in the middle of rebuilding can result
    // in the population count increasing past the load factor limit.
    // For this to be okay we need to bound how much it may exceed the
    // limit. A sequence of tables with doubling capacity may
    // eventually, after rebuilding, reach the maximum population of
    // max_population(table_1) + max_population(table_1*2) + ... +
    // max_population(table_1*2^n).
    // I.e. max_population(2*table_1 *2^n) - max_population(table_1).
    // With a 12.5% load factor, the implication is that as long as
    // rebuilding a table will double its capacity, the maximum load
    // after rebuilding is less than 25%. However, we can't always
    // double the size each time we rebuild the table. Instead we
    // recursively estimate the population count of the chain of
    // tables (the current, and all the previous currents). If the sum
    // of the population is less than the growing factor, we do not
    // need to grow the table. If the new concurrently rebuilding
    // table is immediately filled up by concurrent inserts, then the
    // worst case load factor after the rebuild may be twice as large,
    // which is still guaranteed to be less than a 50% load. If this
    // happens, it will cause subsequent rebuilds to increase the
    // table capacity, keeping the worst case less than 50%, until the
    // load factor eventually becomes less than 12.5% again. So in
    // some sense this allows us to be fooled once, but not twice. So,
    // given the growing threshold of 12.5%, it is impossible for the
    // tables to reach a load factor above 50%. Which is more than
    // enough to guarantee the function of this concurrent hash table.
    Table* curr =  _curr.load_acquire();
    size_t need_to_accomodate = curr->total_items();
    // Only double the capacity if the whole chain's population would
    // otherwise push the new table past the grow threshold.
    size_t new_capacity = curr->should_grow(need_to_accomodate)
      ? curr->capacity() << 1
      : curr->capacity();
    new_table = new Table(new_capacity, curr);
    // CAS: an inflating thread may concurrently install its own grown table
    // under MonitorDeflation_lock (see grow_table()).
    Table* result = _curr.compare_exchange(curr, new_table, memory_order_acq_rel);
    if (result != curr) {
      // Somebody else racingly started rebuilding. Delete the
      // new_table and treat somebody else's table as the new one.
      delete new_table;
      new_table = result;
    }
    log_info(monitorinflation)("Rebuilding object monitor table (capacity: %zu)",
                               new_table->capacity());
  }

  // Collect the old generations for deferred deletion before rebuilding,
  // since Table::rebuild() unlinks them by clearing the _prev chain.
  for (Table* curr = new_table->prev(); curr != nullptr; curr = curr->prev()) {
    delete_list->append(curr);
  }

  // Rebuild with the new table as target.
  new_table->rebuild();
}
648 
649 // After handshake; destroy old tables
650 void ObjectMonitorTable::destroy(GrowableArray<Table*>* delete_list) {
651   for (ObjectMonitorTable::Table* table: *delete_list) {
652     delete table;
653   }
654 }
655 
656 address ObjectMonitorTable::current_table_address() {
657   return reinterpret_cast<address>(&_curr) + _curr.value_offset_in_bytes();
658 }
659 
// Offset of Table::_capacity_mask, for use by generated code that masks a
// hash value into a bucket index (see the theory of operations above).
ByteSize ObjectMonitorTable::table_capacity_mask_offset() {
  return byte_offset_of(Table, _capacity_mask);
}
663 
// Offset of Table::_buckets, for use by generated code (see
// C2_MacroAssembler::fast_lock() per the comment at the top of this file).
ByteSize ObjectMonitorTable::table_buckets_offset() {
  // Assumptions made from the emitted code about the layout: an
  // Atomic<Entry> bucket must be exactly one pointer with its value at
  // offset zero, so the emitted code can index the array directly.
  STATIC_ASSERT(sizeof(Atomic<Entry>) == sizeof(Entry*));
  STATIC_ASSERT(Atomic<Entry>::value_offset_in_bytes() == 0);

  return byte_offset_of(Table, _buckets);
}