1 /*
  2  * Copyright (c) 2024, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "logging/log.hpp"
 26 #include "runtime/interfaceSupport.inline.hpp"
 27 #include "runtime/javaThread.hpp"
 28 #include "runtime/mutexLocker.hpp"
 29 #include "runtime/objectMonitor.inline.hpp"
 30 #include "runtime/objectMonitorTable.hpp"
 31 #include "runtime/safepoint.hpp"
 32 #include "runtime/synchronizer.hpp"
 33 #include "runtime/thread.hpp"
 34 #include "runtime/timerTrace.hpp"
 35 #include "runtime/trimNativeHeap.hpp"
 36 #include "utilities/debug.hpp"
 37 #include "utilities/globalDefinitions.hpp"
 38 
 39 // -----------------------------------------------------------------------------
 40 // Theory of operations -- Object Monitor Table:
 41 //
 42 // The OMT (Object Monitor Table) is a concurrent hash table specifically
 43 // designed so that it (in the normal case) can be searched from C2 generated
 44 // code.
 45 //
 46 // In its simplest form it consists of:
 47 //  1. An array of pointers.
 48 //  2. The size (capacity) of the array, which is always a power of two.
 49 //
 50 // When you want to find a monitor associated with an object, you extract the
 51 // hash value of the object. Then calculate an index by taking the hash value
 52 // and bit-wise AND it with the capacity mask (i.e., size-1) of the OMT. Now
 53 // use that index into the OMT's array of pointers. If the pointer is non
 54 // null, check if it's a monitor pointer that is associated with the object.
 55 // If so you're done. If the pointer is non null, but associated with another
 56 // object, you start looking at (index+1), (index+2) and so on, until you
 57 // either find the correct monitor, or a null pointer. Finding a null pointer
 58 // means that the monitor is simply not in the OMT.
 59 //
 60 // If the size of the pointer array is significantly larger than the number of
 61 // pointers in it, the chance of finding the monitor at the hash index
// (without any further linear searching) is quite high. It is also
// straightforward to generate C2 code for this, which for the fast path
// doesn't contain any branching at all. See: C2_MacroAssembler::fast_lock().
 65 //
 66 // When the number of monitors (pointers in the array) reaches above the
 67 // allowed limit (defined by the GROW_LOAD_FACTOR symbol) we need to grow the
 68 // table.
 69 //
 70 // A simple description of how growing the OMT is done, is to say that we
 71 // allocate a new table (twice as large as the old one), and then copy all the
 72 // old monitor pointers from the old table to the new.
 73 //
 74 // But since the OMT is a concurrent hash table and things need to work for
 75 // other clients of the OMT while we grow it, it gets a bit more
 76 // complicated.
 77 //
 78 // The new and (potentially several) old table(s) may exist at the same
 79 // time. The newest is always called the "current", and the older ones are
 80 // singly linked using a "prev" pointer.
 81 //
 82 // As soon as we have allocated and linked in the new "current" OMT, all new
 83 // monitor pointers will be added to the new table. Effectively making the
 84 // atomic switch from "old current" to "new current" a linearization point.
 85 //
 86 // After that we start to go through all the indexes in the old table. If the
 87 // index is empty (the pointer is null) we put a "tombstone" into that index,
 88 // which will prevent any future concurrent insert from ending up in that
 89 // index.
 90 //
 91 // If the index contains a monitor pointer, we insert that monitor pointer
 92 // into the OMT which can be considered as one generation newer. If the index
 93 // contains a "removed" pointer, we just ignore it.
 94 //
 95 // We use special pointer values for "tombstone" and "removed". Any pointer
 96 // that is not null, not a tombstone and not removed, is considered to be a
 97 // pointer to a monitor.
 98 //
 99 // When all the monitor pointers from an old OMT have been transferred to the
100 // new OMT, the old table is unlinked.
101 //
102 // This copying from an old OMT to one generation newer OMT, will continue
103 // until all the monitor pointers from old OMTs have been transferred to the
104 // newest "current" OMT.
105 //
106 // The memory for old, unlinked OMTs will be freed after a thread-local
107 // handshake with all Java threads.
108 //
109 // Searching the OMT for a monitor pointer while there are several generations
110 // of the OMT, will start from the oldest OMT.
111 //
112 // A note about the GROW_LOAD_FACTOR: In order to guarantee that the add and
113 // search algorithms can't loop forever, we must make sure that there is at
114 // least one null pointer in the array to stop them from dead looping.
115 // Furthermore, when we grow the OMT, we must make sure that the new "current"
116 // can accommodate all the monitors from all older OMTs, while still being so
117 // sparsely populated that the C2 generated code will likely find what it's
118 // searching for at the hash index, without needing further linear searching.
119 // The grow load factor is set to 12.5%, which satisfies the above
120 // requirements. Don't change it for fun, it might backfire.
121 // -----------------------------------------------------------------------------
122 
123 // Get the identity hash from an oop, handling both compact and legacy headers.
124 // Returns 0 if the object has not been hashed yet (meaning no monitor exists
125 // for it in the table).
126 static intptr_t object_hash(oop obj) {
127   markWord mark = obj->mark();
128   if (UseCompactObjectHeaders) {
129     if (!mark.is_hashed()) {
130       return 0;
131     }
132     return static_cast<intptr_t>(ObjectSynchronizer::get_hash(mark, obj));
133   } else {
134     return mark.hash();
135   }
136 }
137 
// The newest ("current") table generation. Older generations remain reachable
// through each Table's _prev link until rebuilding unlinks them.
Atomic<ObjectMonitorTable::Table*> ObjectMonitorTable::_curr;
139 
// One generation of the object monitor table: an open-addressing (linear
// probing) array of entries, where an entry is either a monitor pointer or
// one of the special values empty/tombstone/removed. See the "Theory of
// operations" comment at the top of this file.
class ObjectMonitorTable::Table : public CHeapObj<mtObjectMonitor> {
  friend class ObjectMonitorTable;

  // Padding isolates the mostly-read table description fields from the
  // frequently updated _items_count, so counter updates do not invalidate
  // the cache line used by lookups.
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
  const size_t _capacity_mask;       // One less than its power-of-two capacity
  Atomic<Table*> _prev;              // Set while growing/rebuilding
  Atomic<Entry>* _buckets;           // The payload
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(_capacity_mask) + sizeof(_prev) + sizeof(_buckets));
  Atomic<size_t> _items_count;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(_items_count));

  // Encode a monitor pointer as a table entry. Monitor addresses are always
  // above the reserved range of special values (asserted below).
  static Entry as_entry(ObjectMonitor* monitor) {
    Entry entry = static_cast<Entry>((uintptr_t)monitor);
    assert(entry >= Entry::below_is_special, "Must be! (entry: " PTR_FORMAT ")", intptr_t(entry));
    return entry;
  }

  // Decode a table entry back into a monitor pointer. Must not be called on
  // one of the special values.
  static ObjectMonitor* as_monitor(Entry entry) {
    assert(entry >= Entry::below_is_special, "Must be! (entry: " PTR_FORMAT ")", intptr_t(entry));
    return reinterpret_cast<ObjectMonitor*>(entry);
  }

  // Special value: bucket has never held a monitor; terminates probing.
  static Entry empty() {
    return Entry::empty;
  }

  // Special value: bucket is blocked for inserts in this table generation
  // (placed by rebuilding, or when the insert allowance runs out). Only a
  // reinsert during rebuilding may overwrite it.
  static Entry tombstone() {
    return Entry::tombstone;
  }

  // Special value: bucket held a monitor that was removed; probing skips it,
  // and reinsertion during rebuilding may flip it back to a monitor.
  static Entry removed() {
    return Entry::removed;
  }

  // Try to claim one unit of this table's insert allowance. Fails once the
  // population reaches the grow threshold.
  // Make sure we leave space for previous versions to relocate too.
  bool try_inc_items_count() {
    for (;;) {
      size_t population = _items_count.load_relaxed();
      if (should_grow(population)) {
        return false;
      }
      if (_items_count.compare_set(population, population + 1, memory_order_relaxed)) {
        return true;
      }
    }
  }

  // Population expressed as a fraction of this table's capacity.
  double get_load_factor(size_t count) {
    return (double)count / (double)capacity();
  }

  void inc_items_count() {
    _items_count.add_then_fetch(1u, memory_order_relaxed);
  }

  void dec_items_count() {
    _items_count.sub_then_fetch(1u, memory_order_relaxed);
  }

public:
  // capacity must be a power of two (the mask arithmetic relies on it);
  // prev links this table to the older generation it supersedes, or null
  // for the initial table.
  Table(size_t capacity, Table* prev)
    : _capacity_mask(capacity - 1),
      _prev(prev),
      _buckets(NEW_C_HEAP_ARRAY(Atomic<Entry>, capacity, mtObjectMonitor)),
      _items_count(0)
  {
    // All buckets start out empty.
    for (size_t i = 0; i < capacity; ++i) {
      ::new (_buckets + i) Atomic<Entry>(empty());
    }
  }

  ~Table() {
    FREE_C_HEAP_ARRAY(Atomic<Entry>, _buckets);
  }

  Table* prev() {
    return _prev.load_relaxed();
  }

  size_t capacity() {
    return _capacity_mask + 1;
  }

  // True when the given population exceeds the GROW_LOAD_FACTOR threshold.
  bool should_grow(size_t population) {
    return get_load_factor(population) > GROW_LOAD_FACTOR;
  }

  bool should_grow() {
    return should_grow(_items_count.load_relaxed());
  }

  // Sum of the item counts of this table and all older generations; used by
  // the rebuilder to size a replacement table.
  size_t total_items() {
    size_t current_items = _items_count.load_relaxed();
    Table* prev = _prev.load_relaxed();
    if (prev != nullptr) {
      return prev->total_items() + current_items;
    }
    return current_items;
  }

  // Find the monitor associated with obj, or null if there is none. Older
  // generations are searched first (via recursion on prev), matching the
  // search order described in the theory-of-operations comment.
  ObjectMonitor* get(oop obj, intptr_t hash) {
    // Acquire tombstones and relocations in case prev transitioned to null
    Table* prev = _prev.load_acquire();
    if (prev != nullptr) {
      ObjectMonitor* result = prev->get(obj, hash);
      if (result != nullptr) {
        return result;
      }
    }

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == tombstone() || entry == empty()) {
        // Not found
        break;
      }

      if (entry != removed() && as_monitor(entry)->object_peek() == obj) {
        // Found matching monitor.
        return as_monitor(entry);
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }

    // Rebuilding could have started by now, but if a monitor has been inserted
    // in a newer table, it was inserted after the get linearization point.
    return nullptr;
  }

  // Walk obj's probe path in this (and recursively, older) generations,
  // placing tombstones in empty buckets so that concurrent inserters cannot
  // succeed here. Returns the existing monitor for obj if one is found,
  // otherwise null (meaning: insert must happen in a newer table).
  ObjectMonitor* prepare_insert(oop obj, intptr_t hash) {
    // Acquire any tombstones and relocations if prev transitioned to null.
    Table* prev = _prev.load_acquire();
    if (prev != nullptr) {
      ObjectMonitor* result = prev->prepare_insert(obj, hash);
      if (result != nullptr) {
        return result;
      }
    }

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Found an empty slot to install the new monitor in.
        // To avoid concurrent inserts succeeding, place a tombstone here.
        Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_relaxed);
        if (result == entry) {
          // Success! Nobody will try to insert here again, except reinsert from rebuilding.
          return nullptr;
        }
        entry = result;
      }

      if (entry == tombstone()) {
        // Can't insert into this table.
        return nullptr;
      }

      if (entry != removed() && as_monitor(entry)->object_peek() == obj) {
        // Found matching monitor.
        return as_monitor(entry);
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }
  }

  // Insert new_monitor for obj, unless obj already has a monitor, in which
  // case the existing one is returned. Returns null if this table's insert
  // allowance is exhausted; the caller is expected to grow and retry.
  ObjectMonitor* get_set(oop obj, Entry new_monitor, intptr_t hash) {
    // Acquire any tombstones and relocations if prev transitioned to null.
    Table* prev = _prev.load_acquire();
    if (prev != nullptr) {
      // Sprinkle tombstones in previous tables to force concurrent inserters
      // to the latest table. We only really want to try inserting in the
      // latest table.
      ObjectMonitor* result = prev->prepare_insert(obj, hash);
      if (result != nullptr) {
        return result;
      }
    }

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Empty slot to install the new monitor
        if (try_inc_items_count()) {
          // Succeeding in claiming an item.
          Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel);
          if (result == entry) {
            // Success - already incremented.
            return as_monitor(new_monitor);
          }

          // Something else was installed in place.
          dec_items_count();
          entry = result;
        } else {
          // Out of allowance; leave space for rebuilding to succeed.
          // To avoid concurrent inserts succeeding, place a tombstone here.
          Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_acq_rel);
          if (result == entry) {
            // Success; nobody will try to insert here again, except reinsert from rebuilding.
            return nullptr;
          }
          entry = result;
        }
      }

      if (entry == tombstone()) {
        // Can't insert into this table.
        return nullptr;
      }

      if (entry != removed() && as_monitor(entry)->object_peek() == obj) {
        // Found matching monitor.
        return as_monitor(entry);
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }
  }

  // Remove the entry mapping obj to old_monitor from this table, then from
  // all older generations (newest-first, see comment below).
  void remove(oop obj, Entry old_monitor, intptr_t hash) {
    assert(old_monitor >= Entry::below_is_special,
           "Must be! (old_monitor: " PTR_FORMAT ")", intptr_t(old_monitor));

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // The monitor does not exist in this table.
        break;
      }

      if (entry == tombstone()) {
        // Stop searching at tombstones.
        break;
      }

      if (entry == old_monitor) {
        // Found matching entry; remove it
        bool result = bucket.compare_set(entry, removed(), memory_order_relaxed);
        assert(result, "should not fail");
        break;
      }

      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }

    // Old versions are removed after newer versions to ensure that observing
    // the monitor removed and then doing a subsequent lookup results in there
    // still not being a monitor, instead of flickering back to being there.
    // Only the deflation thread rebuilds and unlinks tables, so we do not need
    // any concurrency safe prev read below.
    if (_prev.load_relaxed() != nullptr) {
      _prev.load_relaxed()->remove(obj, old_monitor, hash);
    }
  }

  // Relocate a still-live monitor entry into this table. Called from
  // rebuild() (the deflation thread); always succeeds — it may overwrite
  // tombstones and removed entries, unlike a normal insert.
  void reinsert(oop obj, Entry new_monitor) {
    intptr_t hash = as_monitor(new_monitor)->hash();

    const size_t start_index = size_t(hash) & _capacity_mask;
    size_t index = start_index;

    for (;;) {
      Atomic<Entry>& bucket = _buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Empty slot to install the new monitor.
        Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel);
        if (result == entry) {
          // Success - unconditionally increment.
          inc_items_count();
          return;
        }

        // Another monitor was installed.
        entry = result;
      }

      if (entry == tombstone()) {
        // A concurrent inserter did not get enough allowance in the table.
        // But reinsert always succeeds - we will take the spot.
        Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_acq_rel);
        if (result == entry) {
          // Success - unconditionally increment.
          inc_items_count();
          return;
        }

        // Another monitor was installed.
        entry = result;
      }

      if (entry == removed()) {
        // A removed entry can be flipped back with reinsertion.
        Entry result = bucket.compare_exchange(entry, new_monitor, memory_order_release);
        if (result == entry) {
          // Success - but don't increment; the initial entry did that for us.
          return;
        }

        // Another monitor was installed.
        entry = result;
      }

      assert(entry != empty(), "invariant");
      assert(entry != tombstone(), "invariant");
      assert(entry != removed(), "invariant");
      assert(as_monitor(entry)->object_peek() != obj, "invariant");
      index = (index + 1) & _capacity_mask;
      assert(index != start_index, "invariant");
    }
  }

  // Relocate all live entries from the older generations into this table,
  // then unlink them. Recurses on prev first so that the oldest generation
  // is processed before newer ones.
  void rebuild() {
    Table* prev = _prev.load_relaxed();
    if (prev == nullptr) {
      // Base case for recursion - no previous version.
      return;
    }

    // Finish rebuilding up to prev as target so we can use prev as source.
    prev->rebuild();

    JavaThread* current = JavaThread::current();

    // Relocate entries from prev.
    for (size_t index = 0; index <= prev->_capacity_mask; index++) {
      if ((index & 127) == 0) {
        // Poll for safepoints to improve time to safepoint
        ThreadBlockInVM tbivm(current);
      }

      Atomic<Entry>& bucket = prev->_buckets[index];
      Entry entry = bucket.load_acquire();

      if (entry == empty()) {
        // Empty slot; put a tombstone there.
        Entry result = bucket.compare_exchange(entry, tombstone(), memory_order_acq_rel);
        if (result == empty()) {
          // Success; move to next entry.
          continue;
        }

        // Concurrent insert; relocate.
        entry = result;
      }

      if (entry != tombstone() && entry != removed()) {
        // A monitor
        ObjectMonitor* monitor = as_monitor(entry);
        oop obj = monitor->object_peek();
        if (obj != nullptr) {
          // In the current implementation the deflation thread drives
          // the rebuilding, and it will already have removed any entry
          // it has deflated. The assert is only here to make sure.
          assert(!monitor->is_being_async_deflated(), "Should be");
          // Re-insert still live monitor.
          reinsert(obj, entry);
        }
      }
    }

    // Unlink this table, releasing the tombstones and relocations.
    _prev.release_store(nullptr);
  }
};
532 
533 void ObjectMonitorTable::create() {
534   _curr.store_relaxed(new Table(128, nullptr));
535 }
536 
537 ObjectMonitor* ObjectMonitorTable::monitor_get(oop obj) {
538   const intptr_t hash = object_hash(obj);
539   if (hash == 0) return nullptr;
540   Table* curr = _curr.load_acquire();
541   ObjectMonitor* monitor = curr->get(obj, hash);
542   return monitor;
543 }
544 
// Returns a new table to try inserting into. Either installs a table with
// twice curr's capacity as the new current, or returns whatever table another
// thread installed in the meantime.
ObjectMonitorTable::Table* ObjectMonitorTable::grow_table(Table* curr) {
  Table* result;
  Table* new_table = _curr.load_acquire();
  if (new_table != curr) {
    // Table changed; no need to try further
    return new_table;
  }

  {
    // Use MonitorDeflation_lock to only allow one inflating thread to
    // attempt to allocate the new table.
    MonitorLocker ml(MonitorDeflation_lock, Mutex::_no_safepoint_check_flag);

    // Re-check under the lock; another thread may have grown the table
    // while we were waiting for it.
    new_table = _curr.load_acquire();
    if (new_table != curr) {
      // Table changed; no need to try further
      return new_table;
    }

    new_table = new Table(curr->capacity() << 1, curr);
    // CAS rather than plain store: the rebuilder (see rebuild() below) swaps
    // _curr without holding MonitorDeflation_lock, so it can still race us.
    result = _curr.compare_exchange(curr, new_table, memory_order_acq_rel);
    if (result == curr) {
      log_info(monitorinflation)("Growing object monitor table (capacity: %zu)",
                                 new_table->capacity());
      // Since we grew the table (we have a new current) we need to
      // notify the deflation thread to rebuild the table (to get rid of
      // old currents).
      ObjectSynchronizer::set_is_async_deflation_requested(true);
      ml.notify_all();
      return new_table;
    }
  }

  // Somebody else started rebuilding; restart in their new table.
  delete new_table;

  return result;
}
584 
585 ObjectMonitor* ObjectMonitorTable::monitor_put_get(ObjectMonitor* monitor, oop obj) {
586   const intptr_t hash = monitor->hash();
587   Table* curr =  _curr.load_acquire();
588 
589   for (;;) {
590     // Curr is the latest table and is reasonably loaded.
591     ObjectMonitor* result = curr->get_set(obj, curr->as_entry(monitor), hash);
592     if (result != nullptr) {
593       return result;
594     }
595     // The table's limit was reached, we need to grow it.
596     curr = grow_table(curr);
597   }
598 }
599 
600 void ObjectMonitorTable::remove_monitor_entry(ObjectMonitor* monitor) {
601   oop obj = monitor->object_peek();
602   if (obj == nullptr) {
603     // Defer removal until subsequent rebuilding.
604     return;
605   }
606   const intptr_t hash = monitor->hash();
607   Table* curr =  _curr.load_acquire();
608   curr->remove(obj, curr->as_entry(monitor), hash);
609   assert(monitor_get(obj) != monitor, "should have been removed");
610 }
611 
612 // Before handshake; rebuild and unlink tables.
613 void ObjectMonitorTable::rebuild(GrowableArray<Table*>* delete_list) {
614   Table* new_table;
615   {
616     // Concurrent inserts while in the middle of rebuilding can result
617     // in the population count increasing past the load factor limit.
618     // For this to be okay we need to bound how much it may exceed the
619     // limit. A sequence of tables with doubling capacity may
620     // eventually, after rebuilding, reach the maximum population of
621     // max_population(table_1) + max_population(table_1*2) + ... +
622     // max_population(table_1*2^n).
623     // I.e. max_population(2*table_1 *2^n) - max_population(table_1).
624     // With a 12.5% load factor, the implication is that as long as
625     // rebuilding a table will double its capacity, the maximum load
626     // after rebuilding is less than 25%. However, we can't always
627     // double the size each time we rebuild the table. Instead we
628     // recursively estimate the population count of the chain of
629     // tables (the current, and all the previous currents). If the sum
630     // of the population is less than the growing factor, we do not
631     // need to grow the table. If the new concurrently rebuilding
632     // table is immediately filled up by concurrent inserts, then the
633     // worst case load factor after the rebuild may be twice as large,
634     // which is still guaranteed to be less than a 50% load. If this
635     // happens, it will cause subsequent rebuilds to increase the
636     // table capacity, keeping the worst case less than 50%, until the
637     // load factor eventually becomes less than 12.5% again. So in
638     // some sense this allows us to be fooled once, but not twice. So,
639     // given the growing threshold of 12.5%, it is impossible for the
640     // tables to reach a load factor above 50%. Which is more than
641     // enough to guarantee the function of this concurrent hash table.
642     Table* curr =  _curr.load_acquire();
643     size_t need_to_accomodate = curr->total_items();
644     size_t new_capacity = curr->should_grow(need_to_accomodate)
645       ? curr->capacity() << 1
646       : curr->capacity();
647     new_table = new Table(new_capacity, curr);
648     Table* result = _curr.compare_exchange(curr, new_table, memory_order_acq_rel);
649     if (result != curr) {
650       // Somebody else racingly started rebuilding. Delete the
651       // new_table and treat somebody else's table as the new one.
652       delete new_table;
653       new_table = result;
654     }
655     log_info(monitorinflation)("Rebuilding object monitor table (capacity: %zu)",
656                                new_table->capacity());
657   }
658 
659   for (Table* curr = new_table->prev(); curr != nullptr; curr = curr->prev()) {
660     delete_list->append(curr);
661   }
662 
663   // Rebuild with the new table as target.
664   new_table->rebuild();
665 }
666 
667 // After handshake; destroy old tables
668 void ObjectMonitorTable::destroy(GrowableArray<Table*>* delete_list) {
669   for (ObjectMonitorTable::Table* table: *delete_list) {
670     delete table;
671   }
672 }
673 
674 address ObjectMonitorTable::current_table_address() {
675   return reinterpret_cast<address>(&_curr) + _curr.value_offset_in_bytes();
676 }
677 
// Offset of Table::_capacity_mask, for use by generated code.
ByteSize ObjectMonitorTable::table_capacity_mask_offset() {
  return byte_offset_of(Table, _capacity_mask);
}
681 
// Offset of Table::_buckets, for use by generated code.
ByteSize ObjectMonitorTable::table_buckets_offset() {
  // Assumptions made from the emitted code about the layout.
  STATIC_ASSERT(sizeof(Atomic<Entry>) == sizeof(Entry*));
  STATIC_ASSERT(Atomic<Entry>::value_offset_in_bytes() == 0);

  return byte_offset_of(Table, _buckets);
}