/*
 * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class ClassLoaderData;

size_t CollectedHeap::_lab_alignment_reserve = SIZE_MAX;
Klass* CollectedHeap::_filler_object_klass = nullptr;
size_t CollectedHeap::_filler_array_max_size = 0;
size_t CollectedHeap::_stack_chunk_max_size = 0;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;
};

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

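// Record a heap snapshot (before or after a GC) into the bounded in-memory
// event log ring buffer, so recent GC heap history is available for error
// reporting.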
void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = nullptr; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

ParallelObjectIterator::ParallelObjectIterator(uint thread_num) :
  _impl(Universe::heap()->parallel_object_iterator(thread_num))
{}

ParallelObjectIterator::~ParallelObjectIterator() {
  delete _impl;
}

void ParallelObjectIterator::object_iterate(ObjectClosure* cl, uint worker_id) {
  _impl->object_iterate(cl, worker_id);
}

size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

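// Summarize the heap's virtual space: the region starts at the reserved base,
// with the committed portion approximated as [start, start + capacity) and the
// reserved end taken from the full reservation.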
VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(),
                          MetaspaceUtils::get_combined_statistics(),
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

bool CollectedHeap::contains_null(const oop* p) const {
  return *p == nullptr;
}

void CollectedHeap::print_heap_before_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap before GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != nullptr) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap after GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != nullptr) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

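// Conservative sanity check: an oop must be properly aligned, must lie within
// the heap, and (unless compact object headers make the header unsafe to read)
// its raw klass pointer must point into metaspace.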
bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  // With compact headers, we can't safely access the class, due
  // to possibly forwarded objects.
  if (!UseCompactObjectHeaders && !Metaspace::contains(object->klass_raw())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
  // end up with a shard at the end of the buffer that's smaller than
  // the smallest object.  We can't allow that because the buffer must
  // look like it's full of objects when we retire it, so we make
  // sure we have enough space for a filler int array object.
  size_t min_size = min_dummy_object_size();
  _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;

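  // The largest filler object is an int[] with the maximal jint array length.
  // Convert that element count into heap words (elements_per_word ints fit in
  // one word) and add the aligned array header to get the size cap in words.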
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = nullptr;
  }
}

// Collects the heap. Must be called by the VM thread, with the Heap_lock
// already held.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_codecache_GC_threshold:
    case GCCause::_codecache_GC_aggressive:
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

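// Slow path for a failed metaspace allocation: repeatedly retry the allocation,
// expanding the metaspace or waiting out a pending GCLocker-induced GC when the
// GCLocker is active, and otherwise requesting a collection through
// VM_CollectForMetadataAllocation, until an attempt succeeds.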
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != nullptr) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != nullptr) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for JNI critical section to be exited
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection, but a full collection
        // is (currently) needed for unloading classes, so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return nullptr;
      }
    }

    {  // Need the Heap_lock to get self-consistent gc counts
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If the GC was locked out, try again. Check this before checking for success,
    // because the prologue could have succeeded while the GC was still locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

// Returns the header size in words aligned to the requirements of the
// array object type.
static int int_array_header_size() {
  size_t typesize_in_bytes = arrayOopDesc::header_size_in_bytes();
  return (int)align_up(typesize_in_bytes, HeapWordSize)/HeapWordSize;
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = int_array_header_size() +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(int_array_header_size()); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

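// Overwrite the payload of a filler array (everything past the array header)
// with the given 32-bit pattern.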
void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  Copy::fill_to_words(start + filler_array_hdr_size(),
                      words - filler_array_hdr_size(), value);
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  if (CDSConfig::is_dumping_heap()) {
    // This array is written into the CDS archive. Make sure it
    // has deterministic contents.
    zap_filler_array_with(start, words, 0);
  } else {
    DEBUG_ONLY(zap_filler_array(start, words, zap);)
  }
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(CollectedHeap::filler_object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.

  // Filling the range may require multiple objects, since a single filler array
  // is limited to filler_array_max_size words. Fill with maximum-sized arrays
  // until the remainder fits into a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
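    // Take a full max-sized chunk unless that would leave a remainder smaller
    // than the minimum fill size; in that case take max - min words so the
    // final remainder can still be filled with an object.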
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return nullptr;
}

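// Make the heap walkable before a heap inspection: each Java thread's TLAB is
// either retired or has its unused portion made parsable, so heap iteration
// does not encounter unformatted free space.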
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != nullptr, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  StringDedup::initialize();
  initialize_serviceability();
}

#ifndef PRODUCT

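// Stress support: when PromotionFailureALot is enabled, artificially report a
// promotion failure once PromotionFailureALotInterval collections have elapsed
// since the last reset and PromotionFailureALotCount attempts have been made.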
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {
  _capacity_at_last_gc = capacity();
  _used_at_last_gc     = used();
}