/*
 * Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/perfData.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class ClassLoaderData;

size_t CollectedHeap::_lab_alignment_reserve = ~(size_t)0;
Klass* CollectedHeap::_filler_object_klass = NULL;
size_t CollectedHeap::_filler_array_max_size = 0;
size_t CollectedHeap::_stack_chunk_max_size = 0;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;
};

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

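// The heap printout below is captured into the record's fixed-size
// FormatBuffer<1024> backing store through a bounded stringStream, so output
// beyond the buffer capacity is silently truncated.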
void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

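// Usage sketch (hypothetical caller): one iterator is shared by N workers,
// each of which calls object_iterate() with its own closure and worker id:
//
//   ParallelObjectIterator poi(num_workers);
//   // in worker i:
//   poi.object_iterate(&my_closure, i);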
ParallelObjectIterator::ParallelObjectIterator(uint thread_num) :
  _impl(Universe::heap()->parallel_object_iterator(thread_num))
{}

ParallelObjectIterator::~ParallelObjectIterator() {
  delete _impl;
}

void ParallelObjectIterator::object_iterate(ObjectClosure* cl, uint worker_id) {
  _impl->object_iterate(cl, worker_id);
}

size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(),
                          MetaspaceUtils::get_combined_statistics(),
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap before GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap after GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != NULL) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

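  // A Klass lives in metaspace, never in the Java heap, so a klass field
  // that points back into the heap cannot be a real Klass pointer; the
  // value is rejected as garbage rather than a valid oop.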
  if (is_in(object->klass_or_null())) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
  // If the minimum object size is greater than MinObjAlignment, we can
  // end up with a shard at the end of the buffer that's smaller than
  // the smallest object.  We can't allow that because the buffer must
  // look like it's full of objects when we retire it, so we make
  // sure we have enough space for a filler int array object.
  size_t min_size = min_dummy_object_size();
  _lab_alignment_reserve = min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
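  // For example, if the smallest heap object is 2 words but MinObjAlignment
  // is 1 word, retiring a buffer could leave a 1-word tail that no filler
  // object fits into; reserving align_object_size(min_size) words avoids
  // ever creating such a tail.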

  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_codecache_GC_threshold:
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

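// Retry loop, in outline: (1) try a normal metaspace allocation; (2) if the
// GCLocker is active, try expanding instead of collecting, stalling
// non-critical threads until the critical section clears; (3) otherwise
// snapshot the gc counts under the Heap_lock, request a
// VM_CollectForMetadataAllocation operation, and loop until the operation
// actually ran (its prologue succeeded without the GC being locked out).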
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for the JNI critical section to be exited.
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection, and a full collection
        // is (currently) needed for unloading classes, so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need the lock to get self-consistent gc counts.
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If the GC was locked out, try again. Check this before checking whether
    // the prologue succeeded, because the prologue could have succeeded while
    // the GC was still locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retries %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch: size is in HeapWords (32- or 64-bit), while ju_addr
    // always steps through 32-bit words.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that the reasonable way, as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
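  // For example, on a 64-bit VM (HeapWordSize == 8, sizeof(jint) == 4):
  // max_jint / 8 == 268435455, times 4 gives 1073741820 words, only three
  // words short of the exact maximum of header_size + 1073741823 words.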
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_hdr_size() {
  return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

void CollectedHeap::zap_filler_array_with(HeapWord* start, size_t words, juint value) {
  Copy::fill_to_words(start + filler_array_hdr_size(),
                      words - filler_array_hdr_size(), value);
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    zap_filler_array_with(start, words, 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
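  // payload_size is in HeapWords; len is the corresponding jint element
  // count (e.g. two jint elements per word on 64-bit).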
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  ObjArrayAllocator allocator(Universe::fillerArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  if (DumpSharedSpaces) {
    // This array is written into the CDS archive. Make sure it
    // has deterministic contents.
    zap_filler_array_with(start, words, 0);
  } else {
    DEBUG_ONLY(zap_filler_array(start, words, zap);)
  }
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(CollectedHeap::filler_object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
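    // If carving off a full max-sized chunk would leave a tail smaller than
    // min_fill_size(), take max - min instead so the remainder stays large
    // enough to fill.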
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

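// A heap is "parsable" when it can be walked linearly, object by object.
// TLAB tails are either retired (their unused space filled with dummy
// objects) or otherwise made walkable, so heap iteration does not stumble
// over unformatted gaps.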
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  StringDedup::initialize();
  initialize_serviceability();
}

#ifndef PRODUCT

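// Develop-time fault injection: with, say, PromotionFailureALotInterval == 5
// and PromotionFailureALotCount == 1000, a promotion failure is simulated on
// every 1000th promotion attempt once at least 5 GCs have passed since the
// last reset.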
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

bool CollectedHeap::is_archived_object(oop object) const {
  return false;
}

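// Default oop hash: the object's address with the always-zero alignment bits
// shifted away. Collectors whose object addresses are unstable or carry
// metadata bits are expected to override this.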
uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {
  _capacity_at_last_gc = capacity();
  _used_at_last_gc     = used();
}