/*
 * Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/perfData.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
#include "services/heapDumper.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class ClassLoaderData;

size_t CollectedHeap::_filler_array_max_size = 0;

class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;
};

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

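// Records a before/after snapshot of the heap around each GC in the shared
// event-log ring buffer, so that recent heap history remains available
// (e.g., in error reports).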
class GCHeapLog : public EventLogBase<GCMessage> {
 private:
  void log_heap(CollectedHeap* heap, bool before);

 public:
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}

  void log_heap_before(CollectedHeap* heap) {
    log_heap(heap, true);
  }
  void log_heap_after(CollectedHeap* heap) {
    log_heap(heap, false);
  }
};

void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLocker ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
              before ? "before" : "after",
              heap->total_collections(),
              heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

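// Returns the currently unused capacity. Takes Heap_lock so that capacity()
// and used() are read as a consistent pair.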
size_t CollectedHeap::unused() const {
  MutexLocker ml(Heap_lock);
  return capacity() - used();
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    _reserved.start(), _reserved.start() + capacity_in_words, _reserved.end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceUtils::chunk_free_list_summary(Metaspace::ClassType);
  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(),
                          MetaspaceUtils::get_combined_statistics(),
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap before GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  LogTarget(Debug, gc, heap) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("Heap after GC invocations=%u (full %u):", total_collections(), total_full_collections());
    ResourceMark rm;
    print_on(&ls);
  }

  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print() const { print_on(tty); }

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != NULL) {
    bs->print_on(st);
  }
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Default implementation, for collectors that don't support the feature.
bool CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return false;
}

bool CollectedHeap::is_oop(oop object) const {
  if (!is_object_aligned(object)) {
    return false;
  }

  if (!is_in(object)) {
    return false;
  }

  return true;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _capacity_at_last_gc(0),
  _used_at_last_gc(0),
  _is_gc_active(false),
  _last_whole_heap_examined_time_ns(os::javaTimeNanos()),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc)
{
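  // Compute the maximum size (in heap words) of a filler int[]: the array
  // header plus max_array_length(T_INT) elements, converted from jints to
  // heap words and aligned to the minimum object alignment.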
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  int header_size_in_bytes = arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "must be aligned to int");
  int header_size_in_ints = header_size_in_bytes / sizeof(jint);
  _filler_array_max_size = align_object_size((header_size_in_ints + max_len) / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  Thread* thread = Thread::current();
  assert(thread->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm(thread);
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_archive_time_gc:
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm(thread);
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                            size_t word_size,
                                                            Metaspace::MetadataType mdtype) {
  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");

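  // Retry loop: attempt the allocation; if the GCLocker currently blocks GC,
  // try expanding the metaspace instead (or stall until the critical section
  // is exited); otherwise request a collection via a VM operation and retry.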
  do {
    MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
    if (result != NULL) {
      return result;
    }

    if (GCLocker::is_active_and_needs_gc()) {
      // If the GCLocker is active, just expand and allocate.
      // If that does not succeed, wait if this thread is not
      // in a critical section itself.
      result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
      if (result != NULL) {
        return result;
      }
      JavaThread* jthr = JavaThread::current();
      if (!jthr->in_critical()) {
        // Wait for the JNI critical section to be exited.
        GCLocker::stall_until_clear();
        // The GC invoked by the last thread leaving the critical
        // section will be a young collection, and a full collection
        // is (currently) needed for unloading classes, so continue
        // to the next iteration to get a full GC.
        continue;
      } else {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        return NULL;
      }
    }

    {  // Need the lock to get self-consistent gc counts.
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }

    // Generate a VM operation
    VM_CollectForMetadataAllocation op(loader_data,
                                       word_size,
                                       mdtype,
                                       gc_count,
                                       full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);

    // If GC was locked out, try again. Check this before checking for success,
    // because the prologue could have succeeded while the GC was still locked out.
    if (op.gc_locked()) {
      continue;
    }

    if (op.prologue_succeeded()) {
      return op.result();
    }
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, ergo)("satisfy_failed_metadata_allocation() retried %d times,"
                            " size=" SIZE_FORMAT, loop_count, word_size);
    }
  } while (true);  // Until a GC is done
}

MemoryUsage CollectedHeap::memory_usage() {
  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
}

void CollectedHeap::set_gc_cause(GCCause::Cause v) {
  if (UsePerfData) {
    _gc_lastcause = _gc_cause;
    _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause));
    _perf_gc_cause->set_value(GCCause::to_string(v));
  }
  _gc_cause = v;
}

#ifndef PRODUCT
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // Note the mismatch: size is in (32/64-bit) heap words, while ju_addr
    // always points to a 32-bit word.
    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we computed that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'd overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first, but that just makes the
  // TLAB somewhat smaller than the biggest array, which is fine, since
  // we'll be able to fill that.
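  // As a worked example (assuming a 64-bit VM with HeapWordSize == 8 and a
  // 16-byte T_INT array header; the actual header size depends on the VM
  // configuration): 16 / 8 + 4 * (2147483647 / 8) = 1073741822 words, i.e.
  // just under the ~8 GB footprint of an int[Integer.MAX_VALUE].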
  int header_size_in_bytes = typeArrayOopDesc::base_offset_in_bytes(T_INT);
  assert(header_size_in_bytes % sizeof(jint) == 0, "header size must align to int");
  size_t max_int_size = header_size_in_bytes / HeapWordSize +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_down(max_int_size, MinObjAlignment);
}

size_t CollectedHeap::filler_array_min_size() {
  int aligned_header_size_words = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
  return align_object_size(aligned_header_size_words); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(is_object_aligned(words), "unaligned size");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    int payload_start = align_up(arrayOopDesc::base_offset_in_bytes(T_INT), HeapWordSize) / HeapWordSize;
    Copy::fill_to_words(start + payload_start,
                        words - payload_start, 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size_bytes = words * HeapWordSize - arrayOopDesc::base_offset_in_bytes(T_INT);
  assert(payload_size_bytes % sizeof(jint) == 0, "must be int aligned");
  const size_t len = payload_size_bytes / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

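  // Allocate the filler as an int[] covering exactly 'words' heap words.
  // do_zero is false because the contents are dead; in debug builds the
  // payload may instead be zapped below.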
  ObjArrayAllocator allocator(Universe::intArrayKlassObj(), words, (int)len, /* do_zero */ false);
  allocator.initialize(start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    ObjAllocator allocator(vmClasses::Object_klass(), words);
    allocator.initialize(start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm(Thread::current());  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
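    // Carve off a max-sized array, unless that would leave a remainder
    // smaller than the minimum fill size; in that case take min_fill_size
    // less, so the tail stays large enough to fill.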
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  CollectedHeap::fill_with_object(start, end, zap);
}

size_t CollectedHeap::min_dummy_object_size() const {
  return oopDesc::header_size();
}

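// Space to reserve at the end of a TLAB so that it can always be retired by
// filling its unused tail with a dummy object; zero if a minimal dummy
// object already fits within the alignment slack.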
size_t CollectedHeap::tlab_alloc_reserve() const {
  size_t min_size = min_dummy_object_size();
  return min_size > (size_t)MinObjAlignment ? align_object_size(min_size) : 0;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                           size_t requested_size,
                                           size_t* actual_size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

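// Walk all Java threads and make their TLABs parsable (optionally retiring
// them), so that heap walkers can iterate linearly over the heap without
// tripping over the unallocated tails of outstanding TLABs.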
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only be called at a safepoint or at start-up");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next();) {
    BarrierSet::barrier_set()->make_parsable(thread);
    if (UseTLAB) {
      if (retire_tlabs) {
        thread->tlab().retire(&stats);
      } else {
        thread->tlab().make_parsable();
      }
    }
  }

  stats.publish();
}

void CollectedHeap::resize_all_tlabs() {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Should only resize tlabs at safepoint");

  if (UseTLAB && ResizeTLAB) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
      thread->tlab().resize();
    }
  }
}

jlong CollectedHeap::millis_since_last_whole_heap_examined() {
  return (os::javaTimeNanos() - _last_whole_heap_examined_time_ns) / NANOSECS_PER_MILLISEC;
}

void CollectedHeap::record_whole_heap_examined_timestamp() {
  _last_whole_heap_examined_time_ns = os::javaTimeNanos();
}

void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  LogTarget(Trace, gc, classhisto) lt;
  if (lt.is_enabled()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    LogStream ls(lt);
    VM_GC_HeapInspection inspector(&ls, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(const ReservedHeapSpace& rs) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)rs.base());
  _reserved.set_end((HeapWord*)rs.end());
}

void CollectedHeap::post_initialize() {
  StringDedup::initialize();
  initialize_serviceability();
}

#ifndef PRODUCT

bool CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}

#endif  // #ifndef PRODUCT

bool CollectedHeap::supports_object_pinning() const {
  return false;
}

oop CollectedHeap::pin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
  return NULL;
}

void CollectedHeap::unpin_object(JavaThread* thread, oop obj) {
  ShouldNotReachHere();
}

bool CollectedHeap::is_archived_object(oop object) const {
  return false;
}

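// Default oop hash: object addresses are at least MinObjAlignment-aligned,
// so the low LogMinObjAlignment bits carry no information; shift them out
// before truncating the address to 32 bits.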
uint32_t CollectedHeap::hash_oop(oop obj) const {
  const uintptr_t addr = cast_from_oop<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> LogMinObjAlignment);
}

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {
  _capacity_at_last_gc = capacity();
  _used_at_last_gc     = used();
}