1 /*
  2  * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_CDS_HEAPSHARED_HPP
 26 #define SHARE_CDS_HEAPSHARED_HPP
 27 
 28 #include "cds/aotMetaspace.hpp"
 29 #include "cds/dumpTimeClassInfo.hpp"
 30 #include "classfile/compactHashtable.hpp"
 31 #include "classfile/javaClasses.hpp"
 32 #include "gc/shared/gc_globals.hpp"
 33 #include "memory/allocation.hpp"
 34 #include "memory/allStatic.hpp"
 35 #include "oops/compressedOops.hpp"
 36 #include "oops/oop.hpp"
 37 #include "oops/oopHandle.hpp"
 38 #include "oops/oopsHierarchy.hpp"
 39 #include "utilities/growableArray.hpp"
 40 #include "utilities/hashTable.hpp"
 41 
 42 #if INCLUDE_CDS_JAVA_HEAP
 43 class FileMapInfo;
 44 class KlassSubGraphInfo;
 45 class MetaspaceObjToOopHandleTable;
 46 class ResourceBitMap;
 47 
 48 struct ArchivableStaticFieldInfo;
 49 
 50 #define ARCHIVED_BOOT_LAYER_CLASS "jdk/internal/module/ArchivedBootLayer"
 51 #define ARCHIVED_BOOT_LAYER_FIELD "archivedBootLayer"
 52 
 53 // A dump time sub-graph info for Klass _k. It includes the entry points
 54 // (static fields in _k's mirror) of the archived sub-graphs reachable
 55 // from _k's mirror. It also contains a list of Klasses of the objects
 56 // within the sub-graphs.
 57 class KlassSubGraphInfo: public CHeapObj<mtClass> {
 58  private:
 59   // The class that contains the static field(s) as the entry point(s)
 60   // of archived object sub-graph(s).
 61   Klass* _k;
 62   // A list of classes need to be loaded and initialized before the archived
 63   // object sub-graphs can be accessed at runtime.
 64   GrowableArray<Klass*>* _subgraph_object_klasses;
 65   // A list of _k's static fields as the entry points of archived sub-graphs.
 66   // For each entry field, it is a tuple of field_offset, field_value
 67   GrowableArray<int>* _subgraph_entry_fields;
 68 
 69   // Does this KlassSubGraphInfo belong to the archived full module graph
 70   bool _is_full_module_graph;
 71 
 72   // Does this KlassSubGraphInfo references any classes that were loaded while
 73   // JvmtiExport::is_early_phase()!=true. If so, this KlassSubGraphInfo cannot be
 74   // used at runtime if JVMTI ClassFileLoadHook is enabled.
 75   bool _has_non_early_klasses;
 76   static bool is_non_early_klass(Klass* k);
 77   static void check_allowed_klass(InstanceKlass* ik);
 78  public:
 79   KlassSubGraphInfo(Klass* k, bool is_full_module_graph) :
 80     _k(k),  _subgraph_object_klasses(nullptr),
 81     _subgraph_entry_fields(nullptr),
 82     _is_full_module_graph(is_full_module_graph),
 83     _has_non_early_klasses(false) {}
 84 
 85   ~KlassSubGraphInfo() {
 86     if (_subgraph_object_klasses != nullptr) {
 87       delete _subgraph_object_klasses;
 88     }
 89     if (_subgraph_entry_fields != nullptr) {
 90       delete _subgraph_entry_fields;
 91     }
 92   };
 93 
 94   Klass* klass()            { return _k; }
 95   GrowableArray<Klass*>* subgraph_object_klasses() {
 96     return _subgraph_object_klasses;
 97   }
 98   GrowableArray<int>* subgraph_entry_fields() {
 99     return _subgraph_entry_fields;
100   }
101   void add_subgraph_entry_field(int static_field_offset, oop v);
102   void add_subgraph_object_klass(Klass *orig_k);
103   int num_subgraph_object_klasses() {
104     return _subgraph_object_klasses == nullptr ? 0 :
105            _subgraph_object_klasses->length();
106   }
107   bool is_full_module_graph() const { return _is_full_module_graph; }
108   bool has_non_early_klasses() const { return _has_non_early_klasses; }
109 };
110 
111 // An archived record of object sub-graphs reachable from static
112 // fields within _k's mirror. The record is reloaded from the archive
113 // at runtime.
114 class ArchivedKlassSubGraphInfoRecord {
115  private:
116   Klass* _k;
117   bool _is_full_module_graph;
118   bool _has_non_early_klasses;
119 
120   // contains pairs of field offset and value for each subgraph entry field
121   Array<int>* _entry_field_records;
122 
123   // klasses of objects in archived sub-graphs referenced from the entry points
124   // (static fields) in the containing class
125   Array<Klass*>* _subgraph_object_klasses;
126  public:
127   ArchivedKlassSubGraphInfoRecord() :
128     _k(nullptr), _entry_field_records(nullptr), _subgraph_object_klasses(nullptr) {}
129   void init(KlassSubGraphInfo* info);
130   Klass* klass() const { return _k; }
131   Array<int>* entry_field_records() const { return _entry_field_records; }
132   Array<Klass*>* subgraph_object_klasses() const { return _subgraph_object_klasses; }
133   bool is_full_module_graph() const { return _is_full_module_graph; }
134   bool has_non_early_klasses() const { return _has_non_early_klasses; }
135 };
136 #endif // INCLUDE_CDS_JAVA_HEAP
137 
// How the archived Java heap is transferred between the archive and the
// running JVM. The two non-trivial modes correspond to the two writer
// paths referenced elsewhere in this file (AOTMappedHeapWriter /
// AOTStreamedHeapWriter) — presumably direct mapping vs. streamed
// materialization of heap objects; confirm against the writer classes.
enum class HeapArchiveMode {
  _uninitialized,   // mode has not been selected yet
  _mapping,         // mapped-heap path
  _streaming        // streamed-heap path
};
143 
// HeapShared manages the archiving of Java heap objects into the CDS/AOT
// archive at dump time and their restoration/initialization at run time.
// All state and entry points are static (AllStatic).
class HeapShared: AllStatic {
  friend class VerifySharedOopClosure;

public:
  // Select the load/write mode for the archived heap (see HeapArchiveMode).
  static void initialize_loading_mode(HeapArchiveMode mode) NOT_CDS_JAVA_HEAP_RETURN;
  static void initialize_writing_mode() NOT_CDS_JAVA_HEAP_RETURN;

  // Queries for the currently selected load/write mode.
  inline static bool is_loading() NOT_CDS_JAVA_HEAP_RETURN_(false);

  inline static bool is_loading_streaming_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);
  inline static bool is_loading_mapping_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);

  inline static bool is_writing() NOT_CDS_JAVA_HEAP_RETURN_(false);

  inline static bool is_writing_streaming_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);
  inline static bool is_writing_mapping_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);

  static bool is_subgraph_root_class(InstanceKlass* ik);

  // Scratch objects for archiving Klass::java_mirror()
  static oop scratch_java_mirror(BasicType t)     NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
  static oop scratch_java_mirror(Klass* k)        NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
  static oop scratch_java_mirror(oop java_mirror) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
  static bool is_archived_boot_layer_available(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN_(false);

  static bool is_archived_heap_in_use() NOT_CDS_JAVA_HEAP_RETURN_(false);
  static bool can_use_archived_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
  // Size limits on objects eligible for the archived heap.
  static bool is_too_large_to_archive(size_t size);
  static bool is_string_too_large_to_archive(oop string);
  static bool is_too_large_to_archive(oop obj);

  static void initialize_streaming() NOT_CDS_JAVA_HEAP_RETURN;
  static void enable_gc() NOT_CDS_JAVA_HEAP_RETURN;
  static void materialize_thread_object() NOT_CDS_JAVA_HEAP_RETURN;
  static void archive_interned_string(oop string);
  static void finalize_initialization(FileMapInfo* static_mapinfo) NOT_CDS_JAVA_HEAP_RETURN;

private:
#if INCLUDE_CDS_JAVA_HEAP
  static HeapArchiveMode _heap_load_mode;
  static HeapArchiveMode _heap_write_mode;

  // statistics
  constexpr static int ALLOC_STAT_SLOTS = 16;
  static size_t _alloc_count[ALLOC_STAT_SLOTS];
  static size_t _alloc_size[ALLOC_STAT_SLOTS];
  static size_t _total_obj_count;
  static size_t _total_obj_size; // in HeapWords

  static void count_allocation(size_t size);
  static void print_stats();
public:
  static void debug_trace();
  // Hash/equality helpers used by the hashtable typedefs declared below.
  static unsigned oop_hash(oop const& p);
  static bool oop_handle_equals(const OopHandle& a, const OopHandle& b);

  class CopyKlassSubGraphInfoToArchive;

  // Dump-time bookkeeping for a single object that is being archived.
  class CachedOopInfo {
    // Used by CDSHeapVerifier.
    OopHandle _orig_referrer;

    // The location of this object inside {AOTMappedHeapWriter, AOTStreamedHeapWriter}::_buffer
    size_t _buffer_offset;

    // One or more fields in this object are pointing to non-null oops.
    bool _has_oop_pointers;

    // One or more fields in this object are pointing to MetaspaceObj
    bool _has_native_pointers;

    // >= 0 if this oop has been appended to the list of roots
    int _root_index;
  public:
    CachedOopInfo(OopHandle orig_referrer, bool has_oop_pointers)
      : _orig_referrer(orig_referrer),
        _buffer_offset(0),
        _has_oop_pointers(has_oop_pointers),
        _has_native_pointers(false),
        _root_index(-1) {}
    oop orig_referrer() const;
    void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
    size_t buffer_offset()          const { return _buffer_offset;   }
    bool has_oop_pointers()         const { return _has_oop_pointers; }
    bool has_native_pointers()      const { return _has_native_pointers; }
    void set_has_native_pointers()        { _has_native_pointers = true; }
    int  root_index()               const { return _root_index; }
    void set_root_index(int i)            { _root_index = i; }
  };

private:
  static const int INITIAL_TABLE_SIZE = 15889; // prime number
  static const int MAX_TABLE_SIZE     = 1000000;
  static bool _use_identity_hash_for_archived_object_cache;

  static unsigned archived_object_cache_hash(OopHandle const& oh);

  // Maps each original heap object (held via OopHandle) to its CachedOopInfo.
  typedef ResizeableHashTable<OopHandle, CachedOopInfo,
      AnyObj::C_HEAP,
      mtClassShared,
      HeapShared::archived_object_cache_hash,
      HeapShared::oop_handle_equals> ArchivedObjectCache;
  static ArchivedObjectCache* _archived_object_cache;

  class DumpTimeKlassSubGraphInfoTable
    : public HashTable<Klass*, KlassSubGraphInfo,
                               137, // prime number
                               AnyObj::C_HEAP,
                               mtClassShared,
                               DumpTimeSharedClassTable_hash> {};

public: // solaris compiler wants this for RunTimeKlassSubGraphInfoTable
  inline static bool record_equals_compact_hashtable_entry(
       const ArchivedKlassSubGraphInfoRecord* value, const Klass* key, int len_unused) {
    return (value->klass() == key);
  }

private:
  typedef OffsetCompactHashtable<
    const Klass*,
    const ArchivedKlassSubGraphInfoRecord*,
    record_equals_compact_hashtable_entry
    > RunTimeKlassSubGraphInfoTable;

  static DumpTimeKlassSubGraphInfoTable* _dump_time_subgraph_info_table;
  static RunTimeKlassSubGraphInfoTable _run_time_subgraph_info_table;

  static CachedOopInfo make_cached_oop_info(oop obj, oop referrer);
  static ArchivedKlassSubGraphInfoRecord* archive_subgraph_info(KlassSubGraphInfo* info);
  static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                       bool is_full_module_graph);

  // Archive object sub-graph starting from the given static field
  // in Klass k's mirror.
  static void archive_reachable_objects_from_static_field(
    InstanceKlass* k, const char* klass_name,
    int field_offset, const char* field_name);

  static void verify_subgraph_from_static_field(
    InstanceKlass* k, int field_offset) PRODUCT_RETURN;
  static void verify_reachable_objects_from(oop obj) PRODUCT_RETURN;
  static void verify_subgraph_from(oop orig_obj) PRODUCT_RETURN;
  static void check_special_subgraph_classes();

  static KlassSubGraphInfo* init_subgraph_info(Klass *k, bool is_full_module_graph);
  static KlassSubGraphInfo* get_subgraph_info(Klass *k);

  static void init_subgraph_entry_fields(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  static void init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[], TRAPS);

  // UseCompressedOops only: Used by decode_from_archive
  static address _narrow_oop_base;
  static int     _narrow_oop_shift;

  // !UseCompressedOops only: used to relocate pointers to the archived objects
  static ptrdiff_t _runtime_delta;

  // Tracks which objects have already been visited during sub-graph recording.
  typedef ResizeableHashTable<oop, bool,
      AnyObj::C_HEAP,
      mtClassShared,
      HeapShared::oop_hash> SeenObjectsTable;

  static SeenObjectsTable *_seen_objects_table;

  // The "special subgraph" contains all the archived objects that are reachable
  // from the following roots:
  //    - interned strings
  //    - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
  //    - ConstantPool::resolved_references()
  //    - Universe::<xxx>_exception_instance()
  static KlassSubGraphInfo* _dump_time_special_subgraph;              // for collecting info during dump time
  static ArchivedKlassSubGraphInfoRecord* _run_time_special_subgraph; // for initializing classes during run time.

  static GrowableArrayCHeap<oop, mtClassShared>* _pending_roots;
  static OopHandle _scratch_basic_type_mirrors[T_VOID+1];
  static MetaspaceObjToOopHandleTable* _scratch_objects_table;

  // Create the seen-objects table; must not already exist.
  static void init_seen_objects_table() {
    assert(_seen_objects_table == nullptr, "must be");
    _seen_objects_table = new (mtClass)SeenObjectsTable(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
  }
  // Destroy the seen-objects table; must exist.
  static void delete_seen_objects_table() {
    assert(_seen_objects_table != nullptr, "must be");
    delete _seen_objects_table;
    _seen_objects_table = nullptr;
  }

  // Statistics (for one round of start_recording_subgraph ... done_recording_subgraph)
  static size_t _num_new_walked_objs;
  static size_t _num_new_archived_objs;
  static size_t _num_old_recorded_klasses;

  // Statistics (for all archived subgraphs)
  static size_t _num_total_subgraph_recordings;
  static size_t _num_total_walked_objs;
  static size_t _num_total_archived_objs;
  static size_t _num_total_recorded_klasses;
  static size_t _num_total_verifications;

  static void start_recording_subgraph(InstanceKlass *k, const char* klass_name,
                                       bool is_full_module_graph);
  static void done_recording_subgraph(InstanceKlass *k, const char* klass_name);

  static bool has_been_seen_during_subgraph_recording(oop obj);
  static void set_has_been_seen_during_subgraph_recording(oop obj);
  static bool archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info);

  static void resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]);
  static void resolve_classes_for_subgraph_of(JavaThread* current, Klass* k);
  static void clear_archived_roots_of(Klass* k);
  static const ArchivedKlassSubGraphInfoRecord*
               resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS);
  static void resolve_or_init(const char* klass_name, bool do_init, TRAPS);
  static void resolve_or_init(Klass* k, bool do_init, TRAPS);
  static void init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record);

  static bool has_been_archived(oop orig_obj);
  static void prepare_resolved_references();
  static void archive_subgraphs();
  static void copy_java_mirror(oop orig_mirror, oop scratch_m);

  // PendingOop and PendingOopStack are used for recursively discovering all cacheable
  // heap objects. The recursion is done using PendingOopStack so we won't overflow the
  // C stack with deep reference chains.
  class PendingOop {
    oop _obj;
    oop _referrer;
    int _level;

  public:
    PendingOop() : _obj(nullptr), _referrer(nullptr), _level(-1) {}
    PendingOop(oop obj, oop referrer, int level) : _obj(obj), _referrer(referrer), _level(level) {}

    oop obj()      const { return _obj; }
    oop referrer() const { return _referrer; }
    int level()    const { return _level; }
  };

  class OopFieldPusher;
  using PendingOopStack = GrowableArrayCHeap<PendingOop, mtClassShared>;

  static PendingOop _object_being_archived;
  static bool walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
                              oop orig_obj, oop referrer);

  static void reset_archived_object_states(TRAPS);
  static void ensure_determinism(TRAPS);
 public:
  static void prepare_for_archiving(TRAPS);
  static void create_archived_object_cache() {
    _archived_object_cache =
      new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
  }
  static void destroy_archived_object_cache() {
    delete _archived_object_cache;
    _archived_object_cache = nullptr;
  }
  static void make_archived_object_cache_gc_safe();
  static ArchivedObjectCache* archived_object_cache() {
    return _archived_object_cache;
  }

  static CachedOopInfo* get_cached_oop_info(oop orig_obj);

  static int archive_exception_instance(oop exception);

  static bool archive_reachable_objects_from(int level,
                                             KlassSubGraphInfo* subgraph_info,
                                             oop orig_obj);

  static bool is_interned_string(oop obj);
  static bool is_dumped_interned_string(oop o);

  // Scratch objects for archiving Klass::java_mirror()
  static void set_scratch_java_mirror(Klass* k, oop mirror);
  static void remove_scratch_objects(Klass* k);
  static bool is_metadata_field(oop src_obj, int offset);
  template <typename T> static void do_metadata_offsets(oop src_obj, T callback);
  static void remap_dumped_metadata(oop src_obj, address archived_object);
  inline static void remap_loaded_metadata(oop obj);
  inline static oop maybe_remap_referent(bool is_java_lang_ref, size_t field_offset, oop referent);
  static void get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers);
  static void set_has_native_pointers(oop src_obj);
  static uintptr_t archive_location(oop src_obj);

  // We use the HeapShared::roots() array to make sure that objects stored in the
  // archived heap region are not prematurely collected. These roots include:
  //
  //    - mirrors of classes that have not yet been loaded.
  //    - ConstantPool::resolved_references() of classes that have not yet been loaded.
  //    - ArchivedKlassSubGraphInfoRecords that have not been initialized
  //    - java.lang.Module objects that have not yet been added to the module graph
  //
  // When a mirror M becomes referenced by a newly loaded class K, M will be removed
  // from HeapShared::roots() via clear_root(), and K will be responsible for
  // keeping M alive.
  //
  // Other types of roots are also cleared similarly when they become referenced.

  // Dump-time only. Returns the index of the root, which can be used at run time to read
  // the root using get_root(index, ...).
  static int append_root(oop obj);

  // AOT-compile time only.
  // Returns -1 if obj is not in the heap root set.
  static int get_root_index(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(-1);

  static GrowableArrayCHeap<oop, mtClassShared>* pending_roots() { return _pending_roots; }

  // Dump-time and runtime
  static objArrayOop root_segment(int segment_idx);
  static oop get_root(int index, bool clear=false);

  // Run-time only
  static void clear_root(int index);
  static void get_segment_indexes(int index, int& segment_index, int& internal_index);
  static void setup_test_class(const char* test_class_name) PRODUCT_RETURN;
#endif // INCLUDE_CDS_JAVA_HEAP

 public:
  static void finish_materialize_objects() NOT_CDS_JAVA_HEAP_RETURN;

  static void write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) NOT_CDS_JAVA_HEAP_RETURN;
  static objArrayOop scratch_resolved_references(ConstantPool* src);
  static void add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) NOT_CDS_JAVA_HEAP_RETURN;
  static void init_dumping() NOT_CDS_JAVA_HEAP_RETURN;
  static void init_scratch_objects_for_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  static void init_box_classes(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  // Returns true iff idx denotes the archive's heap region.
  static bool is_heap_region(int idx) {
    CDS_JAVA_HEAP_ONLY(return (idx == AOTMetaspace::hp);)
    NOT_CDS_JAVA_HEAP_RETURN_(false);
  }
  static void delete_tables_with_raw_oops() NOT_CDS_JAVA_HEAP_RETURN;

  static void resolve_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
  static void initialize_from_archived_subgraph(JavaThread* current, Klass* k) NOT_CDS_JAVA_HEAP_RETURN;

  static void init_for_dumping(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  static void init_heap_writer() NOT_CDS_JAVA_HEAP_RETURN;
  static void write_subgraph_info_table() NOT_CDS_JAVA_HEAP_RETURN;
  static void serialize_tables(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

#ifndef PRODUCT
  static bool is_a_test_class_in_unnamed_module(Klass* ik) NOT_CDS_JAVA_HEAP_RETURN_(false);
  static void initialize_test_class_from_archive(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
#endif

  static void initialize_java_lang_invoke(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
  static void init_classes_for_special_subgraph(Handle loader, TRAPS) NOT_CDS_JAVA_HEAP_RETURN;

  static bool is_lambda_form_klass(InstanceKlass* ik) NOT_CDS_JAVA_HEAP_RETURN_(false);
  static bool is_lambda_proxy_klass(InstanceKlass* ik) NOT_CDS_JAVA_HEAP_RETURN_(false);
  static bool is_string_concat_klass(InstanceKlass* ik) NOT_CDS_JAVA_HEAP_RETURN_(false);
  static bool is_archivable_hidden_klass(InstanceKlass* ik) NOT_CDS_JAVA_HEAP_RETURN_(false);

  // Used by AOTArtifactFinder
  static void start_scanning_for_oops();
  static void end_scanning_for_oops();
  static void scan_java_class(Klass* k);
  static void scan_java_mirror(oop orig_mirror);
  static void copy_and_rescan_aot_inited_mirror(InstanceKlass* ik);

  static void log_heap_roots();

  static intptr_t log_target_location(oop source_oop);
  static void log_oop_info(outputStream* st, oop source_oop, address archived_object_start, address archived_object_end);
  static void log_oop_info(outputStream* st, oop source_oop);
  static void log_oop_details(oop source_oop, address buffered_addr);
};
513 
514 #endif // SHARE_CDS_HEAPSHARED_HPP