
src/hotspot/share/cds/heapShared.cpp

*** 21,12 ***
--- 21,14 ---
   * questions.
   *
   */
  
  #include "cds/aotArtifactFinder.hpp"
+ #include "cds/aotCacheAccess.hpp"
  #include "cds/aotClassInitializer.hpp"
  #include "cds/aotClassLocation.hpp"
+ #include "cds/aotConstantPoolResolver.hpp"
  #include "cds/aotLogging.hpp"
  #include "cds/aotMappedHeapLoader.hpp"
  #include "cds/aotMappedHeapWriter.hpp"
  #include "cds/aotMetaspace.hpp"
  #include "cds/aotOopChecker.hpp"

*** 48,10 ***
--- 50,11 ---
  #include "classfile/symbolTable.hpp"
  #include "classfile/systemDictionary.hpp"
  #include "classfile/systemDictionaryShared.hpp"
  #include "classfile/vmClasses.hpp"
  #include "classfile/vmSymbols.hpp"
+ #include "code/aotCodeCache.hpp"
  #include "gc/shared/collectedHeap.hpp"
  #include "gc/shared/gcLocker.hpp"
  #include "gc/shared/gcVMOperations.hpp"
  #include "logging/log.hpp"
  #include "logging/logStream.hpp"

*** 93,10 ***
--- 96,21 ---
    bool valid() {
      return klass_name != nullptr;
    }
  };
  
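+ // RAII helper that records what the heap archiving code is currently working
+ // on. The constructor pushes a description onto _context and the destructor
+ // pops it, so exit_on_error() can print a "stack" of where a failure occurred.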
+ class HeapShared::ContextMark : public StackObj {
+   ResourceMark rm;
+ public:
+   ContextMark(const char* c) : rm{} {
+     _context->push(c);
+   }
+   ~ContextMark() {
+     _context->pop();
+   }
+ };
+ 
  // Anything that goes in the header must be thoroughly purged from uninitialized memory
  // as it will be written to disk. Therefore, the constructors memset the memory to 0.
  // This is not the prettiest thing, but we need to know every byte is initialized,
  // including potential padding between fields.
  

*** 176,10 ***
--- 190,11 ---
    {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
    {"java/util/ImmutableCollections",              "archivedObjects"},
    {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
    {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
    {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
+   {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking
  
  #ifndef PRODUCT
    {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
  #endif
    {nullptr, nullptr},

*** 194,10 ***
--- 209,11 ---
  };
  
  KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
  ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
  GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
+ GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
  OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
  MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
  
  static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
    for (int i = 0; fields[i].valid(); i++) {

*** 229,20 ***
    // Do not call p->identity_hash() as that will update the
    // object header.
    return primitive_hash(cast_from_oop<intptr_t>(p));
  }
  
! unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
!   return oop_hash(oh.resolve());
! }
! 
! unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
    oop o = oh.resolve();
!   if (o == nullptr) {
!     return 0;
    } else {
!     return o->identity_hash();
    }
  }
  
  bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
    return a.resolve() == b.resolve();
--- 245,35 ---
    // Do not call p->identity_hash() as that will update the
    // object header.
    return primitive_hash(cast_from_oop<intptr_t>(p));
  }
  
! // About the hashcode in the cached objects:
! // - If a source object has a hashcode, it must be copied into the cache.
! //   That's because some cached hashtables are laid out using this hashcode.
! // - If a source object doesn't have a hashcode, we avoid computing it while
! //   copying the objects into the cache. This allows the hashcode to be
+ //   computed dynamically and randomly in each production run, which is
+ //   generally desirable to make the hashcodes more random between runs.
+ unsigned HeapShared::archived_object_cache_hash(OopHandle const& oh) {
    oop o = oh.resolve();
!   if (_use_identity_hash_for_archived_object_cache) {
!     // This is called after all objects are copied. It's OK to update
+     // the object's hashcode.
+     //
+     // This may be called after we have left the AOT dumping safepoint.
+     // Objects in archived_object_cache() may be moved by the GC, so we
+     // can't use the address of o for computing the hash.
+     if (o == nullptr) {
+       return 0;
+     } else {
+       return o->identity_hash();
+     }
    } else {
!     // This is called while we are copying the objects. Don't call o->identity_hash()
+     // as that will update the object header.
+     return oop_hash(o);
    }
  }
  
  bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
    return a.resolve() == b.resolve();

*** 294,10 ***
--- 325,11 ---
    Handle boot_loader(THREAD, result.get_oop());
    reset_states(boot_loader(), CHECK);
  }
  
  HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
+ bool HeapShared::_use_identity_hash_for_archived_object_cache = false;
  
  bool HeapShared::is_archived_heap_in_use() {
    if (HeapShared::is_loading()) {
      if (HeapShared::is_loading_streaming_mode()) {
        return AOTStreamedHeapLoader::is_in_use();

*** 408,13 ***
    if (AOTStreamedHeapLoader::is_in_use()) {
      AOTStreamedHeapLoader::materialize_thread_object();
    }
  }
  
! void HeapShared::add_to_dumped_interned_strings(oop string) {
    assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
-   AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
    assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  }
  
  void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
--- 440,12 ---
    if (AOTStreamedHeapLoader::is_in_use()) {
      AOTStreamedHeapLoader::materialize_thread_object();
    }
  }
  
! void HeapShared::archive_interned_string(oop string) {
    assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
    assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  }
  
  void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {

*** 428,10 ***
--- 459,26 ---
        AOTMappedHeapLoader::finish_initialization(static_mapinfo);
      }
    }
  }
  
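+ // The archived_object_cache is built inside the AOT dump safepoint using
+ // address-based hashing. Rebuild it with identity-hash based hashing so that
+ // lookups stay correct after we leave the safepoint and GC may move objects.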
+ void HeapShared::make_archived_object_cache_gc_safe() {
+   ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
+ 
+   // It's safe to change the behavior of the hash function now, because iterate_all()
+   // doesn't call the hash function.
+   _use_identity_hash_for_archived_object_cache = true;
+ 
+   // Copy all CachedOopInfo into a new table using a different hashing algorithm
+   archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
+       new_cache->put_when_absent(oh, info);
+     });
+ 
+   destroy_archived_object_cache();
+   _archived_object_cache = new_cache;
+ }
+ 
  HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
    OopHandle oh(Universe::vm_global(), obj);
    CachedOopInfo* result = _archived_object_cache->get(oh);
    oh.release(Universe::vm_global());
    return result;

*** 441,18 ***
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
    return get_cached_oop_info(obj) != nullptr;
  }
  
  int HeapShared::append_root(oop obj) {
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
!   if (obj != nullptr) {
!     assert(has_been_archived(obj), "must be");
    }
-   // No GC should happen since we aren't scanning _pending_roots.
-   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  
!   return _pending_roots->append(obj);
  }
  
  oop HeapShared::get_root(int index, bool clear) {
    assert(index >= 0, "sanity");
    assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
--- 488,60 ---
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
    return get_cached_oop_info(obj) != nullptr;
  }
  
  int HeapShared::append_root(oop obj) {
+   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
!   assert(_pending_roots != nullptr, "sanity");
! 
+   if (obj == nullptr) {
+     assert(_pending_roots->at(0) == nullptr, "root index 0 is always null");
+     return 0;
+   } else if (CDSConfig::is_dumping_aot_linked_classes()) {
+     // The AOT compiler may refer to the same obj many times, so we
+     // reuse the same root index for this oop to avoid duplicate entries
+     // in the roots array.
+     CachedOopInfo* obj_info = get_cached_oop_info(obj);
+     assert(obj_info != nullptr, "must be archived");
+ 
+     if (obj_info->root_index() > 0) {
+       return obj_info->root_index();
+     } else {
+       int i = _pending_roots->append(obj);
+       obj_info->set_root_index(i);
+       return i;
+     }
+   } else {
+     return _pending_roots->append(obj);
+   }
+ }
+ 
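+ // Returns the index of obj in the archived heap's roots array, or -1 (with a
+ // debug log message) if obj has no root index.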
+ int HeapShared::get_root_index(oop obj) {
+   if (!CDSConfig::is_dumping_heap()) {
+     return -1; // Called by the old Leyden workflow
+   }
+ 
+   if (java_lang_Class::is_instance(obj)) {
+     obj = scratch_java_mirror(obj);
    }
  
!   CachedOopInfo* obj_info = get_cached_oop_info(obj);
+   const char* error = nullptr;
+   if (obj_info == nullptr) {
+     error = "Not a cached oop";
+   } else if (obj_info->root_index() < 0) {
+     error = "Not a cached oop root";
+   } else {
+     return obj_info->root_index();
+   }
+ 
+   ResourceMark rm;
+   log_debug(aot, codecache, oops)("%s: " INTPTR_FORMAT " (%s)", error,
+                                   cast_from_oop<uintptr_t>(obj),
+                                   obj->klass()->external_name());
+   return -1;
  }
  
  oop HeapShared::get_root(int index, bool clear) {
    assert(index >= 0, "sanity");
    assert(is_archived_heap_in_use(), "getting roots into heap that is not used");

*** 477,10 ***
--- 566,17 ---
      AOTStreamedHeapLoader::finish_materialize_objects();
    }
  }
  
  void HeapShared::clear_root(int index) {
+   if (CDSConfig::is_using_aot_linked_classes()) {
+     // When AOT-linked classes are in use, all roots remain in use for the
+     // entire run, so there is no benefit in clearing them. Also, we can't
+     // clear the roots because they may be shared.
+     return;
+   }
+ 
    assert(index >= 0, "sanity");
    assert(CDSConfig::is_using_archive(), "must be");
    if (is_archived_heap_in_use()) {
      if (log_is_enabled(Debug, aot, heap)) {
        log_debug(aot, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(get_root(index, false /* clear */)));

*** 557,10 ***
--- 653,14 ---
            m = RegeneratedClasses::get_regenerated_object(m);
          }
          InstanceKlass* method_holder = m->method_holder();
          AOTArtifactFinder::add_cached_class(method_holder);
        }
+     } else if (AOTCodeCache::is_dumping_code() &&
+                (java_lang_invoke_MethodHandle::is_instance(obj) || is_interned_string(obj))) {
+       // Needed by AOT compiler.
+       append_root(obj);
      }
    }
  
    if (log_is_enabled(Debug, aot, heap)) {
      ResourceMark rm;

*** 624,13 ***
  
  objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
    return (objArrayOop)_scratch_objects_table->get_oop(src);
  }
  
!  void HeapShared::init_dumping() {
!    _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
!    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }
  
  void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
    for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
      BasicType bt = (BasicType)i;
--- 724,14 ---
  
  objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
    return (objArrayOop)_scratch_objects_table->get_oop(src);
  }
  
! void HeapShared::init_dumping() {
!   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
!   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
+   _pending_roots->append(nullptr); // root index 0 represents a null oop
  }
  
  void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
    for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
      BasicType bt = (BasicType)i;

*** 743,15 ***
        switch (fd.field_type()) {
        case T_OBJECT:
        case T_ARRAY:
          {
            oop field_obj = orig_mirror->obj_field(offset);
-           if (offset == java_lang_Class::reflection_data_offset()) {
-             // Class::reflectData use SoftReference, which cannot be archived. Set it
-             // to null and it will be recreated at runtime.
-             field_obj = nullptr;
-           }
            m->obj_field_put(offset, field_obj);
            if (field_obj != nullptr) {
              bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
              assert(success, "sanity");
            }
--- 844,10 ---

*** 822,10 ***
--- 918,18 ---
  
    if (CDSConfig::is_dumping_aot_linked_classes()) {
      java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
      java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
    }
+ 
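+   // Copy Class::reflectionData into the scratch mirror only for classes whose
+   // reflection data has been determined to be archivable.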
+   Klass* k = java_lang_Class::as_Klass(orig_mirror); // null for Universe::void_mirror()
+   if (CDSConfig::is_dumping_reflection_data() &&
+       k != nullptr && k->is_instance_klass() &&
+       java_lang_Class::reflection_data(orig_mirror) != nullptr &&
+       AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) {
+     java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror));
+   }
  }
  
  static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
    if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
      objArrayOop rr = src_ik->constants()->resolved_references_or_null();

*** 864,10 ***
--- 968,11 ---
      NoSafepointVerifier nsv;
  
      // The special subgraph doesn't belong to any class. We use Object_klass() here just
      // for convenience.
      _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
+     _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
  
      // Cache for recording where the archived objects are copied to
      create_archived_object_cache();
  
      if (HeapShared::is_writing_mapping_mode() && (UseG1GC || UseCompressedOops)) {

*** 893,11 ***
  }
  
  void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
    {
      NoSafepointVerifier nsv;
!     CDSHeapVerifier::verify();
      check_special_subgraph_classes();
    }
  
    if (HeapShared::is_writing_mapping_mode()) {
      StringTable::write_shared_table();
--- 998,13 ---
  }
  
  void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
    {
      NoSafepointVerifier nsv;
!     if (!SkipArchiveHeapVerification) {
+       CDSHeapVerifier::verify();
+     }
      check_special_subgraph_classes();
    }
  
    if (HeapShared::is_writing_mapping_mode()) {
      StringTable::write_shared_table();

*** 907,18 ***
--- 1014,29 ---
      AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
    }
  
    ArchiveBuilder::OtherROAllocMark mark;
    write_subgraph_info_table();
+ 
+   delete _pending_roots;
+   _pending_roots = nullptr;
+ 
+   make_archived_object_cache_gc_safe();
  }
  
  void HeapShared::scan_java_mirror(oop orig_mirror) {
    oop m = scratch_java_mirror(orig_mirror);
    if (m != nullptr) { // nullptr for custom class loaders
      copy_java_mirror(orig_mirror, m);
      bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
      assert(success, "sanity");
+ 
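+     // copy_java_mirror() may have installed archived reflection data on the
+     // scratch mirror; make sure its contents are archived as well.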
+     oop extra;
+     if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
+       success = archive_reachable_objects_from(1, _dump_time_special_subgraph, extra);
+       assert(success, "sanity");
+     }
    }
  }
  
  void HeapShared::scan_java_class(Klass* orig_k) {
    scan_java_mirror(orig_k->java_mirror());

*** 979,11 ***
    if (_subgraph_entry_fields == nullptr) {
      _subgraph_entry_fields =
        new (mtClass) GrowableArray<int>(10, mtClass);
    }
    _subgraph_entry_fields->append(static_field_offset);
!   _subgraph_entry_fields->append(HeapShared::append_root(v));
  }
  
  // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
  // Only objects of boot classes can be included in sub-graph.
  void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
--- 1097,15 ---
    if (_subgraph_entry_fields == nullptr) {
      _subgraph_entry_fields =
        new (mtClass) GrowableArray<int>(10, mtClass);
    }
    _subgraph_entry_fields->append(static_field_offset);
!   if (v == nullptr) {
+     _subgraph_entry_fields->append(-1);
+   } else {
+     _subgraph_entry_fields->append(HeapShared::append_root(v));
+   }
  }
  
  // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
  // Only objects of boot classes can be included in sub-graph.
  void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {

*** 1322,22 ***
    if (record == nullptr) {
      clear_archived_roots_of(k);
    }
  }
  
  void HeapShared::initialize_java_lang_invoke(TRAPS) {
    if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
!     resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
!     resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
!     resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
!     resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
-     resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
-     resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
-     resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
    }
  }
  
  // Initialize the InstanceKlasses of objects that are reachable from the following roots:
  //   - interned strings
  //   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
  //   - ConstantPool::resolved_references()
  //   - Universe::<xxx>_exception_instance()
--- 1444,42 ---
    if (record == nullptr) {
      clear_archived_roots_of(k);
    }
  }
  
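+ // Core java.lang.invoke classes that must be resolved or initialized eagerly
+ // when AOT-linked classes are in use or method handles are being archived.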
+ static const char* java_lang_invoke_core_klasses[] = {
+   "java/lang/invoke/Invokers$Holder",
+   "java/lang/invoke/MethodHandle",
+   "java/lang/invoke/MethodHandleNatives",
+   "java/lang/invoke/DirectMethodHandle$Holder",
+   "java/lang/invoke/DelegatingMethodHandle$Holder",
+   "java/lang/invoke/LambdaForm$Holder",
+   "java/lang/invoke/BoundMethodHandle$Species_L",
+ };
+ 
  void HeapShared::initialize_java_lang_invoke(TRAPS) {
    if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
!     int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
!     for (int i = 0; i < len; i++) {
!       resolve_or_init(java_lang_invoke_core_klasses[i], true, CHECK);
!     }
    }
  }
  
+ bool HeapShared::is_core_java_lang_invoke_klass(InstanceKlass* klass) {
+   // TODO: Crude, rewrite using Symbols or vmClasses instead
+   ResourceMark rm;
+   char* s2 = klass->name()->as_C_string();
+   int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
+   for (int i = 0; i < len; i++) {
+     if (strcmp(java_lang_invoke_core_klasses[i], s2) == 0) {
+       return true;
+     }
+   }
+   return false;
+ }
+ 
  // Initialize the InstanceKlasses of objects that are reachable from the following roots:
  //   - interned strings
  //   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
  //   - ConstantPool::resolved_references()
  //   - Universe::<xxx>_exception_instance()

*** 1528,11 ***
      for (int i = 0; i < efr_len; i += 2) {
        int field_offset = entry_field_records->at(i);
        int root_index = entry_field_records->at(i+1);
        // Load the subgraph entry fields from the record and store them back to
        // the corresponding fields within the mirror.
!       oop v = get_root(root_index, /*clear=*/true);
        oop m = k->java_mirror();
        if (k->has_aot_initialized_mirror()) {
          assert(v == m->obj_field(field_offset), "must be aot-initialized");
        } else {
          m->obj_field_put(field_offset, v);
--- 1670,16 ---
      for (int i = 0; i < efr_len; i += 2) {
        int field_offset = entry_field_records->at(i);
        int root_index = entry_field_records->at(i+1);
        // Load the subgraph entry fields from the record and store them back to
        // the corresponding fields within the mirror.
!       oop v;
+       if (root_index < 0) {
+         v = nullptr;
+       } else {
+         v = get_root(root_index, /*clear=*/true);
+       }
        oop m = k->java_mirror();
        if (k->has_aot_initialized_mirror()) {
          assert(v == m->obj_field(field_offset), "must be aot-initialized");
        } else {
          m->obj_field_put(field_offset, v);

*** 1670,10 ***
--- 1817,24 ---
      vmClasses::Long_klass()->initialize(CHECK);
      vmClasses::Void_klass()->initialize(CHECK);
    }
  }
  
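+ // Reports a fatal heap archiving failure: prints the current ContextMark
+ // stack (which subgraph and field were being archived) and aborts the dump.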
+ void HeapShared::exit_on_error() {
+   if (_context != nullptr) {
+     ResourceMark rm;
+     LogStream ls(Log(cds, heap)::error());
+     ls.print_cr("Context");
+     for (int i = 0; i < _context->length(); i++) {
+       const char* s = _context->at(i);
+       ls.print_cr("- %s", s);
+     }
+   }
+   debug_trace();
+   AOTMetaspace::unrecoverable_writing_error();
+ }
+ 
  // (1) If orig_obj has not been archived yet, archive it.
  // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
  //     trace all  objects that are reachable from it, and make sure these objects are archived.
  // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
  //     were already archived when this function is called)

*** 1936,10 ***
--- 2097,15 ---
  }
  
  void HeapShared::verify_reachable_objects_from(oop obj) {
    _num_total_verifications ++;
    if (java_lang_Class::is_instance(obj)) {
+     Klass* k = java_lang_Class::as_Klass(obj);
+     if (RegeneratedClasses::has_been_regenerated(k)) {
+       k = RegeneratedClasses::get_regenerated_object(k);
+       obj = k->java_mirror();
+     }
      obj = scratch_java_mirror(obj);
      assert(obj != nullptr, "must be");
    }
    if (!has_been_seen_during_subgraph_recording(obj)) {
      set_has_been_seen_during_subgraph_recording(obj);

*** 2258,20 ***
--- 2424,22 ---
    for (int i = 0; fields[i].valid(); ) {
      ArchivableStaticFieldInfo* info = &fields[i];
      const char* klass_name = info->klass_name;
      start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
  
+     ContextMark cm(klass_name);
      // If you have specified consecutive fields of the same klass in
      // fields[], these will be archived in the same
      // {start_recording_subgraph ... done_recording_subgraph} pass to
      // save time.
      for (; fields[i].valid(); i++) {
        ArchivableStaticFieldInfo* f = &fields[i];
        if (f->klass_name != klass_name) {
          break;
        }
  
+       ContextMark cm(f->field_name);
        archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                    f->offset, f->field_name);
      }
      done_recording_subgraph(info->klass, klass_name);
    }

*** 2289,16 ***
    }
    log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
  #endif
  }
  
! bool HeapShared::is_dumped_interned_string(oop o) {
!   if (is_writing_mapping_mode()) {
!     return AOTMappedHeapWriter::is_dumped_interned_string(o);
-   } else {
-     return AOTStreamedHeapWriter::is_dumped_interned_string(o);
    }
  }
  
  // These tables should be used only within the CDS safepoint, so
  // delete them before we exit the safepoint. Otherwise the table will
  // contain bad oops after a GC.
--- 2457,26 ---
    }
    log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
  #endif
  }
  
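+ // Returns true iff obj is a String that is the canonical (interned) instance
+ // in the StringTable.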
! bool HeapShared::is_interned_string(oop obj) {
!   if (!java_lang_String::is_instance(obj)) {
!     return false;
    }
+ 
+   ResourceMark rm;
+   int len = 0;
+   jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
+   if (name == nullptr) {
+     fatal("Insufficient memory for dumping");
+   }
+   return StringTable::lookup(name, len) == obj;
+ }
+ 
+ bool HeapShared::is_dumped_interned_string(oop o) {
+   return is_interned_string(o) && has_been_archived(o);
  }
  
  // These tables should be used only within the CDS safepoint, so
  // delete them before we exit the safepoint. Otherwise the table will
  // contain bad oops after a GC.