
src/hotspot/share/cds/heapShared.cpp

*** 20,10 ***
--- 20,11 ---
   * or visit www.oracle.com if you need additional information or have any
   * questions.
   *
   */
  
+ #include "cds/aotCacheAccess.hpp"
  #include "cds/aotArtifactFinder.hpp"
  #include "cds/aotClassInitializer.hpp"
  #include "cds/aotClassLocation.hpp"
  #include "cds/aotLogging.hpp"
  #include "cds/aotReferenceObjSupport.hpp"

*** 56,10 ***
--- 57,11 ---
  #include "memory/universe.hpp"
  #include "oops/compressedOops.inline.hpp"
  #include "oops/fieldStreams.inline.hpp"
  #include "oops/objArrayOop.inline.hpp"
  #include "oops/oop.inline.hpp"
+ #include "oops/oopHandle.inline.hpp"
  #include "oops/typeArrayOop.inline.hpp"
  #include "prims/jvmtiExport.hpp"
  #include "runtime/arguments.hpp"
  #include "runtime/fieldDescriptor.inline.hpp"
  #include "runtime/init.hpp"

*** 87,10 ***
--- 89,21 ---
    bool valid() {
      return klass_name != nullptr;
    }
  };
  
+ class HeapShared::ContextMark : public StackObj {
+   ResourceMark rm;
+ public:
+   ContextMark(const char* c) : rm{} {
+     _context->push(c);
+   }
+   ~ContextMark() {
+     _context->pop();
+   }
+ };
+ 
  DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
  
  size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
  size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
  size_t HeapShared::_total_obj_count;

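A note on the ContextMark helper introduced above: it is an RAII guard that pushes a label onto HeapShared::_context on construction and pops it on destruction, so that exit_on_error() (added further down in this file) can print the chain of labels in effect when the dump fails. A minimal sketch of the intended nesting; the labels are borrowed from the subgraph table in the next hunk, and the enclosing function is hypothetical:

    void example_archiving_pass() {
      // Sketch only -- ContextMark is nested inside HeapShared and is used
      // from its member functions, as in archive_static_fields() below.
      HeapShared::ContextMark outer("java/util/ImmutableCollections"); // class label
      {
        HeapShared::ContextMark inner("archivedObjects");              // field label
        // ... archiving work; on failure, exit_on_error() walks _context
        //     and prints one "- <label>" line per entry before aborting ...
      } // 'inner' pops its label here
    }   // 'outer' pops its label here
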
*** 120,10 ***
--- 133,11 ---
    {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
    {"java/util/ImmutableCollections",              "archivedObjects"},
    {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
    {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
    {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
+   {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking
  
  #ifndef PRODUCT
    {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
  #endif
    {nullptr, nullptr},

*** 137,11 ***
    {nullptr, nullptr},
  };
  
  KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
  ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
! GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
  GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
  int HeapShared::_root_segment_max_size_elems;
  OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
  MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
  
--- 151,12 ---
    {nullptr, nullptr},
  };
  
  KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
  ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
! GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
+ GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
  GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
  int HeapShared::_root_segment_max_size_elems;
  OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
  MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
  

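The type change above (_pending_roots now holds OopHandle rather than raw oop) means a GC between append_root() and write_heap() can no longer leave stale oops in the pending list: each root is kept alive through a vm_global handle and resolved back to a raw oop only at write time, within the CDS safepoint. A sketch of the two ends of that lifecycle, mirroring code that appears later in this file:

    // Dump time (see append_root() below): pin the root in vm_global storage.
    OopHandle oh(Universe::vm_global(), obj);
    _pending_roots->append(oh);

    // Write time (see write_heap() below): resolve back to a raw oop only
    // once object addresses can no longer change.
    oop root = _pending_roots->at(i).resolve();
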
*** 157,16 ***
--- 172,57 ---
  bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
    return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
           is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
  }
  
+ oop HeapShared::CachedOopInfo::orig_referrer() const {
+   return _orig_referrer.resolve();
+ }
+ 
+ void HeapShared::rehash_archived_object_cache() {
+   if (!CDSConfig::is_dumping_heap()) {
+     return;
+   }
+   assert(SafepointSynchronize::is_at_safepoint() ||
+          JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
+ 
+   ArchivedObjectCache* new_cache =
+       new (mtClass)ArchivedObjectCache(archived_object_cache()->table_size(), MAX_TABLE_SIZE);
+ 
+   archived_object_cache()->iterate_all([&](OopHandle o, CachedOopInfo& info) {
+     new_cache->put_when_absent(o, info);
+   });
+ 
+   delete _archived_object_cache;
+   _archived_object_cache = new_cache;
+ }
+ 
  unsigned HeapShared::oop_hash(oop const& p) {
+   assert(SafepointSynchronize::is_at_safepoint() ||
+          JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
    // Do not call p->identity_hash() as that will update the
    // object header.
    return primitive_hash(cast_from_oop<intptr_t>(p));
  }
  
+ unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
+   return oop_hash(oh.resolve());
+ }
+ 
+ unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
+   oop o = oh.resolve();
+   if (o == nullptr) {
+     return 0;
+   } else {
+     return o->identity_hash();
+   }
+ }
+ 
+ bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
+   return a.resolve() == b.resolve();
+ }
+ 
  static void reset_states(oop obj, TRAPS) {
    Handle h_obj(THREAD, obj);
    InstanceKlass* klass = InstanceKlass::cast(obj->klass());
    TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
    Symbol* method_sig = vmSymbols::void_method_signature();

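The hash functions above differ deliberately. oop_hash() and oop_handle_hash_raw() hash the object's current address, which is cheap but valid only while no GC can move objects (hence the safepoint / no-safepoint-scope asserts); oop_handle_hash() uses identity_hash(), which survives GC but updates the object header. rehash_archived_object_cache() exists to repair the address-keyed table once objects may have moved. A sketch of the failure mode it prevents, assuming a moving collector ('cache' and 'handle_to_obj' are hypothetical names):

    // Entry inserted while the object lives at address A: its bucket is
    // derived from oop_handle_hash_raw(), i.e. from A.
    cache->put_when_absent(handle_to_obj, info);

    // ... a GC moves the object to address B ...

    cache->get(handle_to_obj);   // hashes to B's bucket -- entry not found!

    // So after any point where a GC may have occurred:
    HeapShared::rehash_archived_object_cache();  // re-buckets every entry
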
*** 214,36 ***
  
  HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
  
  bool HeapShared::has_been_archived(oop obj) {
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
!   return archived_object_cache()->get(obj) != nullptr;
  }
  
  int HeapShared::append_root(oop obj) {
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
    if (obj != nullptr) {
      assert(has_been_archived(obj), "must be");
    }
    // No GC should happen since we aren't scanning _pending_roots.
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  
!   return _pending_roots->append(obj);
  }
  
  objArrayOop HeapShared::root_segment(int segment_idx) {
!   if (CDSConfig::is_dumping_heap()) {
      assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    } else {
      assert(CDSConfig::is_using_archive(), "must be");
    }
  
    objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
    assert(segment != nullptr, "should have been initialized");
    return segment;
  }
  
  void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
    assert(_root_segment_max_size_elems > 0, "sanity");
  
    // Try to avoid divisions for the common case.
    if (idx < _root_segment_max_size_elems) {
--- 270,193 ---
  
  HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
  
  bool HeapShared::has_been_archived(oop obj) {
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
!   OopHandle oh(&obj);
+   return archived_object_cache()->get(oh) != nullptr;
  }
  
  int HeapShared::append_root(oop obj) {
    assert(CDSConfig::is_dumping_heap(), "dump-time only");
    if (obj != nullptr) {
      assert(has_been_archived(obj), "must be");
    }
    // No GC should happen since we aren't scanning _pending_roots.
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  
!   OopHandle oh(Universe::vm_global(), obj);
+   return _pending_roots->append(oh);
  }
  
  objArrayOop HeapShared::root_segment(int segment_idx) {
!   if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
      assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    } else {
      assert(CDSConfig::is_using_archive(), "must be");
    }
  
    objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
    assert(segment != nullptr, "should have been initialized");
    return segment;
  }
  
+ class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
+     36137, // prime number
+     AnyObj::C_HEAP,
+     mtClassShared,
+     HeapShared::oop_handle_hash,
+     HeapShared::oop_handle_equals> {};
+ 
+ static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
+ 
+ void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
+   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
+   if (_orig_to_scratch_object_table == nullptr) {
+     _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
+   }
+ 
+   OopHandle orig_h(Universe::vm_global(), orig_obj);
+   OopHandle scratch_h(Universe::vm_global(), scratch_obj);
+   _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
+ }
+ 
+ oop HeapShared::orig_to_scratch_object(oop orig_obj) {
+   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
+   if (_orig_to_scratch_object_table != nullptr) {
+     OopHandle orig(&orig_obj);
+     OopHandle* v = _orig_to_scratch_object_table->get(orig);
+     if (v != nullptr) {
+       return v->resolve();
+     }
+   }
+   return nullptr;
+ }
+ 
+ // Permanent oops are used to support AOT-compiled methods, which may have in-line references
+ // to Strings and MH oops.
+ //
+ // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
+ // and are accessed via AOTCacheAccess::get_archived_object(int).
+ struct PermanentOopInfo {
+   int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
+   int _heap_offset; // Offset of the object from the bottom of the archived heap.
+   PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
+ };
+ 
+ class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
+     36137, // prime number
+     AnyObj::C_HEAP,
+     mtClassShared,
+     HeapShared::oop_handle_hash,
+     HeapShared::oop_handle_equals> {};
+ 
+ static int _dumptime_permanent_oop_count = 0;
+ static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
+ static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
+ 
+ // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
+ // so we can remember their offset (from the bottom of the archived heap).
+ void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
+   assert_at_safepoint();
+   if (_dumptime_permanent_oop_table == nullptr) {
+     _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
+   }
+ 
+   PermanentOopInfo info(-1, offset);
+   OopHandle oh(Universe::vm_global(), obj);
+   _dumptime_permanent_oop_table->put_when_absent(oh, info);
+ }
+ 
+ // A permanent index is assigned to an archived object ONLY when
+ // the AOT compiler calls this function.
+ int HeapShared::get_archived_object_permanent_index(oop obj) {
+   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
+ 
+   if (!CDSConfig::is_dumping_heap()) {
+     return -1; // Called by the old Leyden workflow
+   }
+   if (_dumptime_permanent_oop_table == nullptr) {
+     return -1;
+   }
+ 
+   if (_orig_to_scratch_object_table != nullptr) {
+     OopHandle orig(&obj);
+     OopHandle* v = _orig_to_scratch_object_table->get(orig);
+     if (v != nullptr) {
+       obj = v->resolve();
+     }
+   }
+ 
+   OopHandle tmp(&obj);
+   PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
+   if (info == nullptr) {
+     return -1;
+   } else {
+     if (info->_index < 0) {
+       info->_index = _dumptime_permanent_oop_count++;
+     }
+     return info->_index;
+   }
+ }
+ 
+ oop HeapShared::get_archived_object(int permanent_index) {
+   assert(permanent_index >= 0, "sanity");
+   assert(ArchiveHeapLoader::is_in_use(), "sanity");
+   assert(_runtime_permanent_oops != nullptr, "sanity");
+ 
+   return _runtime_permanent_oops->at(permanent_index).resolve();
+ }
+ 
+ // Remember all archived heap objects that have a permanent index.
+ //   table[i] = offset of oop whose permanent index is i.
+ void CachedCodeDirectoryInternal::dumptime_init_internal() {
+   const int count = _dumptime_permanent_oop_count;
+   if (count == 0) {
+     // Avoid confusing CDS code with zero-sized tables; just return.
+     log_info(cds)("No permanent oops");
+     _permanent_oop_count = count;
+     _permanent_oop_offsets = nullptr;
+     return;
+   }
+ 
+   int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
+   for (int i = 0; i < count; i++) {
+     table[i] = -1; // mark every slot unassigned; verified by the asserts below
+   }
+   _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
+     int index = info._index;
+     if (index >= 0) {
+       assert(index < count, "sanity");
+       table[index] = info._heap_offset;
+     }
+     return true; // continue
+   });
+ 
+   for (int i = 0; i < count; i++) {
+     assert(table[i] >= 0, "must be");
+   }
+ 
+   log_info(cds)("Dumped %d permanent oops", count);
+ 
+   _permanent_oop_count = count;
+   AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
+ }
+ 
+ // This is called during the bootstrap of the production run, before any GC can happen.
+ // Record each permanent oop in an OopHandle for GC safety.
+ void CachedCodeDirectoryInternal::runtime_init_internal() {
+   int count = _permanent_oop_count;
+   int* table = _permanent_oop_offsets;
+   _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
+   for (int i = 0; i < count; i++) {
+     oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
+     OopHandle oh(Universe::vm_global(), obj);
+     _runtime_permanent_oops->append(oh);
+   }
+ }
+ 
  void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
    assert(_root_segment_max_size_elems > 0, "sanity");
  
    // Try to avoid divisions for the common case.
    if (idx < _root_segment_max_size_elems) {

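Putting the permanent-oop additions above together: at dump time, ArchiveHeapWriter registers every archived object's heap offset via add_to_permanent_oop_table(); an index is assigned lazily only when the AOT compiler asks for one; dumptime_init_internal() then serializes the index-to-offset table into the AOT code region, and runtime_init_internal() turns each offset back into a GC-safe OopHandle before any GC can run. A sketch of the round trip ('obj' and 'idx' are hypothetical names; AOTCacheAccess::get_archived_object(int) is the accessor named in the comment above):

    // Dump time: pin an object the AOT compiler wants to embed in code.
    int idx = HeapShared::get_archived_object_permanent_index(obj); // -1 if not archived
    // ... idx is recorded alongside the compiled code ...

    // Production run: recover exactly the same (relocated) object.
    oop obj_again = HeapShared::get_archived_object(idx);
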
*** 301,11 ***
      return false;
    } else {
      count_allocation(obj->size());
      ArchiveHeapWriter::add_source_obj(obj);
      CachedOopInfo info = make_cached_oop_info(obj, referrer);
!     archived_object_cache()->put_when_absent(obj, info);
      archived_object_cache()->maybe_grow();
      mark_native_pointers(obj);
  
      Klass* k = obj->klass();
      if (k->is_instance_klass()) {
--- 514,13 ---
      return false;
    } else {
      count_allocation(obj->size());
      ArchiveHeapWriter::add_source_obj(obj);
      CachedOopInfo info = make_cached_oop_info(obj, referrer);
! 
+     OopHandle oh(Universe::vm_global(), obj);
+     archived_object_cache()->put_when_absent(oh, info);
      archived_object_cache()->maybe_grow();
      mark_native_pointers(obj);
  
      Klass* k = obj->klass();
      if (k->is_instance_klass()) {

*** 404,19 ***
    return (objArrayOop)_scratch_objects_table->get_oop(src);
  }
  
  void HeapShared::init_dumping() {
    _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
!   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }
  
  void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
    for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
      BasicType bt = (BasicType)i;
      if (!is_reference_type(bt)) {
        oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
        _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      }
    }
  }
  
  // Given java_mirror that represents a (primitive or reference) type T,
--- 619,20 ---
    return (objArrayOop)_scratch_objects_table->get_oop(src);
  }
  
  void HeapShared::init_dumping() {
    _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
!   _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
  }
  
  void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
    for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
      BasicType bt = (BasicType)i;
      if (!is_reference_type(bt)) {
        oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
        _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
+       track_scratch_object(Universe::java_mirror(bt), m);
      }
    }
  }
  
  // Given java_mirror that represents a (primitive or reference) type T,

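The track_scratch_object() call added above records the mapping from each original basic-type mirror to its scratch copy; set_scratch_java_mirror() in the next hunk does the same for class mirrors. orig_to_scratch_object() is the reverse lookup, used e.g. by get_archived_object_permanent_index() to redirect a query about an original object to the copy actually being archived. A sketch of the pair in use ('orig' and 'scratch' are hypothetical names):

    // Dump time: remember that 'scratch' stands in for 'orig'.
    HeapShared::track_scratch_object(orig, scratch);

    // Later: a query phrased in terms of the original is redirected.
    oop s = HeapShared::orig_to_scratch_object(orig);
    if (s != nullptr) {
      // operate on the scratch copy instead of 'orig'
    }
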
*** 453,10 ***
--- 669,11 ---
  oop HeapShared::scratch_java_mirror(Klass* k) {
    return _scratch_objects_table->get_oop(k);
  }
  
  void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
+   track_scratch_object(k->java_mirror(), mirror);
    _scratch_objects_table->set_oop(k, mirror);
  }
  
  void HeapShared::remove_scratch_objects(Klass* k) {
    // Klass is being deallocated. Java mirror can still be alive, and it should not

*** 468,10 ***
--- 685,19 ---
    }
    _scratch_objects_table->remove_oop(k);
    if (k->is_instance_klass()) {
      _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
    }
+   if (mirror != nullptr) {
+     OopHandle tmp(&mirror);
+     OopHandle* v = _orig_to_scratch_object_table->get(tmp);
+     if (v != nullptr) {
+       oop scratch_mirror = v->resolve();
+       java_lang_Class::set_klass(scratch_mirror, nullptr);
+       _orig_to_scratch_object_table->remove(tmp);
+     }
+   }
  }
  
  //TODO: we eventually want a more direct test for these kinds of things.
  //For example the JVM could record some bit of context from the creation
  //of the klass, such as who called the hidden class factory.  Using

*** 627,18 ***
      ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
    }
  }
  
  void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
!   CachedOopInfo* info = archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    has_oop_pointers = info->has_oop_pointers();
    has_native_pointers = info->has_native_pointers();
  }
  
  void HeapShared::set_has_native_pointers(oop src_obj) {
!   CachedOopInfo* info = archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    info->set_has_native_pointers();
  }
  
  // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
--- 853,20 ---
      ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
    }
  }
  
  void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
!   OopHandle oh(&src_obj);
+   CachedOopInfo* info = archived_object_cache()->get(oh);
    assert(info != nullptr, "must be");
    has_oop_pointers = info->has_oop_pointers();
    has_native_pointers = info->has_native_pointers();
  }
  
  void HeapShared::set_has_native_pointers(oop src_obj) {
!   OopHandle oh(&src_obj);
+   CachedOopInfo* info = archived_object_cache()->get(oh);
    assert(info != nullptr, "must be");
    info->set_has_native_pointers();
  }
  
  // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that

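Note the lookup pattern used above and throughout this change: OopHandle(&src_obj) wraps the address of a stack slot, giving a transient key that oop_handle_equals() can compare against stored handles (both sides are resolved) without allocating a vm_global handle per query. Keys that are actually inserted into a table use the global-handle constructor instead. A side-by-side sketch ('probe', 'stored', and 'new_info' are hypothetical names):

    // Transient key for a lookup only; must not outlive this stack frame.
    OopHandle probe(&obj);
    CachedOopInfo* info = archived_object_cache()->get(probe);

    // Durable key that is stored in the table; kept alive by the vm_global
    // OopStorage until explicitly released.
    OopHandle stored(Universe::vm_global(), obj);
    archived_object_cache()->put_when_absent(stored, new_info);
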
*** 648,10 ***
--- 876,11 ---
      NoSafepointVerifier nsv;
  
      // The special subgraph doesn't belong to any class. We use Object_klass() here just
      // for convenience.
      _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
+     _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
  
      // Cache for recording where the archived objects are copied to
      create_archived_object_cache();
  
      if (UseCompressedOops || UseG1GC) {

*** 675,16 ***
  }
  
  void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
    {
      NoSafepointVerifier nsv;
!     CDSHeapVerifier::verify();
      check_special_subgraph_classes();
    }
  
    StringTable::write_shared_table();
!   ArchiveHeapWriter::write(_pending_roots, heap_info);
  
    ArchiveBuilder::OtherROAllocMark mark;
    write_subgraph_info_table();
  }
  
--- 904,23 ---
  }
  
  void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
    {
      NoSafepointVerifier nsv;
!     if (!SkipArchiveHeapVerification) {
+       CDSHeapVerifier::verify();
+     }
      check_special_subgraph_classes();
    }
  
    StringTable::write_shared_table();
!   GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
+   for (int i = 0; i < _pending_roots->length(); i++) {
+     roots->append(_pending_roots->at(i).resolve());
+   }
+   ArchiveHeapWriter::write(roots, heap_info);
+   delete roots;
  
    ArchiveBuilder::OtherROAllocMark mark;
    write_subgraph_info_table();
  }
  

*** 1086,10 ***
--- 1322,23 ---
  void HeapShared::resolve_classes(JavaThread* current) {
    assert(CDSConfig::is_using_archive(), "runtime only!");
    if (!ArchiveHeapLoader::is_in_use()) {
      return; // nothing to do
    }
+ 
+   if (!CDSConfig::is_using_aot_linked_classes()) {
+     assert(_run_time_special_subgraph != nullptr, "must be");
+     Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
+     if (klasses != nullptr) {
+       for (int i = 0; i < klasses->length(); i++) {
+         Klass* k = klasses->at(i);
+         ExceptionMark em(current); // no exception can happen here
+         resolve_or_init(k, /*do_init*/false, current);
+       }
+     }
+   }
+ 
    resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
    resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
  }
  
  void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {

*** 1444,11 ***
  };
  
  HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
    PointsToOopsChecker points_to_oops_checker;
    obj->oop_iterate(&points_to_oops_checker);
!   return CachedOopInfo(referrer, points_to_oops_checker.result());
  }
  
  void HeapShared::init_box_classes(TRAPS) {
    if (ArchiveHeapLoader::is_in_use()) {
      vmClasses::Boolean_klass()->initialize(CHECK);
--- 1693,11 ---
  };
  
  HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
    PointsToOopsChecker points_to_oops_checker;
    obj->oop_iterate(&points_to_oops_checker);
!   return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
  }
  
  void HeapShared::init_box_classes(TRAPS) {
    if (ArchiveHeapLoader::is_in_use()) {
      vmClasses::Boolean_klass()->initialize(CHECK);

*** 1461,10 ***
--- 1710,24 ---
      vmClasses::Long_klass()->initialize(CHECK);
      vmClasses::Void_klass()->initialize(CHECK);
    }
  }
  
+ void HeapShared::exit_on_error() {
+   if (_context != nullptr) {
+     ResourceMark rm;
+     LogStream ls(Log(cds, heap)::error());
+     ls.print_cr("Context");
+     for (int i = 0; i < _context->length(); i++) {
+       const char* s = _context->at(i);
+       ls.print_cr("- %s", s);
+     }
+   }
+   debug_trace();
+   MetaspaceShared::unrecoverable_writing_error();
+ }
+ 
  // (1) If orig_obj has not been archived yet, archive it.
  // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
  //     trace all  objects that are reachable from it, and make sure these objects are archived.
  // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
  //     were already archived when this function is called)

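For reference, exit_on_error() above prints one "- <label>" line per ContextMark currently on _context before aborting the dump. With the nested marks added in the field-archiving loop further down (one for the class, one for the field), a failure while archiving, say, the archivedObjects field would produce output along these lines (illustrative; log decorations omitted):

    Context
    - java/util/ImmutableCollections
    - archivedObjects
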
*** 1576,11 ***
          return false;
        } else {
          // We don't know how to handle an object that has been archived, but some of its reachable
          // objects cannot be archived. Bail out for now. We might need to fix this in the future if
          // we have a real use case.
!         MetaspaceShared::unrecoverable_writing_error();
        }
      }
    }
  
    Klass *orig_k = orig_obj->klass();
--- 1839,11 ---
          return false;
        } else {
          // We don't know how to handle an object that has been archived, but some of its reachable
          // objects cannot be archived. Bail out for now. We might need to fix this in the future if
          // we have a real use case.
!         exit_on_error();
        }
      }
    }
  
    Klass *orig_k = orig_obj->klass();

*** 2030,20 ***
--- 2293,22 ---
    for (int i = 0; fields[i].valid(); ) {
      ArchivableStaticFieldInfo* info = &fields[i];
      const char* klass_name = info->klass_name;
      start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
  
+     ContextMark cm(klass_name);
      // If you have specified consecutive fields of the same klass in
      // fields[], these will be archived in the same
      // {start_recording_subgraph ... done_recording_subgraph} pass to
      // save time.
      for (; fields[i].valid(); i++) {
        ArchivableStaticFieldInfo* f = &fields[i];
        if (f->klass_name != klass_name) {
          break;
        }
  
+       ContextMark cm(f->field_name);
        archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                    f->offset, f->field_name);
      }
      done_recording_subgraph(info->klass, klass_name);
    }

*** 2080,10 ***
--- 2345,22 ---
  
  bool HeapShared::is_dumped_interned_string(oop o) {
    return _dumped_interned_strings->get(o) != nullptr;
  }
  
+ // These tables should be used only within the CDS safepoint, so
+ // delete them before we exit the safepoint. Otherwise they would
+ // contain bad oops after a GC.
+ void HeapShared::delete_tables_with_raw_oops() {
+   assert(_seen_objects_table == nullptr, "should have been deleted");
+ 
+   delete _dumped_interned_strings;
+   _dumped_interned_strings = nullptr;
+ 
+   ArchiveHeapWriter::delete_tables_with_raw_oops();
+ }
+ 
  void HeapShared::debug_trace() {
    ResourceMark rm;
    oop referrer = _object_being_archived.referrer();
    if (referrer != nullptr) {
      LogStream ls(Log(aot, heap)::error());