src/hotspot/share/cds/heapShared.cpp
#include "cds/aotClassInitializer.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
+ #include "cds/cdsAccess.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
bool valid() {
return klass_name != nullptr;
}
};
+ class HeapShared::ArchivingObjectMark : public StackObj {
+ public:
+ ArchivingObjectMark(oop obj) {
+ _trace->push(obj);
+ }
+ ~ArchivingObjectMark() {
+ _trace->pop();
+ }
+ };
+
+ class HeapShared::ContextMark : public StackObj {
+ ResourceMark rm;
+ public:
+ ContextMark(const char* c) : rm{} {
+ _context->push(c);
+ }
+ ~ContextMark() {
+ _context->pop();
+ }
+ };
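+
+ // Usage sketch (illustrative only): these two RAII marks cooperate with
+ // exit_on_error() to report where object archiving failed. Roughly:
+ //
+ //   ContextMark cm(field_name);          // pushes a human-readable label
+ //   ArchivingObjectMark mark(orig_obj);  // pushes the oop currently being walked
+ //   // ... on failure, exit_on_error() prints _context and _trace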
+
bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
{"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
{"java/util/ImmutableCollections", "archivedObjects"},
{"java/lang/ModuleLayer", "EMPTY_LAYER"},
{"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
{"jdk/internal/math/FDBigInteger", "archivedCaches"},
+ {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking
#ifndef PRODUCT
{nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
{nullptr, nullptr},
{nullptr, nullptr},
};
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
! GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
{nullptr, nullptr},
};
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
! GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
+ GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_trace = nullptr;
+ GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
// No GC should happen since we aren't scanning _pending_roots.
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (_pending_roots == nullptr) {
! _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
! return _pending_roots->append(obj);
}
objArrayOop HeapShared::root_segment(int segment_idx) {
! if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (!HeapShared::can_write()) {
return nullptr;
}
} else {
// No GC should happen since we aren't scanning _pending_roots.
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (_pending_roots == nullptr) {
! _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
}
! OopHandle oh(Universe::vm_global(), obj);
+ return _pending_roots->append(oh);
}
objArrayOop HeapShared::root_segment(int segment_idx) {
! if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
if (!HeapShared::can_write()) {
return nullptr;
}
} else {
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
assert(segment != nullptr, "should have been initialized");
return segment;
}
+ inline unsigned int oop_handle_hash(const OopHandle& oh) {
+ oop o = oh.resolve();
+ if (o == nullptr) {
+ return 0;
+ } else {
+ return o->identity_hash();
+ }
+ }
+
+ inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
+ return a.resolve() == b.resolve();
+ }
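+
+ // Note: identity_hash() is stable across GC, so the OopHandle keys of the
+ // C-heap tables below remain valid even as the referenced objects move.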
+
+ class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
+ 36137, // prime number
+ AnyObj::C_HEAP,
+ mtClassShared,
+ oop_handle_hash,
+ oop_handle_equals> {};
+
+ static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
+
+ void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
+ MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
+ if (_orig_to_scratch_object_table == nullptr) {
+ _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
+ }
+
+ OopHandle orig_h(Universe::vm_global(), orig_obj);
+ OopHandle scratch_h(Universe::vm_global(), scratch_obj);
+ _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
+ }
+
+ oop HeapShared::orig_to_scratch_object(oop orig_obj) {
+ MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
+ if (_orig_to_scratch_object_table != nullptr) {
+ OopHandle orig(&orig_obj);
+ OopHandle* v = _orig_to_scratch_object_table->get(orig);
+ if (v != nullptr) {
+ return v->resolve();
+ }
+ }
+ return nullptr;
+ }
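+
+ // For example (illustrative): during dumping, k->java_mirror() is the original
+ // mirror, and orig_to_scratch_object(k->java_mirror()) is the copy that will
+ // actually be written into the archive, or nullptr if none was recorded.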
+
+ // Permanent oops are used to support AOT-compiled methods, which may have inlined references
+ // to String and MethodHandle oops.
+ //
+ // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
+ // and are accessed via CDSAccess::get_archived_object(int).
+ struct PermanentOopInfo {
+ int _index; // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
+ int _heap_offset; // Offset of the object from the bottom of the archived heap.
+ PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
+ };
+
+ class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
+ 36137, // prime number
+ AnyObj::C_HEAP,
+ mtClassShared,
+ oop_handle_hash,
+ oop_handle_equals> {};
+
+ static int _dumptime_permanent_oop_count = 0;
+ static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
+ static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
+
+ // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
+ // so we can remember their offset (from the bottom of the archived heap).
+ void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
+ assert_at_safepoint();
+ if (_dumptime_permanent_oop_table == nullptr) {
+ _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
+ }
+
+ PermanentOopInfo info(-1, offset);
+ OopHandle oh(Universe::vm_global(), obj);
+ _dumptime_permanent_oop_table->put_when_absent(oh, info);
+ }
+
+ // A permanent index is assigned to an archived object ONLY when
+ // the AOT compiler calls this function.
+ int HeapShared::get_archived_object_permanent_index(oop obj) {
+ MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
+
+ if (!CDSConfig::is_dumping_heap()) {
+ return -1; // Called by the old Leyden workflow
+ }
+ if (_dumptime_permanent_oop_table == nullptr) {
+ return -1;
+ }
+
+ if (_orig_to_scratch_object_table != nullptr) {
+ OopHandle orig(&obj);
+ OopHandle* v = _orig_to_scratch_object_table->get(orig);
+ if (v != nullptr) {
+ obj = v->resolve();
+ }
+ }
+
+ OopHandle tmp(&obj);
+ PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
+ if (info == nullptr) {
+ return -1;
+ } else {
+ if (info->_index < 0) {
+ info->_index = _dumptime_permanent_oop_count++;
+ }
+ return info->_index;
+ }
+ }
+
+ oop HeapShared::get_archived_object(int permanent_index) {
+ assert(permanent_index >= 0, "sanity");
+ assert(ArchiveHeapLoader::is_in_use(), "sanity");
+ assert(_runtime_permanent_oops != nullptr, "sanity");
+
+ return _runtime_permanent_oops->at(permanent_index).resolve();
+ }
+
+ // Remember all archived heap objects that have a permanent index.
+ // table[i] = offset of oop whose permanent index is i.
+ void CachedCodeDirectoryInternal::dumptime_init_internal() {
+ const int count = _dumptime_permanent_oop_count;
+ int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
+ for (int i = 0; i < count; i++) {
+ table[i] = -1;
+ }
+ _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
+ int index = info._index;
+ if (index >= 0) {
+ assert(index < count, "sanity");
+ table[index] = info._heap_offset;
+ }
+ return true; // continue
+ });
+
+ for (int i = 0; i < count; i++) {
+ assert(table[i] >= 0, "must be");
+ }
+
+ log_info(cds)("Dumped %d permanent oops", count);
+
+ _permanent_oop_count = count;
+ CDSAccess::set_pointer(&_permanent_oop_offsets, table);
+ }
+
+ // This is called during the bootstrap of the production run, before any GC can happen.
+ // Record each permanent oop in an OopHandle for GC safety.
+ void CachedCodeDirectoryInternal::runtime_init_internal() {
+ int count = _permanent_oop_count;
+ int* table = _permanent_oop_offsets;
+ _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
+ for (int i = 0; i < count; i++) {
+ oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
+ OopHandle oh(Universe::vm_global(), obj);
+ _runtime_permanent_oops->append(oh);
+ }
+ }
+
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
assert(_root_segment_max_size_elems > 0, "sanity");
// Try to avoid divisions for the common case.
if (idx < _root_segment_max_size_elems) {
}
}
};
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
+ if (_scratch_references_table == nullptr) {
+ _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
+ }
_scratch_references_table->set_oop(src, dest);
}
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
return (objArrayOop)_scratch_references_table->get_oop(src);
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
_scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
}
}
_scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
! _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
}
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror will be returned if it's already a
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
BasicType bt = (BasicType)i;
if (!is_reference_type(bt)) {
oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
_scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
+ track_scratch_object(Universe::java_mirror(bt), m);
}
}
_scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
! if (_scratch_references_table == nullptr) {
+ _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
+ }
}
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror will be returned if it's already a
oop HeapShared::scratch_java_mirror(Klass* k) {
return _scratch_java_mirror_table->get_oop(k);
}
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
+ track_scratch_object(k->java_mirror(), mirror);
_scratch_java_mirror_table->set_oop(k, mirror);
}
void HeapShared::remove_scratch_objects(Klass* k) {
// Klass is being deallocated. Java mirror can still be alive, and it should not
}
_scratch_java_mirror_table->remove_oop(k);
if (k->is_instance_klass()) {
_scratch_references_table->remove(InstanceKlass::cast(k)->constants());
}
+ if (mirror != nullptr && _orig_to_scratch_object_table != nullptr) {
+ OopHandle tmp(&mirror);
+ OopHandle* v = _orig_to_scratch_object_table->get(tmp);
+ if (v != nullptr) {
+ oop scratch_mirror = v->resolve();
+ java_lang_Class::set_klass(scratch_mirror, nullptr);
+ _orig_to_scratch_object_table->remove(tmp);
+ }
+ }
}
// TODO: we eventually want a more direct test for these kinds of things.
// For example the JVM could record some bit of context from the creation
// of the klass, such as who called the hidden class factory. Using
NoSafepointVerifier nsv;
// The special subgraph doesn't belong to any class. We use Object_klass() here just
// for convenience.
_dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
+ _trace = new GrowableArrayCHeap<oop, mtClassShared>(250);
+ _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
// Cache for recording where the archived objects are copied to
create_archived_object_cache();
if (UseCompressedOops || UseG1GC) {
UseCompressedOops ? p2i(CompressedOops::end()) :
p2i((address)G1CollectedHeap::heap()->reserved().end()));
}
copy_objects();
! CDSHeapVerifier::verify();
check_special_subgraph_classes();
}
! ArchiveHeapWriter::write(_pending_roots, heap_info);
}
void HeapShared::copy_interned_strings() {
init_seen_objects_table();
UseCompressedOops ? p2i(CompressedOops::end()) :
p2i((address)G1CollectedHeap::heap()->reserved().end()));
}
copy_objects();
! if (!SkipArchiveHeapVerification) {
+ CDSHeapVerifier::verify();
+ }
check_special_subgraph_classes();
}
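+ // _pending_roots now holds OopHandles; resolve them into a temporary array of
+ // raw oops for ArchiveHeapWriter. This runs at a safepoint in the VM thread,
+ // so the resolved oops cannot move while they are being written.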
! GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
+ for (int i = 0; i < _pending_roots->length(); i++) {
+ roots->append(_pending_roots->at(i).resolve());
+ }
+ ArchiveHeapWriter::write(roots, heap_info);
}
void HeapShared::copy_interned_strings() {
init_seen_objects_table();
"should not have initialized any non-interface, non-hidden classes outside of java.base");
}
}
}
+ #if 0
+ if (buffered_ik->name()->equals("jdk/internal/loader/NativeLibraries")) { // FIXME -- leyden+JEP483 merge
+ return false;
+ }
+ #endif
buffered_ik->set_has_aot_initialized_mirror();
if (AOTClassInitializer::is_runtime_setup_required(src_ik)) {
buffered_ik->set_is_runtime_setup_required();
}
made_progress = true;
_subgraph_object_klasses->append_if_missing(buffered_k);
_has_non_early_klasses |= is_non_early_klass(orig_k);
}
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
+ if (CDSConfig::is_dumping_invokedynamic()) {
+ // FIXME -- this allows LambdaProxy classes
+ return;
+ }
if (ik->module()->name() == vmSymbols::java_base()) {
assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
return;
}
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
if (!ArchiveHeapLoader::is_in_use()) {
return; // nothing to do
}
+
+ if (!CDSConfig::is_using_aot_linked_classes()) {
+ assert(_run_time_special_subgraph != nullptr, "must be");
+ Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
+ if (klasses != nullptr) {
+ for (int i = 0; i < klasses->length(); i++) {
+ Klass* k = klasses->at(i);
+ ExceptionMark em(current); // no exception can happen here
+ resolve_or_init(k, /*do_init*/false, current);
+ }
+ }
+ }
+
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
vmClasses::Long_klass()->initialize(CHECK);
vmClasses::Void_klass()->initialize(CHECK);
}
}
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
// trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
bool HeapShared::archive_reachable_objects_from(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj) {
assert(orig_obj != nullptr, "must be");
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
// This object has injected fields that cannot be supported easily, so we disallow them for now.
// If you get an error here, you probably made a change in the JDK library that has added
// these objects that are referenced (directly or indirectly) by static fields.
ResourceMark rm;
log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
debug_trace();
! MetaspaceShared::unrecoverable_writing_error();
}
if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
ResourceMark rm;
LogTarget(Debug, cds, heap) log;
vmClasses::Long_klass()->initialize(CHECK);
vmClasses::Void_klass()->initialize(CHECK);
}
}
+ void HeapShared::exit_on_error() {
+ if (_context != nullptr) {
+ ResourceMark rm;
+ LogStream ls(Log(cds, heap)::error());
+ ls.print_cr("Context");
+ for (int i = 0; i < _context->length(); i++) {
+ const char* s = _context->at(i);
+ ls.print_cr("- %s", s);
+ }
+ }
+ if (_trace != nullptr) {
+ ResourceMark rm;
+ LogStream ls(Log(cds, heap)::error());
+ ls.print_cr("Reference trace");
+ for (int i = 0; i < _trace->length(); i++) {
+ oop orig_obj = _trace->at(i);
+ ls.print_cr("[%d] ========================================", i);
+ orig_obj->print_on(&ls);
+ ls.cr();
+ }
+ }
+ MetaspaceShared::unrecoverable_writing_error();
+ }
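+
+ // The resulting error dump looks roughly like this (illustrative):
+ //
+ //   Context
+ //   - java/util/ImmutableCollections
+ //   - archivedObjects
+ //   Reference trace
+ //   [0] ========================================
+ //   <printout of the oop at trace depth 0>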
+
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
// trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
bool HeapShared::archive_reachable_objects_from(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj) {
+ ArchivingObjectMark mark(orig_obj);
assert(orig_obj != nullptr, "must be");
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
// This object has injected fields that cannot be supported easily, so we disallow them for now.
// If you get an error here, you probably made a change in the JDK library that has added
// these objects that are referenced (directly or indirectly) by static fields.
ResourceMark rm;
log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
debug_trace();
! exit_on_error();
}
if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
ResourceMark rm;
LogTarget(Debug, cds, heap) log;
return false;
} else {
// We don't know how to handle an object that has been archived, but some of its reachable
// objects cannot be archived. Bail out for now. We might need to fix this in the future if
// we have a real use case.
! MetaspaceShared::unrecoverable_writing_error();
}
}
}
Klass *orig_k = orig_obj->klass();
return false;
} else {
// We don't know how to handle an object that has been archived, but some of its reachable
// objects cannot be archived. Bail out for now. We might need to fix this in the future if
// we have a real use case.
! exit_on_error();
}
}
}
Klass *orig_k = orig_obj->klass();
for (int i = 0; fields[i].valid(); ) {
ArchivableStaticFieldInfo* info = &fields[i];
const char* klass_name = info->klass_name;
start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
+ ContextMark cm(klass_name);
// If you have specified consecutive fields of the same klass in
// fields[], these will be archived in the same
// {start_recording_subgraph ... done_recording_subgraph} pass to
// save time.
for (; fields[i].valid(); i++) {
ArchivableStaticFieldInfo* f = &fields[i];
if (f->klass_name != klass_name) {
break;
}
+ ContextMark cm(f->field_name);
archive_reachable_objects_from_static_field(f->klass, f->klass_name,
f->offset, f->field_name);
}
done_recording_subgraph(info->klass, klass_name);
}