src/hotspot/share/cds/aotStreamedHeapLoader.cpp
  if (Klass::layout_helper_is_instance(lh)) {
    // Instance
    if (Klass::layout_helper_needs_slow_path(lh)) {
      return ((size_t*)(archive_object))[-1];
    } else {
!     return (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
    }
  } else if (Klass::layout_helper_is_array(lh)) {
    // Array
    size_t size_in_bytes;
    size_t array_length = (size_t)archive_array_length(archive_object);
    size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
    size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);
!   return align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
  } else {
    // Other
    return ((size_t*)(archive_object))[-1];
  }
}
  if (Klass::layout_helper_is_instance(lh)) {
    // Instance
    if (Klass::layout_helper_needs_slow_path(lh)) {
      return ((size_t*)(archive_object))[-1];
    } else {
!     size_t size = (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
+     if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
+       size = align_object_size(size + 1);
+     }
+     return size;
    }
  } else if (Klass::layout_helper_is_array(lh)) {
    // Array
    size_t size_in_bytes;
    size_t array_length = (size_t)archive_array_length(archive_object);
    size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
    size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);
!   size_t size = align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
+   if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
+     size = align_object_size(size + 1);
+   }
+   return size;
  } else {
    // Other
    return ((size_t*)(archive_object))[-1];
  }
}
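
The change above grows the computed object size by one heap word, and re-aligns it, when a compact-header object was expanded to carry its identity hash. The following standalone sketch (not part of the patch) spells out that arithmetic under assumed 8-byte heap words and 8-byte minimum object alignment; the real code uses LogHeapWordSize, MinObjAlignmentInBytes and align_object_size(), and all names below are hypothetical.

// Standalone sketch of the size adjustment above; all constants and helper
// names are assumptions for illustration only.
#include <cstddef>
#include <cassert>

static const size_t kHeapWordSize     = 8;  // assumption: 64-bit heap words
static const size_t kMinObjAlignWords = 1;  // assumption: 8-byte object alignment

// Round a size in words up to the minimum object alignment.
static size_t align_object_size_words(size_t words) {
  return (words + kMinObjAlignWords - 1) / kMinObjAlignWords * kMinObjAlignWords;
}

// Size in heap words for an instance of 'instance_bytes', grown by one word
// when the compact mark word was expanded to keep the identity hash.
static size_t instance_size_in_words(size_t instance_bytes, bool hash_expanded) {
  size_t size = instance_bytes / kHeapWordSize;   // bytes -> words
  if (hash_expanded) {
    size = align_object_size_words(size + 1);     // one extra word, realigned
  }
  return size;
}

int main() {
  assert(instance_size_in_words(24, false) == 3);
  assert(instance_size_in_words(24, true)  == 4);
  return 0;
}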
  assert(!archive_object->is_stackChunk(), "no such objects are archived");
  oop heap_object;
  Klass* klass = archive_object->klass();
  if (klass->is_mirror_instance_klass()) {
!   heap_object = Universe::heap()->class_allocate(klass, size, CHECK_NULL);
  } else if (klass->is_instance_klass()) {
    heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
  } else {
    assert(klass->is_array_klass(), "must be");
    int length = archive_array_length(archive_object);
  assert(!archive_object->is_stackChunk(), "no such objects are archived");
  oop heap_object;
  Klass* klass = archive_object->klass();
+ assert(!(UseCompactObjectHeaders && mark.is_hashed_not_expanded()), "Must not be hashed/not-expanded");
  if (klass->is_mirror_instance_klass()) {
!   size_t base_size = size;
+   assert(!(UseCompactObjectHeaders && mark.is_not_hashed_expanded()), "should not happen");
+   heap_object = Universe::heap()->class_allocate(klass, size, base_size, CHECK_NULL);
  } else if (klass->is_instance_klass()) {
    heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
  } else {
    assert(klass->is_array_klass(), "must be");
    int length = archive_array_length(archive_object);
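
The asserts added in this hunk encode an invariant about compact-header hash states: an archived object may be unhashed or already expanded, but must never arrive "hashed and not yet expanded", and a mirror must additionally not arrive "expanded but not hashed". The toy model below is not HotSpot code; the enum values only mirror the markWord predicate names used in the asserts and do not represent the actual mark word encoding.

// Toy model (assumption-laden, illustration only) of the hash states named by
// the asserts above.
#include <cassert>

enum class HashState {
  unhashed,             // identity hash never requested
  hashed_not_expanded,  // hash requested, object not yet grown to store it
  hashed_expanded,      // hash requested and object grown to store it
  not_hashed_expanded   // grown although no hash was requested (see assert above)
};

static bool is_hashed_not_expanded(HashState s) { return s == HashState::hashed_not_expanded; }
static bool is_not_hashed_expanded(HashState s) { return s == HashState::not_hashed_expanded; }

int main() {
  // Per the asserts above, an archived object never reaches the loader in the
  // "hashed but not yet expanded" state, and a mirror never in the
  // "expanded but not hashed" state.
  HashState archived = HashState::hashed_expanded;   // hypothetical example value
  assert(!is_hashed_not_expanded(archived));
  assert(!is_not_hashed_expanded(archived));
  return 0;
}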
                                               size_t size,
                                               LinkerT linker) {
  if (!_allow_gc) {
    // Without concurrent GC running, we can copy incorrect object references
    // and metadata references into the heap object and then fix them up in-place.
!   size_t payload_size = size - 1;
!   HeapWord* archive_start = ((HeapWord*)archive_object) + 1;
!   HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object) + 1;
!   Copy::disjoint_words(archive_start, heap_start, payload_size);
    // In-place linking fixes up object indices from references of the heap object,
    // and patches them up to refer to objects. This can be done because we just copied
    // the payload of the object from the archive to the heap object, including the
    // reference object indices. However, this is only okay to do before the GC can run.
                                               size_t size,
                                               LinkerT linker) {
  if (!_allow_gc) {
    // Without concurrent GC running, we can copy incorrect object references
    // and metadata references into the heap object and then fix them up in-place.
!   size_t offset = 1;
!   size_t payload_size = size - offset;
!   HeapWord* archive_start = ((HeapWord*)archive_object);
+   HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
+
+   Copy::disjoint_words(archive_start + offset, heap_start + offset, payload_size);
!   if (UseCompactObjectHeaders) {
+     // The copying might have missed the first 4 bytes of payload/arraylength, copy that also.
+     *(reinterpret_cast<jint*>(heap_start) + 1) = *(reinterpret_cast<jint*>(archive_start) + 1);
+   }
    // In-place linking fixes up object indices from references of the heap object,
    // and patches them up to refer to objects. This can be done because we just copied
    // the payload of the object from the archive to the heap object, including the
    // reference object indices. However, this is only okay to do before the GC can run.
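
The copy above deliberately skips word 0 because the mark word is installed at allocation time rather than copied. With 8-byte headers the payload begins at word 1, so nothing is lost; with 4-byte compact headers the first 4 payload bytes (or the array length) live in bytes [4, 8) of word 0 and would be missed by the word-granular copy, hence the extra 4-byte copy. Below is a standalone sketch of that pattern using plain buffers instead of HeapWord/oop types; all sizes and names are illustrative.

// Sketch of the header-skipping copy: skip word 0, copy the remaining words,
// and with compact headers also copy the 4 bytes at offset 4.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <cassert>

static void copy_payload(const uint64_t* archive, uint64_t* heap,
                         size_t size_in_words, bool compact_headers) {
  // Word 0 holds the mark word; it is set at allocation time, not copied.
  std::memcpy(heap + 1, archive + 1, (size_in_words - 1) * sizeof(uint64_t));
  if (compact_headers) {
    // Bytes [4, 8) hold the first payload bytes / array length with a 4-byte header.
    reinterpret_cast<int32_t*>(heap)[1] = reinterpret_cast<const int32_t*>(archive)[1];
  }
}

int main() {
  uint64_t archive[3] = {0x1111111122222222ull, 0x3333333333333333ull, 0x4444444444444444ull};
  uint64_t heap[3]    = {0, 0, 0};
  copy_payload(archive, heap, 3, /*compact_headers=*/true);
  assert(heap[1] == archive[1]);
  assert(heap[2] == archive[2]);
  // heap[0] keeps its freshly installed header except for the 4 copied payload bytes.
  return 0;
}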
  // intended runtime linked values only.
  size_t word_scale = use_coops ? 2 : 1;
  using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
  // Skip the markWord; it is set at allocation time
! size_t header_size = word_scale;
  size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
  const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
  const BitMap::idx_t start_bit = header_bit + header_size;
  const BitMap::idx_t end_bit = header_bit + size * word_scale;
  BitMap::idx_t curr_bit = start_bit;
  // We are a bit paranoid about GC or other safepointing operations observing
  // shady metadata fields from the archive that do not point at real metadata.
  // We deal with this by explicitly reading the requested address from the
  // archive and fixing it to real Metadata before writing it into the heap object.
  HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
  // intended runtime linked values only.
  size_t word_scale = use_coops ? 2 : 1;
  using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
  // Skip the markWord; it is set at allocation time
! size_t header_size = (UseCompactObjectHeaders && use_coops) ? 1 : word_scale;
  size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
  const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
  const BitMap::idx_t start_bit = header_bit + header_size;
  const BitMap::idx_t end_bit = header_bit + size * word_scale;
  BitMap::idx_t curr_bit = start_bit;
+ if (UseCompactObjectHeaders && !use_coops) {
+   // Copy first 4 primitive bytes.
+   jint* archive_start = reinterpret_cast<jint*>(archive_object);
+   HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
+   *(reinterpret_cast<jint*>(heap_start) + 1) = *(archive_start + 1);
+ }
+
  // We are a bit paranoid about GC or other safepointing operations observing
  // shady metadata fields from the archive that do not point at real metadata.
  // We deal with this by explicitly reading the requested address from the
  // archive and fixing it to real Metadata before writing it into the heap object.
  HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
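
The header_size change above affects how the reference bitmap is walked: with compressed oops every heap word contributes two 32-bit slots, and a 4-byte compact header covers only one of those slots instead of two, so candidate reference slots start one element earlier. A small sketch of that indexing follows; it shows the arithmetic only and does not use the BitMap API.

// Illustrative slot arithmetic for the compressed-oops case; names are
// hypothetical and the values mirror the header_size expression above.
#include <cstddef>
#include <cassert>

static size_t first_candidate_slot(bool compact_headers) {
  const size_t word_scale = 2;                // 32-bit slots per heap word (coops)
  return compact_headers ? 1 : word_scale;    // slots occupied by the object header
}

static size_t slot_count(size_t size_in_words) {
  const size_t word_scale = 2;
  return size_in_words * word_scale;          // total 32-bit slots in the object
}

int main() {
  // A 3-word instance has 6 slots; references may start at slot 2 with 8-byte
  // headers, or already at slot 1 with 4-byte compact headers.
  assert(slot_count(3) == 6);
  assert(first_candidate_slot(false) == 2);
  assert(first_candidate_slot(true)  == 1);
  return 0;
}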