}

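// Returns the size in bytes of the filler recorded at buffered_addr, or 0 if
// no filler starts at that address.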
size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

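// Overwrites one field of an object that has already been copied into the
// output buffer. T is the native pointer type of the field.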
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

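// Copies src_obj into the output buffer and returns the buffer offset of the
// new copy.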
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
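  // With compact object headers, the buffered copy may need to be larger than
  // the source object, to leave room for the expanded identity hash;
  // copy_size_cds() accounts for this, so new_size may exceed old_size.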
  size_t old_size = src_obj->size();
  size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
  size_t byte_size = new_size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that no object spans two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
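  // Copy only old_size words here; if new_size is larger, the extra space is
  // filled in later, when update_header_for_requested_obj() installs the
  // identity hash into the copy.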
  memcpy(to, from, old_size * HeapWordSize);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We archive only the platform loader, the system loader, and the boot class loader.
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

// ... (intervening code elided) ...
  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

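// Initializes the header of the buffered copy that will be mapped at
// requested_obj: the requested narrow klass and, when needed, the preserved
// identity hash of src_obj.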
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
    assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
  } else {
    fake_oop->set_narrow_klass(nk);
  }

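  // src_obj is nullptr for filler objects synthesized by the writer; those
  // need no further header updates.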
  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some
  // hashtables in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
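      // Carry over the hash-control bits from the source mark. If the source
      // object was hashed but not yet expanded, write the hash into the slot
      // reserved for it by copy_size_cds() above.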
      markWord m = markWord::prototype().set_narrow_klass(nk);
      m = m.copy_hashctrl_from(src_obj->mark());
      fake_oop->set_mark(m);
      if (m.is_hashed_not_expanded()) {
        fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m);
      }
      assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(),
             "must be either unhashed or hashed-and-expanded");
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
      DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
      assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

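// Iterates the oop fields of _src_obj and relocates the corresponding fields
// in the buffered copy, recording each pointer's position in the oopmap.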
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }