}

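// Returns the size, in bytes, of the filler recorded at buffered_addr,
// or 0 if no filler starts at that address.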
size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

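// Writes a raw (native pointer) value into a field of an object that has
// already been copied into the output buffer. The buffered copy is not a
// live oop, but its field layout matches the source object, so field_addr()
// works on it.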
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
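  // copy_size_cds() may return a size larger than src_obj->size(): with
  // compact object headers, an object whose identity hash has been computed
  // but not yet expanded into the object grows when copied (see the hash
  // handling in update_header_for_requested_obj() below).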
  size_t old_size = src_obj->size();
  size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
  size_t byte_size = new_size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that no object spans two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

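  // If the old and new buffer tops round down to the same region bottom,
  // the object lies entirely within one minimal GC region.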
  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
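  // Copy only old_size words of the source object. If new_size > old_size,
  // the extra trailing space is filled in later, when the identity hash is
  // expanded into the buffered copy (see update_header_for_requested_obj()).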
  memcpy(to, from, old_size * HeapWordSize);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive the platform, system, and boot class loaders.
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

// ...

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

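  // Each bit in the oopmap corresponds to one pointer slot (narrowOop or oop)
  // in the requested region, so the runtime knows which words to relocate.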
  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

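// Sets up the header of the buffered copy: installs the requested narrowKlass
// and, where needed, preserves the source object's identity hash. src_obj may
// be null (e.g. for fillers), in which case only the klass is needed.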
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

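  // The buffered copy is not a live object, but viewing it as an oop lets us
  // use the regular header accessors on it.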
  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
    assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
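    // With compact headers, the hash state lives in the mark word's hashctrl
    // bits: copy them from the source, and if the hash was computed but not
    // yet expanded into the object, store it into the buffered copy now.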
    if (UseCompactObjectHeaders) {
      markWord m = markWord::prototype().set_narrow_klass(nk);
      m = m.copy_hashctrl_from(src_obj->mark());
      fake_oop->set_mark(m);
      if (m.is_hashed_not_expanded()) {
        fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m);
      }
      assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(),
             "must be neither not-hashed-expanded nor hashed-not-expanded");
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
      DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
      assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

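// Rewrites the embedded oop fields of one buffered object to their requested
// addresses and records each field's position in the oopmap.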
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
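    // Presumably flags java.lang.ref.Reference instances so that do_oop_work
    // can treat their reference-processing fields specially.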
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
