}

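// Returns the size of the filler recorded at buffered_addr, or 0 if no filler starts
// there. Assumption from context: _fillers maps buffer offsets to the byte sizes of
// dummy filler objects, presumably recorded by maybe_fill_gc_region_gap() (see below)
// so that no archived object straddles a MIN_GC_REGION_ALIGNMENT boundary.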
size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

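// Writes a raw value into a field of an object's buffered copy. The copy lives in a
// plain native buffer, not the Java heap, so an unadorned store (no GC barriers) is
// appropriate here.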
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

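// Copies src_obj into the output buffer and returns the buffer offset at which the
// copy was placed.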
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
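  // Hedged assumption about copy_size_cds(): with compact object headers, an object
  // whose identity hash has been computed but not yet stored in its body may grow by
  // a word when copied, so new_size can exceed old_size.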
  size_t old_size = src_obj->size();
  size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
  size_t byte_size = new_size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We must make sure that no object spans two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
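  // Illustrative check (hypothetical values): with MIN_GC_REGION_ALIGNMENT == 1*M, an
  // object starting at offset 0xff000 with byte_size 0x2000 would give region bottoms
  // align_down(0xff000, 1*M) == 0 and align_down(0x101000, 1*M) == 1*M, i.e. a
  // boundary crossing; the earlier maybe_fill_gc_region_gap() call prevents this.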

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
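  // Only the original old_size words are copied. Any extra tail reserved by
  // copy_size_cds() is presumably filled in later, when
  // update_header_for_requested_obj() calls initialize_hash_if_necessary() (below).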
  memcpy(to, from, old_size * HeapWordSize);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}
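
// The next fragment is the tail of a helper whose declaration is elided in this
// excerpt; it records a to-be-relocated pointer in the oopmap as a word index
// relative to the bottom of the requested region.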
  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

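// Gives the buffered copy ("fake_oop") the header it must have at run time: the
// requested narrowKlass, the preserved identity hash (if one was ever computed for
// src_obj), and no transient mark-word state such as age bits.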
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
    assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
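    // Hedged reading of the compact-object-headers path below: the hash state lives
    // in the mark word's hash-control bits; copy_hashctrl_from() carries that state
    // over from the source, and "hashed_not_expanded" (hash computed but not yet
    // stored in the object body) means the hash must be materialized into the copy.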
    if (UseCompactObjectHeaders) {
      markWord m = markWord::prototype().set_narrow_klass(nk);
      m = m.copy_hashctrl_from(src_obj->mark());
      fake_oop->set_mark(m);
      if (m.is_hashed_not_expanded()) {
        fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m);
      }
      assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(),
             "must be neither not-hashed-expanded nor hashed-not-expanded");
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
      DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
      assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

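// Iterates over every oop field of a source object. Judging from the truncated
// do_oop_work() below, each field's byte offset within _src_obj locates the
// corresponding slot in _buffered_obj so the pointer there can be relocated and its
// position marked in _oopmap.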
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));