
src/hotspot/share/cds/archiveHeapWriter.cpp


400 }
401 
402 size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
403   size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
404   if (p != nullptr) {
405     assert(*p > 0, "filler must be larger than zero bytes");
406     return *p;
407   } else {
408     return 0; // buffered_addr is not a filler
409   }
410 }
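
get_filler_size_at() is the read side of the filler bookkeeping: _fillers maps the buffer offset of each filler object to its byte size, and a miss means the queried address holds a real archived object. A minimal sketch of the write side, roughly what maybe_fill_gc_region_gap() has to do (variable names illustrative, not the actual implementation):

    // Record a filler so get_filler_size_at() can recognize it later.
    size_t filler_offset = _buffer_used;                        // where the filler starts
    size_t filler_bytes  = next_region_bottom - filler_offset;  // pad up to the region boundary
    _fillers->put(filler_offset, filler_bytes);
    _buffer_used += filler_bytes;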
411 
412 template <typename T>
413 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
414   T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
415   *field_addr = value;
416 }
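
update_buffered_object_field() writes through the buffered copy as if it were a live object: cast_to_oop() reinterprets the raw buffer address, and field_addr<T>() resolves to base plus offset. The hand-written equivalent, as a sketch:

    // Equivalent pointer arithmetic: field_addr<T>(off) is just base + off.
    T* field_addr = reinterpret_cast<T*>(buffered_obj + field_offset);
    *field_addr = value;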
417 
418 size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
419   assert(!is_too_large_to_archive(src_obj), "already checked");
420   size_t byte_size = src_obj->size() * HeapWordSize;
421   assert(byte_size > 0, "no zero-size objects");
422 
423   // For region-based collectors such as G1, the archive heap may be mapped into
424   // multiple regions. We need to make sure that we don't have an object that could
425   // span across two regions.
426   maybe_fill_gc_region_gap(byte_size);
427 
428   size_t new_used = _buffer_used + byte_size;
429   assert(new_used > _buffer_used, "no wrap around");
430 
431   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
432   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
433   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
434 
435   ensure_buffer_space(new_used);
436 
437   address from = cast_from_oop<address>(src_obj);
438   address to = offset_to_buffered_address<address>(_buffer_used);
439   assert(is_object_aligned(_buffer_used), "sanity");
440   assert(is_object_aligned(byte_size), "sanity");
441   memcpy(to, from, byte_size);
442 
443   // These native pointers will be restored explicitly at run time.
444   if (java_lang_Module::is_instance(src_obj)) {
445     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
446   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
447 #ifdef ASSERT
448     // We only archive these loaders
449     if (src_obj != SystemDictionary::java_platform_loader() &&
450         src_obj != SystemDictionary::java_system_loader()) {
451       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
452     }
453 #endif
454     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
455   }
456 
457   size_t buffered_obj_offset = _buffer_used;
458   _buffer_used = new_used;
459 
460   return buffered_obj_offset;
461 }
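
The pair of align_down() calls above checks the invariant that maybe_fill_gc_region_gap() maintains: no object may straddle a MIN_GC_REGION_ALIGNMENT boundary. A worked example with illustrative numbers:

    // Suppose MIN_GC_REGION_ALIGNMENT = 1*M, _buffer_used = 0xFFF00, byte_size = 0x200.
    //   align_down(0xFFF00,  1*M) = 0x0      // cur_min_region_bottom
    //   align_down(0x100100, 1*M) = 0x100000 // next_min_region_bottom: differs!
    // The object would cross the boundary, so maybe_fill_gc_region_gap() must
    // first pad _buffer_used up to 0x100000; then both bottoms agree.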

557   assert(request_p >= (T*)_requested_bottom, "sanity");
558   assert(request_p <  (T*)_requested_top, "sanity");
559   requested_region_bottom = _requested_bottom;
560 
561   // Mark the pointer in the oopmap
562   T* region_bottom = (T*)requested_region_bottom;
563   assert(request_p >= region_bottom, "must be");
564   BitMap::idx_t idx = request_p - region_bottom;
565   assert(idx < oopmap->size(), "overflow");
566   oopmap->set_bit(idx);
567 }
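
The oopmap bit index is the field's element offset from the region bottom, in sizeof(T) units (4 bytes for narrowOop, 8 for oop); the pointer subtraction performs the division implicitly. The same arithmetic spelled out in bytes, as a sketch:

    // Equivalent form using an explicit byte delta.
    BitMap::idx_t elem_idx = pointer_delta((address)request_p, requested_region_bottom, sizeof(T));
    assert(elem_idx == idx, "byte delta divided by sizeof(T) gives the element index");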
568 
569 void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
570   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
571   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
572   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
573 
574   oop fake_oop = cast_to_oop(buffered_addr);
575   if (UseCompactObjectHeaders) {
576     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
577   } else {
578     fake_oop->set_narrow_klass(nk);
579   }
580 
581   if (src_obj == nullptr) {
582     return;
583   }
584   // We need to retain the identity_hash, because it may have been used by some hashtables
585   // in the shared heap.
586   if (!src_obj->fast_no_hash_check()) {
587     intptr_t src_hash = src_obj->identity_hash();
588     if (UseCompactObjectHeaders) {
589       fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
590     } else {
591       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
592     }
593     assert(fake_oop->mark().is_unlocked(), "sanity");
594 
595     DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
596     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
597   }
598   // Strip age bits.
599   fake_oop->set_mark(fake_oop->mark().set_age(0));
600 }
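
The hash round-trip this relies on: copy_set_hash() installs the bits that identity_hash() later extracts from an unlocked header. A minimal sketch, assuming legacy (non-compact) headers and an arbitrary hash value:

    // The hash survives a trip through a prototype mark word.
    markWord m = markWord::prototype().copy_set_hash(0x1234);
    assert(m.hash() == 0x1234, "copy_set_hash()/hash() round-trip");
    assert(m.is_unlocked(), "prototype marks are unlocked");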
601 
602 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
603   oop _src_obj;
604   address _buffered_obj;
605   CHeapBitMap* _oopmap;
606 
607 public:
608   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
609     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}
610 
611   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
612   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
613 
614 private:
615   template <class T> void do_oop_work(T *p) {
616     size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));

400 }
401 
402 size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
403   size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
404   if (p != nullptr) {
405     assert(*p > 0, "filler must be larger than zero bytes");
406     return *p;
407   } else {
408     return 0; // buffered_addr is not a filler
409   }
410 }
411 
412 template <typename T>
413 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
414   T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
415   *field_addr = value;
416 }
417 
418 size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
419   assert(!is_too_large_to_archive(src_obj), "already checked");
420   size_t old_size = src_obj->size();
421   size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
422   size_t byte_size = new_size * HeapWordSize;
423   assert(byte_size > 0, "no zero-size objects");
424 
425   // For region-based collectors such as G1, the archive heap may be mapped into
426   // multiple regions. We need to make sure that we don't have an object that could
427   // span across two regions.
428   maybe_fill_gc_region_gap(byte_size);
429 
430   size_t new_used = _buffer_used + byte_size;
431   assert(new_used > _buffer_used, "no wrap around");
432 
433   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
434   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
435   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
436 
437   ensure_buffer_space(new_used);
438 
439   address from = cast_from_oop<address>(src_obj);
440   address to = offset_to_buffered_address<address>(_buffer_used);
441   assert(is_object_aligned(_buffer_used), "sanity");
442   assert(is_object_aligned(byte_size), "sanity");
443   memcpy(to, from, old_size * HeapWordSize);
444 
445   // These native pointers will be restored explicitly at run time.
446   if (java_lang_Module::is_instance(src_obj)) {
447     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
448   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
449 #ifdef ASSERT
450     // We only archive these loaders
451     if (src_obj != SystemDictionary::java_platform_loader() &&
452         src_obj != SystemDictionary::java_system_loader()) {
453       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
454     }
455 #endif
456     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
457   }
458 
459   size_t buffered_obj_offset = _buffer_used;
460   _buffer_used = new_used;
461 
462   return buffered_obj_offset;
463 }
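
Note the asymmetry introduced in this version: new_size words (from copy_size_cds()) are reserved in the buffer, but only old_size words are copied. A sketch of the relationship, with the tail's purpose inferred from the header fix-up code below:

    // With compact object headers the copy may be larger than the source.
    size_t old_size = src_obj->size();
    size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
    assert(new_size >= old_size, "copy_size_cds() never shrinks");
    // The (new_size - old_size) tail, when present, is the slot that
    // initialize_hash_if_necessary() fills with the identity hash.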

559   assert(request_p >= (T*)_requested_bottom, "sanity");
560   assert(request_p <  (T*)_requested_top, "sanity");
561   requested_region_bottom = _requested_bottom;
562 
563   // Mark the pointer in the oopmap
564   T* region_bottom = (T*)requested_region_bottom;
565   assert(request_p >= region_bottom, "must be");
566   BitMap::idx_t idx = request_p - region_bottom;
567   assert(idx < oopmap->size(), "overflow");
568   oopmap->set_bit(idx);
569 }
570 
571 void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
572   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
573   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
574   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
575 
576   oop fake_oop = cast_to_oop(buffered_addr);
577   if (UseCompactObjectHeaders) {
578     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
579     assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
580   } else {
581     fake_oop->set_narrow_klass(nk);
582   }
583 
584   if (src_obj == nullptr) {
585     return;
586   }
587   // We need to retain the identity_hash, because it may have been used by some hashtables
588   // in the shared heap.
589   if (!src_obj->fast_no_hash_check()) {
590     intptr_t src_hash = src_obj->identity_hash();
591     if (UseCompactObjectHeaders) {
592       markWord m = markWord::prototype().set_narrow_klass(nk);
593       m = m.copy_hashctrl_from(src_obj->mark());
594       fake_oop->set_mark(m);
595       if (m.is_hashed_not_expanded()) {
596         fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m);
597       }
598       assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(), "must be neither not-hashed-expanded nor hashed-not-expanded");
599     } else {
600       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
601       DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
602       assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
603     }
604     assert(fake_oop->mark().is_unlocked(), "sanity");
605   }
606   // Strip age bits.
607   fake_oop->set_mark(fake_oop->mark().set_age(0));
608 }
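
With compact object headers the hash state lives in the mark word's hash-control bits, so the writer copies those bits from the source and, when the source is hashed but not yet expanded, materializes the hash into the slot reserved by copy_size_cds(). The states involved, sketched with names inferred from the predicates above:

    // unhashed             -> nothing to do; prototype bits suffice
    // hashed_not_expanded  -> source never grew: the archived copy grows instead,
    //                         and initialize_hash_if_necessary() writes the hash
    // hashed_expanded      -> hash slot already present; hashctrl bits carry over
    // copy_hashctrl_from() transfers exactly these bits; the assert above checks
    // that neither transitional state survives into the archive.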
609 
610 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
611   oop _src_obj;
612   address _buffered_obj;
613   CHeapBitMap* _oopmap;
614 
615 public:
616   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
617     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}
618 
619   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
620   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
621 
622 private:
623   template <class T> void do_oop_work(T *p) {
624     size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));