src/hotspot/share/cds/aotMappedHeapWriter.cpp

 143   _source_objs = nullptr;
 144 
 145   delete _dumped_interned_strings;
 146   _dumped_interned_strings = nullptr;
 147 }
 148 
 149 void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
 150   _source_objs->append(src_obj);
 151 }
 152 
 153 void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
 154                                 ArchiveMappedHeapInfo* heap_info) {
 155   assert(CDSConfig::is_dumping_heap(), "sanity");
 156   allocate_buffer();
 157   copy_source_objs_to_buffer(roots);
 158   set_requested_address_range(heap_info);
 159   relocate_embedded_oops(roots, heap_info);
 160 }
 161 
 162 bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
 163   return is_too_large_to_archive(o->size());


 164 }
 165 
 166 bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
 167   typeArrayOop value = java_lang_String::value_no_keepalive(string);
 168   return is_too_large_to_archive(value);
 169 }
 170 
 171 bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
 172   assert(size > 0, "no zero-size object");
 173   assert(size * HeapWordSize > size, "no overflow");
 174   static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
 175 
 176   size_t byte_size = size * HeapWordSize;
 177   if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
 178     return true;
 179   } else {
 180     return false;
 181   }
 182 }
 183 

 230   } else {
 231     return nullptr;
 232   }
 233 }
 234 
 235 Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
 236   oop p = buffered_addr_to_source_obj(buffered_addr);
 237   if (p != nullptr) {
 238     return p->klass();
 239   } else if (get_filler_size_at(buffered_addr) > 0) {
 240     return Universe::fillerArrayKlass();
 241   } else {
 242     // This is one of the root segments
 243     return Universe::objectArrayKlass();
 244   }
 245 }
 246 
 247 size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
 248   oop p = buffered_addr_to_source_obj(buffered_addr);
 249   if (p != nullptr) {
 250     return p->size();

 251   }
 252 
 253   size_t nbytes = get_filler_size_at(buffered_addr);
 254   if (nbytes > 0) {
 255     assert((nbytes % BytesPerWord) == 0, "should be aligned");
 256     return nbytes / BytesPerWord;
 257   }
 258 
 259   address hrs = buffer_bottom();
 260   for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
 261     nbytes = _heap_root_segments.size_in_bytes(seg_idx);
 262     if (hrs == buffered_addr) {
 263       assert((nbytes % BytesPerWord) == 0, "should be aligned");
 264       return nbytes / BytesPerWord;
 265     }
 266     hrs += nbytes;
 267   }
 268 
 269   ShouldNotReachHere();
 270   return 0;

 515 }
 516 
 517 size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
 518   size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
 519   if (p != nullptr) {
 520     assert(*p > 0, "filler must be larger than zero bytes");
 521     return *p;
 522   } else {
 523     return 0; // buffered_addr is not a filler
 524   }
 525 }
 526 
 527 template <typename T>
 528 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
 529   T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
 530   *field_addr = value;
 531 }
 532 
 533 size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
 534   assert(!is_too_large_to_archive(src_obj), "already checked");
 535   size_t byte_size = src_obj->size() * HeapWordSize;


 536   assert(byte_size > 0, "no zero-size objects");
 537 
 538   // For region-based collectors such as G1, the archive heap may be mapped into
 539   // multiple regions. We need to make sure that we don't have an object that can possibly
 540   // span across two regions.
 541   maybe_fill_gc_region_gap(byte_size);
 542 
 543   size_t new_used = _buffer_used + byte_size;
 544   assert(new_used > _buffer_used, "no wrap around");
 545 
 546   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
 547   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
 548   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
 549 
 550   ensure_buffer_space(new_used);
 551 
 552   address from = cast_from_oop<address>(src_obj);
 553   address to = offset_to_buffered_address<address>(_buffer_used);
 554   assert(is_object_aligned(_buffer_used), "sanity");
 555   assert(is_object_aligned(byte_size), "sanity");
 556   memcpy(to, from, byte_size);
 557 
 558   // These native pointers will be restored explicitly at run time.
 559   if (java_lang_Module::is_instance(src_obj)) {
 560     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
 561   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
 562 #ifdef ASSERT
 563     // We only archive these loaders
 564     if (src_obj != SystemDictionary::java_platform_loader() &&
 565         src_obj != SystemDictionary::java_system_loader()) {
 566       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
 567     }
 568 #endif
 569     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
 570   }
 571 
 572   size_t buffered_obj_offset = _buffer_used;
 573   _buffer_used = new_used;
 574 
 575   return buffered_obj_offset;
 576 }

 712   assert(request_p >= (T*)_requested_bottom, "sanity");
 713   assert(request_p <  (T*)_requested_top, "sanity");
 714   requested_region_bottom = _requested_bottom;
 715 
 716   // Mark the pointer in the oopmap
 717   T* region_bottom = (T*)requested_region_bottom;
 718   assert(request_p >= region_bottom, "must be");
 719   BitMap::idx_t idx = request_p - region_bottom;
 720   assert(idx < oopmap->size(), "overflow");
 721   oopmap->set_bit(idx);
 722 }
 723 
 724 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj,  Klass* src_klass) {
 725   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
 726   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
 727   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
 728 
 729   oop fake_oop = cast_to_oop(buffered_addr);
 730   if (UseCompactObjectHeaders) {
 731     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));

 732   } else {
 733     fake_oop->set_narrow_klass(nk);
 734   }
 735 
 736   if (src_obj == nullptr) {
 737     return;
 738   }
 739   // We need to retain the identity_hash, because it may have been used by some hashtables
 740   // in the shared heap.
 741   if (!src_obj->fast_no_hash_check()) {
 742     intptr_t src_hash = src_obj->identity_hash();
 743     if (UseCompactObjectHeaders) {
 744       fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
 745     } else {
 746       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));


 747     }
 748     assert(fake_oop->mark().is_unlocked(), "sanity");
 749 
 750     DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
 751     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 752   }
 753   // Strip age bits.
 754   fake_oop->set_mark(fake_oop->mark().set_age(0));
 755 }
 756 
 757 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
 758   oop _src_obj;
 759   address _buffered_obj;
 760   CHeapBitMap* _oopmap;
 761   bool _is_java_lang_ref;
 762 public:
 763   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
 764     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
 765   {
 766     _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
 767   }
 768 
 769   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
 770   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
 771 

 143   _source_objs = nullptr;
 144 
 145   delete _dumped_interned_strings;
 146   _dumped_interned_strings = nullptr;
 147 }
 148 
 149 void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
 150   _source_objs->append(src_obj);
 151 }
 152 
 153 void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
 154                                 ArchiveMappedHeapInfo* heap_info) {
 155   assert(CDSConfig::is_dumping_heap(), "sanity");
 156   allocate_buffer();
 157   copy_source_objs_to_buffer(roots);
 158   set_requested_address_range(heap_info);
 159   relocate_embedded_oops(roots, heap_info);
 160 }
 161 
 162 bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
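       // The size of the archived copy may differ from the source object's in-heap size:
       // copy_size_cds() may reserve an extra word (e.g. for the identity hash with compact
       // object headers), so check the limit against the copy's size rather than size().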
 163   size_t size = o->size();
 164   size = o->copy_size_cds(size, o->mark());
 165   return is_too_large_to_archive(size);
 166 }
 167 
 168 bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
 169   typeArrayOop value = java_lang_String::value_no_keepalive(string);
 170   return is_too_large_to_archive(value);
 171 }
 172 
 173 bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
 174   assert(size > 0, "no zero-size object");
 175   assert(size * HeapWordSize > size, "no overflow");
 176   static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
 177 
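       // An object larger than MIN_GC_REGION_ALIGNMENT could straddle a minimal GC region
       // boundary in the mapped heap (see maybe_fill_gc_region_gap() and the boundary assert
       // in copy_one_source_obj_to_buffer()), so such objects are not archived.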
 178   size_t byte_size = size * HeapWordSize;
 179   if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
 180     return true;
 181   } else {
 182     return false;
 183   }
 184 }
 185 

 232   } else {
 233     return nullptr;
 234   }
 235 }
 236 
 237 Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
 238   oop p = buffered_addr_to_source_obj(buffered_addr);
 239   if (p != nullptr) {
 240     return p->klass();
 241   } else if (get_filler_size_at(buffered_addr) > 0) {
 242     return Universe::fillerArrayKlass();
 243   } else {
 244     // This is one of the root segments
 245     return Universe::objectArrayKlass();
 246   }
 247 }
 248 
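     // Size, in HeapWords, of whatever starts at buffered_addr: an archived object
     // (the size of its buffered copy), a filler array, or a heap root segment.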
 249 size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
 250   oop p = buffered_addr_to_source_obj(buffered_addr);
 251   if (p != nullptr) {
 252     size_t size = p->size();
 253     return p->copy_size_cds(size, p->mark());
 254   }
 255 
 256   size_t nbytes = get_filler_size_at(buffered_addr);
 257   if (nbytes > 0) {
 258     assert((nbytes % BytesPerWord) == 0, "should be aligned");
 259     return nbytes / BytesPerWord;
 260   }
 261 
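       // Neither an archived source object nor a filler: buffered_addr should then be the
       // start of one of the heap root segments, which are laid out consecutively from
       // buffer_bottom().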
 262   address hrs = buffer_bottom();
 263   for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
 264     nbytes = _heap_root_segments.size_in_bytes(seg_idx);
 265     if (hrs == buffered_addr) {
 266       assert((nbytes % BytesPerWord) == 0, "should be aligned");
 267       return nbytes / BytesPerWord;
 268     }
 269     hrs += nbytes;
 270   }
 271 
 272   ShouldNotReachHere();
 273   return 0;

 518 }
 519 
 520 size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
 521   size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
 522   if (p != nullptr) {
 523     assert(*p > 0, "filler must be larger than zero bytes");
 524     return *p;
 525   } else {
 526     return 0; // buffered_addr is not a filler
 527   }
 528 }
 529 
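     // Patches a single field in the buffered (archived) copy of an object. Used below to
     // null out native pointers (ModuleEntry*, ClassLoaderData*) that are restored
     // explicitly at run time.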
 530 template <typename T>
 531 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
 532   T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
 533   *field_addr = value;
 534 }
 535 
 536 size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
 537   assert(!is_too_large_to_archive(src_obj), "already checked");
 538   size_t old_size = src_obj->size();
 539   size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
 540   size_t byte_size = new_size * HeapWordSize;
 541   assert(byte_size > 0, "no zero-size objects");
 542 
 543   // For region-based collectors such as G1, the archive heap may be mapped into
 544   // multiple regions. We need to make sure that we don't have an object that can possibly
 545   // span across two regions.
 546   maybe_fill_gc_region_gap(byte_size);
 547 
 548   size_t new_used = _buffer_used + byte_size;
 549   assert(new_used > _buffer_used, "no wrap around");
 550 
 551   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
 552   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
 553   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
 554 
 555   ensure_buffer_space(new_used);
 556 
 557   address from = cast_from_oop<address>(src_obj);
 558   address to = offset_to_buffered_address<address>(_buffer_used);
 559   assert(is_object_aligned(_buffer_used), "sanity");
 560   assert(is_object_aligned(byte_size), "sanity");
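       // Copy only MIN2(new_size, old_size) words from the source. If the archived copy is
       // the larger of the two (e.g. identity-hash expansion with compact object headers),
       // the extra word is not present in the source and is initialized later, in
       // update_header_for_requested_obj().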
 561   memcpy(to, from, MIN2(new_size, old_size) * HeapWordSize);
 562 
 563   // These native pointers will be restored explicitly at run time.
 564   if (java_lang_Module::is_instance(src_obj)) {
 565     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
 566   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
 567 #ifdef ASSERT
 568     // We only archive these loaders
 569     if (src_obj != SystemDictionary::java_platform_loader() &&
 570         src_obj != SystemDictionary::java_system_loader()) {
 571       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
 572     }
 573 #endif
 574     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
 575   }
 576 
 577   size_t buffered_obj_offset = _buffer_used;
 578   _buffer_used = new_used;
 579 
 580   return buffered_obj_offset;
 581 }

 717   assert(request_p >= (T*)_requested_bottom, "sanity");
 718   assert(request_p <  (T*)_requested_top, "sanity");
 719   requested_region_bottom = _requested_bottom;
 720 
 721   // Mark the pointer in the oopmap
 722   T* region_bottom = (T*)requested_region_bottom;
 723   assert(request_p >= region_bottom, "must be");
 724   BitMap::idx_t idx = request_p - region_bottom;
 725   assert(idx < oopmap->size(), "overflow");
 726   oopmap->set_bit(idx);
 727 }
 728 
 729 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj,  Klass* src_klass) {
 730   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
 731   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
 732   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
 733 
 734   oop fake_oop = cast_to_oop(buffered_addr);
 735   if (UseCompactObjectHeaders) {
 736     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
 737     assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
 738   } else {
 739     fake_oop->set_narrow_klass(nk);
 740   }
 741 
 742   if (src_obj == nullptr) {
 743     return;
 744   }
 745   // We need to retain the identity_hash, because it may have been used by some hashtables
 746   // in the shared heap.
 747   if (!src_obj->fast_no_hash_check()) {
 748     intptr_t src_hash = src_obj->identity_hash();
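         // With compact object headers the identity hash is tracked by the mark word's
         // hash-control bits and, once the object has been "expanded", stored in the object
         // body. Mirror the source's state: copy the control bits; if the source was hashed
         // but not yet expanded, install the hash into the archived copy now; if it was
         // expanded but never hashed, reset the copy to the plain not-hashed state.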
 749     if (UseCompactObjectHeaders) {
 750       markWord m = markWord::prototype().set_narrow_klass(nk);
 751       m = m.copy_hashctrl_from(src_obj->mark());
 752       fake_oop->set_mark(m);
 753       if (m.is_hashed_not_expanded()) {
 754         fake_oop->set_mark(fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m));
 755       } else if (m.is_not_hashed_expanded()) {
 756         fake_oop->set_mark(m.set_not_hashed_not_expanded());
 757       }
 758       assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(), "must be neither not-hashed-expanded nor hashed-not-expanded");
 759     } else {
 760       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
 761       DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
 762       assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 763     }
 764     assert(fake_oop->mark().is_unlocked(), "sanity");
 765   }
 766   // Strip age bits.
 767   fake_oop->set_mark(fake_oop->mark().set_age(0));
 768 }
 769 
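     // Visits the oop fields embedded in one source object. The do_oop_work() helper (not
     // shown here) is expected to record each field in the oopmap and rewrite it to the
     // requested address of the referenced object in the buffered copy; _is_java_lang_ref
     // presumably lets it special-case the fields of java.lang.ref.Reference instances.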
 770 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
 771   oop _src_obj;
 772   address _buffered_obj;
 773   CHeapBitMap* _oopmap;
 774   bool _is_java_lang_ref;
 775 public:
 776   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
 777     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
 778   {
 779     _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
 780   }
 781 
 782   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
 783   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
 784 