src/hotspot/share/cds/aotMappedHeapWriter.cpp

142 
143 void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
144   delete _source_objs;
145   _source_objs = nullptr;
146 }
147 
148 void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
149   _source_objs->append(src_obj);
150 }
151 
152 void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
153                                 AOTMappedHeapInfo* heap_info) {
154   assert(CDSConfig::is_dumping_heap(), "sanity");
155   allocate_buffer();
156   copy_source_objs_to_buffer(roots);
157   set_requested_address_range(heap_info);
158   relocate_embedded_oops(roots, heap_info);
159 }
160 
161 bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
162   return is_too_large_to_archive(o->size());
163 }
164 
165 bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
166   typeArrayOop value = java_lang_String::value_no_keepalive(string);
167   return is_too_large_to_archive(value);
168 }
169 
170 bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
171   assert(size > 0, "no zero-size object");
172   assert(size * HeapWordSize > size, "no overflow");
173   static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
174 
175   size_t byte_size = size * HeapWordSize;
176   if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
177     return true;
178   } else {
179     return false;
180   }
181 }
182 

210   } else {
211     return nullptr;
212   }
213 }
214 
215 Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
216   oop p = buffered_addr_to_source_obj(buffered_addr);
217   if (p != nullptr) {
218     return p->klass();
219   } else if (get_filler_size_at(buffered_addr) > 0) {
220     return Universe::fillerArrayKlass();
221   } else {
222     // This is one of the root segments
223     return Universe::objectArrayKlass();
224   }
225 }
226 
227 size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
228   oop p = buffered_addr_to_source_obj(buffered_addr);
229   if (p != nullptr) {
230     return p->size();
231   }
232 
233   size_t nbytes = get_filler_size_at(buffered_addr);
234   if (nbytes > 0) {
235     assert((nbytes % BytesPerWord) == 0, "should be aligned");
236     return nbytes / BytesPerWord;
237   }
238 
239   address hrs = buffer_bottom();
240   for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
241     nbytes = _heap_root_segments.size_in_bytes(seg_idx);
242     if (hrs == buffered_addr) {
243       assert((nbytes % BytesPerWord) == 0, "should be aligned");
244       return nbytes / BytesPerWord;
245     }
246     hrs += nbytes;
247   }
248 
249   ShouldNotReachHere();

517 void AOTMappedHeapWriter::update_stats(oop src_obj) {
518   if (java_lang_String::is_instance(src_obj)) {
519     _num_strings ++;
520     _string_bytes += src_obj->size() * HeapWordSize;
521     _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
522   } else {
523     Klass* k = src_obj->klass();
524     Symbol* name = k->name();
525     if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
526       _num_packages ++;
527     } else if (name->equals("java/security/ProtectionDomain")) {
528       _num_protection_domains ++;
529     }
530   }
531 }
532 
533 size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
534   update_stats(src_obj);
535 
536   assert(!is_too_large_to_archive(src_obj), "already checked");
537   size_t byte_size = src_obj->size() * HeapWordSize;
538   assert(byte_size > 0, "no zero-size objects");
539 
540   // For region-based collectors such as G1, the archive heap may be mapped into
541   // multiple regions. We need to make sure that we don't have an object that can
542   // possibly span two regions.
543   maybe_fill_gc_region_gap(byte_size);
544 
545   size_t new_used = _buffer_used + byte_size;
546   assert(new_used > _buffer_used, "no wrap around");
547 
548   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
549   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
550   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
551 
552   ensure_buffer_space(new_used);
553 
554   address from = cast_from_oop<address>(src_obj);
555   address to = offset_to_buffered_address<address>(_buffer_used);
556   assert(is_object_aligned(_buffer_used), "sanity");
557   assert(is_object_aligned(byte_size), "sanity");
558   memcpy(to, from, byte_size);
559 
560   // These native pointers will be restored explicitly at run time.
561   if (java_lang_Module::is_instance(src_obj)) {
562     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
563   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
564 #ifdef ASSERT
565     // We only archive these loaders
566     if (src_obj != SystemDictionary::java_platform_loader() &&
567         src_obj != SystemDictionary::java_system_loader()) {
568       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
569     }
570 #endif
571     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
572   }
573 
574   size_t buffered_obj_offset = _buffer_used;
575   _buffer_used = new_used;
576 
577   return buffered_obj_offset;
578 }

714   assert(request_p >= (T*)_requested_bottom, "sanity");
715   assert(request_p <  (T*)_requested_top, "sanity");
716   requested_region_bottom = _requested_bottom;
717 
718   // Mark the pointer in the oopmap
719   T* region_bottom = (T*)requested_region_bottom;
720   assert(request_p >= region_bottom, "must be");
721   BitMap::idx_t idx = request_p - region_bottom;
722   assert(idx < oopmap->size(), "overflow");
723   oopmap->set_bit(idx);
724 }
725 
726 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj,  Klass* src_klass) {
727   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
728   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
729   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
730 
731   oop fake_oop = cast_to_oop(buffered_addr);
732   if (UseCompactObjectHeaders) {
733     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
734   } else {
735     fake_oop->set_narrow_klass(nk);
736   }
737 
738   if (src_obj == nullptr) {
739     return;
740   }
741   // We need to retain the identity_hash, because it may have been used by some hashtables
742   // in the shared heap.
743   if (!src_obj->fast_no_hash_check()) {
744     intptr_t src_hash = src_obj->identity_hash();
745     if (UseCompactObjectHeaders) {
746       fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
747     } else {
748       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
749     }
750     assert(fake_oop->mark().is_unlocked(), "sanity");
751 
752     DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
753     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
754   }
755   // Strip age bits.
756   fake_oop->set_mark(fake_oop->mark().set_age(0));
757 }
758 
759 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
760   oop _src_obj;
761   address _buffered_obj;
762   CHeapBitMap* _oopmap;
763   bool _is_java_lang_ref;
764 public:
765   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
766     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
767   {
768     _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
769   }
770 
771   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
772   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
773 

----- Updated version of the same code follows (old version shown above) -----

142 
143 void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
144   delete _source_objs;
145   _source_objs = nullptr;
146 }
147 
148 void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
149   _source_objs->append(src_obj);
150 }
151 
152 void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
153                                 AOTMappedHeapInfo* heap_info) {
154   assert(CDSConfig::is_dumping_heap(), "sanity");
155   allocate_buffer();
156   copy_source_objs_to_buffer(roots);
157   set_requested_address_range(heap_info);
158   relocate_embedded_oops(roots, heap_info);
159 }
160 
161 bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
162   size_t size = o->size();
163   size = o->copy_size_cds(size, o->mark());
164   return is_too_large_to_archive(size);
165 }
166 
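The oop overload above now routes the size through copy_size_cds(), because under compact object headers the archived copy of an object can be larger than the source. Below is a minimal standalone sketch of that idea; the one-extra-word growth rule is an assumption made for illustration, and the real policy lives in copy_size_cds().

#include <cassert>
#include <cstddef>

// Sketch only: models why the archived ("copy") size can exceed the source
// size. The +1 word rule is an assumption for illustration; in the VM,
// copy_size_cds() computes the real answer from the mark word.
size_t copy_size_cds_model(size_t src_words, bool hashed_not_expanded) {
  // A hashed-but-not-expanded source needs room for the identity hash
  // in the copy, so the copy grows by one heap word.
  return hashed_not_expanded ? src_words + 1 : src_words;
}

int main() {
  assert(copy_size_cds_model(4, false) == 4); // unhashed: same size
  assert(copy_size_cds_model(4, true)  == 5); // hashed-not-expanded: one word larger
  return 0;
}
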
167 bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
168   typeArrayOop value = java_lang_String::value_no_keepalive(string);
169   return is_too_large_to_archive(value);
170 }
171 
172 bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
173   assert(size > 0, "no zero-size object");
174   assert(size * HeapWordSize > size, "no overflow");
175   static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
176 
177   size_t byte_size = size * HeapWordSize;
178   if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
179     return true;
180   } else {
181     return false;
182   }
183 }
184 
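An object is rejected exactly when its byte size exceeds MIN_GC_REGION_ALIGNMENT, since no archived object may straddle a minimal GC region. A standalone sketch of the boundary behavior, with assumed stand-in values (8-byte heap words, 1 MiB minimal region) in place of the VM's HeapWordSize and MIN_GC_REGION_ALIGNMENT:

#include <cassert>
#include <cstddef>

// Sketch only: assumed stand-ins for the VM constants.
constexpr size_t kHeapWordSize         = 8;
constexpr size_t kMinGCRegionAlignment = 1024 * 1024;

// Mirrors the predicate above: too large iff the byte size exceeds the
// minimal GC region alignment.
bool is_too_large_to_archive(size_t word_size) {
  return word_size * kHeapWordSize > kMinGCRegionAlignment;
}

int main() {
  size_t max_words = kMinGCRegionAlignment / kHeapWordSize;
  assert(!is_too_large_to_archive(max_words));     // exactly one region: allowed
  assert( is_too_large_to_archive(max_words + 1)); // one word over: rejected
  return 0;
}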

212   } else {
213     return nullptr;
214   }
215 }
216 
217 Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
218   oop p = buffered_addr_to_source_obj(buffered_addr);
219   if (p != nullptr) {
220     return p->klass();
221   } else if (get_filler_size_at(buffered_addr) > 0) {
222     return Universe::fillerArrayKlass();
223   } else {
224     // This is one of the root segments
225     return Universe::objectArrayKlass();
226   }
227 }
228 
229 size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
230   oop p = buffered_addr_to_source_obj(buffered_addr);
231   if (p != nullptr) {
232     if (UseCompactObjectHeaders) {
233       // Use the buffered object's mark word to determine size, not the source
234       // object's.  The source object's mark word may have changed after the
235       // buffer was written (e.g., it may have been hashed by
236       // make_archived_object_cache_gc_safe), which would cause copy_size_cds
237       // to return a different size than what was actually allocated.
238       // The buffered copy's mark word was set by update_header_for_requested_obj
239       // and correctly reflects the allocated size via its expanded/hash state.
240       oop buffered_oop = cast_to_oop(buffered_addr);
241       markWord buffered_mark = buffered_oop->mark();
242       return buffered_oop->size_given_mark_and_klass(buffered_mark, p->klass());
243     }
244     return p->size();
245   }
246 
247   size_t nbytes = get_filler_size_at(buffered_addr);
248   if (nbytes > 0) {
249     assert((nbytes % BytesPerWord) == 0, "should be aligned");
250     return nbytes / BytesPerWord;
251   }
252 
253   address hrs = buffer_bottom();
254   for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
255     nbytes = _heap_root_segments.size_in_bytes(seg_idx);
256     if (hrs == buffered_addr) {
257       assert((nbytes % BytesPerWord) == 0, "should be aligned");
258       return nbytes / BytesPerWord;
259     }
260     hrs += nbytes;
261   }
262 
263   ShouldNotReachHere();

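For the root-segment case that the loop above handles, segments sit back to back from buffer_bottom(), so an address names a segment exactly when it equals the running sum of the preceding segment sizes. A standalone sketch with made-up sizes:

#include <cassert>
#include <cstddef>

int main() {
  // Sketch only: three root segments with made-up byte sizes, laid out
  // contiguously from offset 0 (standing in for buffer_bottom()).
  size_t seg_bytes[] = {4096, 4096, 1024};
  size_t query = 8192;           // buffered offset we want to identify
  size_t start = 0;              // running start offset of segment i
  for (size_t i = 0; i < 3; i++) {
    if (start == query) {
      assert(i == 2);            // 8192 is where segment 2 begins
      return 0;
    }
    start += seg_bytes[i];
  }
  assert(false);                 // mirrors ShouldNotReachHere()
}
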
531 void AOTMappedHeapWriter::update_stats(oop src_obj) {
532   if (java_lang_String::is_instance(src_obj)) {
533     _num_strings ++;
534     _string_bytes += src_obj->size() * HeapWordSize;
535     _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
536   } else {
537     Klass* k = src_obj->klass();
538     Symbol* name = k->name();
539     if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
540       _num_packages ++;
541     } else if (name->equals("java/security/ProtectionDomain")) {
542       _num_protection_domains ++;
543     }
544   }
545 }
546 
547 size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
548   update_stats(src_obj);
549 
550   assert(!is_too_large_to_archive(src_obj), "already checked");
551   size_t old_size = src_obj->size();
552   size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
553   size_t byte_size = new_size * HeapWordSize;
554   assert(byte_size > 0, "no zero-size objects");
555 
556   // For region-based collectors such as G1, the archive heap may be mapped into
557   // multiple regions. We need to make sure that we don't have an object that can
558   // possibly span two regions.
559   maybe_fill_gc_region_gap(byte_size);
560 
561   size_t new_used = _buffer_used + byte_size;
562   assert(new_used > _buffer_used, "no wrap around");
563 
564   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
565   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
566   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
567 
568   ensure_buffer_space(new_used);
569 
570   address from = cast_from_oop<address>(src_obj);
571   address to = offset_to_buffered_address<address>(_buffer_used);
572   assert(is_object_aligned(_buffer_used), "sanity");
573   assert(is_object_aligned(byte_size), "sanity");
574   memcpy(to, from, MIN2(new_size, old_size) * HeapWordSize);
575 
576   // These native pointers will be restored explicitly at run time.
577   if (java_lang_Module::is_instance(src_obj)) {
578     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
579   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
580 #ifdef ASSERT
581     // We only archive these loaders
582     if (src_obj != SystemDictionary::java_platform_loader() &&
583         src_obj != SystemDictionary::java_system_loader()) {
584       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
585     }
586 #endif
587     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
588   }
589 
590   size_t buffered_obj_offset = _buffer_used;
591   _buffer_used = new_used;
592 
593   return buffered_obj_offset;
594 }
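
The call to maybe_fill_gc_region_gap() is what keeps the boundary assert above from firing: if the object about to be copied would straddle a minimal-region boundary, the writer first pads up to that boundary (the padding becomes a filler array). A standalone sketch of the arithmetic, assuming a 1 MiB minimal region:

#include <cassert>
#include <cstddef>

constexpr size_t kMinGCRegionAlignment = 1024 * 1024; // assumed, sketch only

// Returns the (possibly padded) offset at which the next object may start
// so that [start, start + byte_size) never crosses a region boundary.
// In the VM the padding is materialized as a filler array.
size_t maybe_fill_gc_region_gap(size_t buffer_used, size_t byte_size) {
  size_t region_end = (buffer_used / kMinGCRegionAlignment + 1) * kMinGCRegionAlignment;
  return (buffer_used + byte_size > region_end) ? region_end : buffer_used;
}

int main() {
  // A 16-byte object 8 bytes shy of the first boundary is pushed to the
  // start of the next region instead of straddling the boundary.
  assert(maybe_fill_gc_region_gap(kMinGCRegionAlignment - 8, 16) == kMinGCRegionAlignment);
  // An object that exactly fills the current region needs no padding.
  assert(maybe_fill_gc_region_gap(0, kMinGCRegionAlignment) == 0);
  return 0;
}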

730   assert(request_p >= (T*)_requested_bottom, "sanity");
731   assert(request_p <  (T*)_requested_top, "sanity");
732   requested_region_bottom = _requested_bottom;
733 
734   // Mark the pointer in the oopmap
735   T* region_bottom = (T*)requested_region_bottom;
736   assert(request_p >= region_bottom, "must be");
737   BitMap::idx_t idx = request_p - region_bottom;
738   assert(idx < oopmap->size(), "overflow");
739   oopmap->set_bit(idx);
740 }
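
The bit index computed above is plain pointer arithmetic from the requested region bottom, which the runtime relocator can invert: a set bit at idx means the slot at region_bottom + idx holds an embedded pointer to patch. A standalone sketch of that round trip for narrowOop-sized slots:

#include <cassert>
#include <cstdint>
#include <cstddef>

using narrowOop = uint32_t; // 4-byte compressed-oop slot, as in the VM

int main() {
  narrowOop region[8] = {};          // stands in for the requested region
  narrowOop* bottom = region;
  narrowOop* p = &region[5];         // a slot holding an embedded pointer
  size_t idx = size_t(p - bottom);   // the index passed to set_bit() above
  assert(idx == 5);
  assert(bottom + idx == p);         // runtime inverts: bit -> slot address
  return 0;
}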
741 
742 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj,  Klass* src_klass) {
743   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
744   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
745   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
746 
747   oop fake_oop = cast_to_oop(buffered_addr);
748   if (UseCompactObjectHeaders) {
749     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
750     assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
751   } else {
752     fake_oop->set_narrow_klass(nk);
753   }
754 
755   if (src_obj == nullptr) {
756     return;
757   }
758   // We need to retain the identity_hash, because it may have been used by some hashtables
759   // in the shared heap.
760   if (!src_obj->fast_no_hash_check()) {
761     intptr_t src_hash = src_obj->identity_hash();
762     if (UseCompactObjectHeaders) {
763       markWord m = markWord::prototype().set_narrow_klass(nk);
764       m = m.copy_hashctrl_from(src_obj->mark());
765       fake_oop->set_mark(m);
766       if (m.is_hashed_not_expanded()) {
767         fake_oop->set_mark(fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m));
768       } else if (m.is_not_hashed_expanded()) {
769         fake_oop->set_mark(m.set_not_hashed_not_expanded());
770       }
771       assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(), "must be neither not-hashed-expanded nor hashed-not-expanded");
772     } else {
773       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
774       DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
775       assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
776     }
777     assert(fake_oop->mark().is_unlocked(), "sanity");
778   }
779   // Strip age bits.
780   fake_oop->set_mark(fake_oop->mark().set_age(0));
781 }
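
Stripping the age bits leaves the archived header canonical: GC age is dump-time runtime state with no meaning in the archive. A standalone sketch of the bit manipulation, assuming the classic 64-bit mark word layout with four age bits at shift 3 (an assumption of this sketch, not stated in the patch):

#include <cassert>
#include <cstdint>

constexpr uint64_t kAgeShift = 3;                 // assumed layout, sketch only
constexpr uint64_t kAgeMask  = 0xFULL << kAgeShift;

// Models mark.set_age(0): clear the age field, leave every other bit alone.
uint64_t strip_age(uint64_t mark) { return mark & ~kAgeMask; }

int main() {
  uint64_t mark = (0xABCDEFULL << 8) | (5ULL << kAgeShift) | 0x1; // made-up hash | age=5 | unlocked
  uint64_t out  = strip_age(mark);
  assert(((out & kAgeMask) >> kAgeShift) == 0);      // age cleared
  assert((out & ~kAgeMask) == (mark & ~kAgeMask));   // hash and lock bits intact
  return 0;
}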
782 
783 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
784   oop _src_obj;
785   address _buffered_obj;
786   CHeapBitMap* _oopmap;
787   bool _is_java_lang_ref;
788 public:
789   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
790     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
791   {
792     _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
793   }
794 
795   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
796   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
797 