
src/hotspot/share/cds/archiveHeapWriter.cpp

Old version:

169 address ArchiveHeapWriter::requested_address() {
170   assert(_buffer != nullptr, "must be initialized");
171   return _requested_bottom;
172 }
173 
174 void ArchiveHeapWriter::allocate_buffer() {
175   int initial_buffer_size = 100000;
176   _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
177   _buffer_used = 0;
178   ensure_buffer_space(1); // so that buffer_bottom() works
179 }
180 
181 void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
182   // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
183   guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
184   _buffer->at_grow(to_array_index(min_bytes));
185 }
186 
187 objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
188   HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
189   memset(mem, 0, objArrayOopDesc::object_size(element_count) * HeapWordSize);
190 
191   // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
192   if (UseCompactObjectHeaders) {
193     oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
194   } else {

195     oopDesc::set_mark(mem, markWord::prototype());
196     oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
197   }
198   arrayOopDesc::set_length(mem, element_count);
199   return objArrayOop(cast_to_oop(mem));
200 }
201 
202 void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
203   // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
204   if (UseCompressedOops) {
205     *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
206   } else {
207     *segment->obj_at_addr<oop>(index) = root;
208   }
209 }
210 
211 void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
212   // Depending on the number of classes we are archiving, a single roots array may be
213   // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
214   // allows us to chop the large array into a series of "segments". Current layout
215   // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
216   // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
217   // This is simple and efficient. We do not need filler objects anywhere between the segments,
218   // or immediately after the last segment. This allows starting the object dump immediately
219   // after the roots.
220 
221   assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
222          "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
223 
224   int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
225   assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
226          "Should match exactly");
227 
228   HeapRootSegments segments(_buffer_used,
229                             roots->length(),
230                             MIN_GC_REGION_ALIGNMENT,
231                             max_elem_count);
232 
233   int root_index = 0;
234   for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
235     int size_elems = segments.size_in_elems(seg_idx);
236     size_t size_bytes = segments.size_in_bytes(seg_idx);
237 
238     size_t oop_offset = _buffer_used;
239     _buffer_used = oop_offset + size_bytes;
240     ensure_buffer_space(_buffer_used);
241 
242     assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
243            "Roots segment %zu start is not aligned: %zu",
244            seg_idx, oop_offset);
245 

319     int src_obj_index = _source_objs_order->at(i)._index;
320     oop src_obj = _source_objs->at(src_obj_index);
321     HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
322     assert(info != nullptr, "must be");
323     size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
324     info->set_buffer_offset(buffer_offset);
325 
326     _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
327     _buffer_offset_to_source_obj_table->maybe_grow();
328 
329     if (java_lang_Module::is_instance(src_obj)) {
330       Modules::check_archived_module_oop(src_obj);
331     }
332   }
333 
334   log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
335                 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
336 }
337 
338 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
339   size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
340   return byte_size;
341 }
342 
343 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
344   assert(is_object_aligned(fill_bytes), "must be");
345   size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
346 
347   int initial_length = to_array_length(fill_bytes / elemSize);
348   for (int length = initial_length; length >= 0; length--) {
349     size_t array_byte_size = filler_array_byte_size(length);
350     if (array_byte_size == fill_bytes) {
351       return length;
352     }
353   }
354 
355   ShouldNotReachHere();
356   return -1;
357 }
358 
359 HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {

576   oopmap->set_bit(idx);
577 }
578 
579 void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
580   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
581   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
582   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
583 
584   oop fake_oop = cast_to_oop(buffered_addr);
585   if (UseCompactObjectHeaders) {
586     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
587   } else {
588     fake_oop->set_narrow_klass(nk);
589   }
590 
591   if (src_obj == nullptr) {
592     return;
593   }
594   // We need to retain the identity_hash, because it may have been used by some hashtables
595   // in the shared heap.
596   if (!src_obj->fast_no_hash_check()) {
597     intptr_t src_hash = src_obj->identity_hash();
598     if (UseCompactObjectHeaders) {
599       fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));


600     } else {
601       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
602     }
603     assert(fake_oop->mark().is_unlocked(), "sanity");
604 
605     DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
606     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
607   }
608   // Strip age bits.
609   fake_oop->set_mark(fake_oop->mark().set_age(0));
610 }
611 
612 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
613   oop _src_obj;
614   address _buffered_obj;
615   CHeapBitMap* _oopmap;
616   bool _is_java_lang_ref;
617 public:
618   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
619     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)

New version:

169 address ArchiveHeapWriter::requested_address() {
170   assert(_buffer != nullptr, "must be initialized");
171   return _requested_bottom;
172 }
173 
174 void ArchiveHeapWriter::allocate_buffer() {
175   int initial_buffer_size = 100000;
176   _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
177   _buffer_used = 0;
178   ensure_buffer_space(1); // so that buffer_bottom() works
179 }
180 
181 void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
182   // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
183   guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
184   _buffer->at_grow(to_array_index(min_bytes));
185 }
186 
187 objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
188   HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
189   memset(mem, 0, refArrayOopDesc::object_size(element_count) * HeapWordSize);
190 
191   // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
192   if (UseCompactObjectHeaders) {
193     oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
194   } else {
195     assert(!EnableValhalla || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
196     oopDesc::set_mark(mem, markWord::prototype());
197     oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
198   }
199   arrayOopDesc::set_length(mem, element_count);
200   return objArrayOop(cast_to_oop(mem));
201 }
202 
203 void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
204   // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
205   if (UseCompressedOops) {
206     *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
207   } else {
208     *segment->obj_at_addr<oop>(index) = root;
209   }
210 }
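
Both allocate_root_segment() and root_segment_at_put() build an objArray inside a plain C++ buffer, outside the real Java heap, which is why the header is written field-by-field and the elements are stored with raw writes instead of obj_at_put() and its GC barriers. A standalone toy model of the same idea (all types, constants, and the encoding below are assumptions for illustration, not HotSpot's actual definitions):

#include <cassert>
#include <cstdint>
#include <cstdlib>

using narrowOop = uint32_t;                 // assumed compressed-reference type
const uint64_t kHeapBase = 0x100000000ULL;  // assumed encoding base
const int      kShift    = 3;               // assumed encoding shift

// Rough analogue of CompressedOops::encode() for a "requested" address.
narrowOop encode(uint64_t requested_addr) {
  return (narrowOop)((requested_addr - kHeapBase) >> kShift);
}

struct Header { uint64_t mark; uint32_t klass; int32_t length; };

int main() {
  const int n = 4;
  // The "array" lives in plain C memory, not in the Java heap.
  char* buf = (char*)calloc(1, sizeof(Header) + n * sizeof(narrowOop));
  Header* h = (Header*)buf;
  h->mark = 0x1;                            // stand-in for the mark prototype
  h->klass = 42;                            // stand-in for the array klass
  h->length = n;
  narrowOop* elems = (narrowOop*)(buf + sizeof(Header));
  elems[2] = encode(kHeapBase + 0x1000);    // raw store: no GC barrier possible
  assert(elems[2] == (0x1000 >> kShift));
  free(buf);
}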
211 
212 void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
213   // Depending on the number of classes we are archiving, a single roots array may be
214   // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
215   // allows us to chop the large array into a series of "segments". Current layout
216   // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
217   // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
218   // This is simple and efficient. We do not need filler objects anywhere between the segments,
219   // or immediately after the last segment. This allows starting the object dump immediately
220   // after the roots.
221 
222   assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
223          "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
224 
225   int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
226   assert(refArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
227          "Should match exactly");
228 
229   HeapRootSegments segments(_buffer_used,
230                             roots->length(),
231                             MIN_GC_REGION_ALIGNMENT,
232                             max_elem_count);
233 
234   int root_index = 0;
235   for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
236     int size_elems = segments.size_in_elems(seg_idx);
237     size_t size_bytes = segments.size_in_bytes(seg_idx);
238 
239     size_t oop_offset = _buffer_used;
240     _buffer_used = oop_offset + size_bytes;
241     ensure_buffer_space(_buffer_used);
242 
243     assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
244            "Roots segment %zu start is not aligned: %zu",
245            seg_idx, oop_offset);
246 

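Worked numbers for the segmentation scheme described in the comment at the top of copy_roots_to_buffer(), under assumed values: a 1 MB MIN_GC_REGION_ALIGNMENT, a 16-byte array header, and 4-byte compressed oops. With those constants a full segment fills a region exactly, so no filler is needed between segments:

#include <cstdio>

int main() {
  const size_t region = 1024 * 1024;   // assumed MIN_GC_REGION_ALIGNMENT
  const size_t header = 16;            // assumed array header size in bytes
  const size_t elem   = 4;             // heapOopSize with compressed oops
  const int max_elems = (int)((region - header) / elem);   // 262140

  // A full segment then occupies exactly one region:
  // 16 + 262140 * 4 = 1048576 bytes, matching the object_size assert above.
  int roots = 300000;                  // example root count
  printf("%d full segment(s) of %d elems, tail segment of %d elems\n",
         roots / max_elems, max_elems, roots % max_elems);
  return 0;
}
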
320     int src_obj_index = _source_objs_order->at(i)._index;
321     oop src_obj = _source_objs->at(src_obj_index);
322     HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
323     assert(info != nullptr, "must be");
324     size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
325     info->set_buffer_offset(buffer_offset);
326 
327     _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
328     _buffer_offset_to_source_obj_table->maybe_grow();
329 
330     if (java_lang_Module::is_instance(src_obj)) {
331       Modules::check_archived_module_oop(src_obj);
332     }
333   }
334 
335   log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
336                 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
337 }
338 
339 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
340   size_t byte_size = refArrayOopDesc::object_size(length) * HeapWordSize;
341   return byte_size;
342 }
343 
344 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
345   assert(is_object_aligned(fill_bytes), "must be");
346   size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
347 
348   int initial_length = to_array_length(fill_bytes / elemSize);
349   for (int length = initial_length; length >= 0; length--) {
350     size_t array_byte_size = filler_array_byte_size(length);
351     if (array_byte_size == fill_bytes) {
352       return length;
353     }
354   }
355 
356   ShouldNotReachHere();
357   return -1;
358 }
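
filler_array_length() searches downward because fill_bytes / elemSize can overshoot once the array header and object-alignment padding are accounted for. A self-contained model of that search, using an assumed 8-byte object alignment and illustrative header/element sizes:

#include <cassert>
#include <cstddef>

// Assumed model: array size = header + length*elem, rounded up to 8-byte
// object alignment. Constants are illustrative, not from a real build.
size_t filler_size_bytes(int length, size_t header, size_t elem) {
  size_t raw = header + (size_t)length * elem;
  return (raw + 7) & ~(size_t)7;
}

int filler_length(size_t fill_bytes, size_t header, size_t elem) {
  // fill_bytes / elem may overshoot once header and alignment are added,
  // so walk downward until the sizes match exactly.
  for (int length = (int)(fill_bytes / elem); length >= 0; length--) {
    if (filler_size_bytes(length, header, elem) == fill_bytes) {
      return length;
    }
  }
  return -1;  // unreachable for object-aligned fill_bytes >= header
}

int main() {
  assert(filler_length(1024, 16, 4) == 252);  // 16 + 252*4 == 1024
  assert(filler_length(48, 16, 8) == 4);      // 16 + 4*8  == 48
}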
359 
360 HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {

577   oopmap->set_bit(idx);
578 }
579 
580 void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
581   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
582   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
583   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
584 
585   oop fake_oop = cast_to_oop(buffered_addr);
586   if (UseCompactObjectHeaders) {
587     fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
588   } else {
589     fake_oop->set_narrow_klass(nk);
590   }
591 
592   if (src_obj == nullptr) {
593     return;
594   }
595   // We need to retain the identity_hash, because it may have been used by some hashtables
596   // in the shared heap.
597   if (!src_obj->fast_no_hash_check() && !(EnableValhalla && src_obj->mark().is_inline_type())) {
598     intptr_t src_hash = src_obj->identity_hash();
599     if (UseCompactObjectHeaders) {
600       fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
601     } else if (EnableValhalla) {
602       fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
603     } else {
604       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
605     }
606     assert(fake_oop->mark().is_unlocked(), "sanity");
607 
608     DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
609     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
610   }
611   // Strip age bits.
612   fake_oop->set_mark(fake_oop->mark().set_age(0));
613 }
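
The mark-word edits above (re-install the identity hash, then strip the age bits; under Valhalla the klass's own prototype header is used as the base instead of markWord::prototype()) can be modelled bit-for-bit. The layout constants below reflect the usual non-compact 64-bit mark word and are assumptions for illustration only:

#include <cassert>
#include <cstdint>

// Assumed 64-bit mark word layout (non-compact headers):
// [unused:25 | hash:31 | unused:1 | age:4 | unused:1 | lock:2]
const int      kHashShift = 8;
const uint64_t kHashMask  = ((uint64_t(1) << 31) - 1) << kHashShift;
const int      kAgeShift  = 3;
const uint64_t kAgeMask   = uint64_t(0xf) << kAgeShift;
const uint64_t kPrototype = 0x1;   // unlocked, no hash, age 0 (stand-in)

uint64_t copy_set_hash(uint64_t mark, uint64_t hash) {
  return (mark & ~kHashMask) | ((hash << kHashShift) & kHashMask);
}

uint64_t set_age(uint64_t mark, unsigned age) {
  return (mark & ~kAgeMask) | ((uint64_t(age) << kAgeShift) & kAgeMask);
}

int main() {
  uint64_t m = copy_set_hash(kPrototype, 0x1234567); // retain identity hash
  m = set_age(m, 0);                                 // strip age bits, as above
  assert(((m & kHashMask) >> kHashShift) == 0x1234567);
  assert((m & kAgeMask) == 0);
  assert((m & 0x3) == 0x1);                          // still "unlocked"
}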
614 
615 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
616   oop _src_obj;
617   address _buffered_obj;
618   CHeapBitMap* _oopmap;
619   bool _is_java_lang_ref;
620 public:
621   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
622     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)