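// Copies HeapShared::roots() into the archive buffer as a single objArray whose
// header is written by hand, since the buffer lies outside the real Java heap.
// With compact object headers the narrow klass is packed into the markWord;
// otherwise the mark and klass words are installed separately.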
void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlass(); // already relocated to point to archived klass
  int length = roots->length();
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
    if (UseCompactObjectHeaders) {
      narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(k);
      oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      *arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_offset = _buffer_used;
  _buffer_used = new_used;
  // ...
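
// The code below is the tail of a filler-length helper: starting from the largest
// candidate, it searches downward for the array length whose
// filler_array_byte_size(length) exactly equals the requested fill_bytes.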
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

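// Writes a filler objArray header at the current buffer top so that the padding
// up to the next region boundary parses as a valid, walkable heap object.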
HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

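// Pads the buffer, if needed, so that the upcoming allocation of
// required_byte_size bytes does not straddle a MIN_GC_REGION_ALIGNMENT boundary.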
void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space for min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
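  // For example (hypothetical numbers, assuming MIN_GC_REGION_ALIGNMENT = 4096 and a
  // 16-byte minimum filler): with _buffer_used = 4000 and required_byte_size = 80,
  // new_used below is 4000 + 80 + 16 = 4096, whose region bottom (4096) differs from
  // the current one (0), so the remaining 96 bytes of the current region are filled.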
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT boundaries. This
    // way we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    // ...
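
// The code below is the tail of a template helper that marks an embedded pointer
// in the oopmap: it computes the pointer's index relative to the bottom of the
// requested region and sets the corresponding bit.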
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

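// Re-installs the header of a buffered object: the narrow klass of the requested
// (run-time) klass and, if the source object has one, its identity hash.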
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. With compact object headers the hash shares the markWord with the
  // narrow klass, so the mark is rebuilt with both fields set.
  if (src_obj != nullptr && !src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}
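
// A caller would dispatch on the oop encoding when iterating the roots array; a
// minimal sketch (hypothetical call site, not part of this excerpt):
//
//   for (int i = 0; i < length; i++) {
//     if (UseCompressedOops) {
//       relocate_root_at<narrowOop>(requested_roots, i, oopmap);
//     } else {
//       relocate_root_at<oop>(requested_roots, i, oopmap);
//     }
//   }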
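
// Closure that visits the embedded oop fields of _src_obj and relocates the
// corresponding fields in the buffered copy at _buffered_obj, marking each in the oopmap.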
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :