void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  int length = roots->length();
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }
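  // Rough bound (illustrative; assumes MIN_GC_REGION_ALIGNMENT == 1*M, UseCompressedOops,
  // and a 16-byte objArray header): the check above caps the roots array at about
  // (1*M - 16) / sizeof(narrowOop), i.e. roughly 262,000 entries.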

  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
    if (UseCompactObjectHeaders) {
      narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(k);
      oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
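  // Note: with +UseCompactObjectHeaders the narrowKlass is encoded in the mark word, so
  // one release_set_mark() publishes the whole header atomically; in the legacy layout
  // the klass occupies its own header word, and release_set_klass() orders the klass
  // store after the zeroing and mark store above.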
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o), as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      *arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
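  // For contrast (a sketch, not used here): a normal heap store such as
  //   arrayOop->obj_at_put(i, o);
  // goes through HeapAccess<IS_ARRAY>::oop_store_at(), which may apply GC barriers and
  // verify that the destination is a valid heap address -- both wrong for this
  // off-heap buffered copy, hence the raw stores above.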
  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_offset = _buffer_used;
  _buffer_used = new_used;
}

// ...

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}
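// Worked example for the length search above (illustrative numbers, assuming
// UseCompressedOops with a 16-byte objArray header): for fill_bytes == 64 we get
// elemSize == 4 and initial_length == 16, and the loop counts down until
// filler_array_byte_size(12) == 16 + 12 * 4 == 64, returning 12.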

HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's at least min_filler_byte_size of space left in the current region
  // after required_byte_size has been allocated. If not, fill the remainder of the
  // current region.
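  // Example with assumed numbers (MIN_GC_REGION_ALIGNMENT == 1*M, min_filler_byte_size
  // == 16): if _buffer_used == 1*M - 24 and required_byte_size == 16, then new_used
  // below is 1*M + 8, which lands in the next region, so the remaining 24 bytes of the
  // current region must be filled first.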
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    // ...
  }
}

// ...

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(address(buffered_addr) + _buffered_to_requested_delta);
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}
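// The oopmap stores one bit per potential (possibly narrow) oop slot: bit idx covers
// requested_region_bottom + idx * sizeof(T). For example, with UseCompressedOops a
// pointer slot 40 bytes above the region bottom is recorded as bit 10.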

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (!UseCompactObjectHeaders) {
    fake_oop->set_narrow_klass(nk);
  }

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // to at run time, increasing the potential for memory sharing.
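  // For example (illustrative): if an object served as a key in an identity-hashed
  // table that was itself archived, the table's layout depends on the key's dump-time
  // identity hash; re-installing that hash in the buffered copy keeps such lookups
  // valid at run time.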
  if (src_obj != nullptr) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}
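// relocate_root_at() is instantiated with T = narrowOop when UseCompressedOops is set,
// and T = oop otherwise; e.g. index 3 with compressed oops names the slot at
// obj_at_offset<narrowOop>(3) == arrayOopDesc::base_offset_in_bytes(T_OBJECT) + 3 * 4
// within the buffered roots array.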

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :