void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  int length = roots != nullptr ? roots->length() : 0;
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

  maybe_fill_gc_region_gap(byte_size);

  size_t new_top = _buffer_top + byte_size;
  ensure_buffer_space(new_top);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
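    // With compact object headers, the narrow Klass is encoded directly in the
    // mark word, so the prototype mark must already carry the requested
    // narrowKlass and no separate klass field is written.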
    if (UseCompactObjectHeaders) {
      narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(k);
      oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      *arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_bottom = _buffer_top;
  _buffer_top = new_top;
}
// ... (lines elided) ...

  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
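  // Search downward from the largest candidate length: the first guess derived
  // from fill_bytes / elemSize may be slightly too large because it does not
  // account for the array header and alignment, so we step back until
  // filler_array_byte_size(length) matches fill_bytes exactly.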
  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

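// The filler is laid out as a dummy object array so that every word of the
// archived heap region parses as a valid, walkable heap object at runtime.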
void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
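  // Illustrative example (values hypothetical): if _buffer_top is 120 bytes below
  // the next MIN_GC_REGION_ALIGNMENT boundary and required_byte_size plus the
  // minimum filler size is 160 bytes, the allocation would straddle the boundary,
  // so the remaining 120 bytes are handed to a filler array first.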
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_top = _buffer_top + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,

// ... (lines elided) ...
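// What follows is the tail of a bitmap-marking helper: it records in oopmap
// that the slot at request_p (relative to region_bottom) holds a pointer which
// must be relocated when the archived region is mapped at runtime.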
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  fake_oop->set_narrow_klass(nk);

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    int src_hash = src_obj->identity_hash();
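    // With compact object headers the mark word also carries the narrowKlass, so
    // the hash must be installed into a prototype mark that already contains it.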
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original %x, archived %x", src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
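// (T is narrowOop or oop, matching UseCompressedOops; the element offset is
// obtained from the requested (runtime) roots array and then applied to its
// buffered copy.)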
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset));
}

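// Closure applied to every oop field of a buffered object: each field is
// rewritten so that it points to the requested (runtime) address of its target.
// (Only the class skeleton is shown here; its do_oop methods are elided.)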
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj) :
    _src_obj(src_obj), _buffered_obj(buffered_obj) {}