void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  int length = roots->length();
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

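  // The roots array must fit within a single MIN_GC_REGION_ALIGNMENT chunk:
  // maybe_fill_gc_region_gap() guarantees that no buffered object crosses such a
  // boundary, so an array of one full chunk (or more) could never be placed.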
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, byte_size);
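  // Set up the object header. With compact object headers, the klass is encoded in
  // the mark word via the klass' prototype header, so no separate klass field is
  // written; otherwise the mark and the klass field are set individually.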
  {
    // This is copied from MemAllocator::finish
    if (UseCompactObjectHeaders) {
      oopDesc::release_set_mark(mem, k->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      *arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

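  // Remember where the roots array sits in the buffer, then commit the allocation.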
  _heap_roots_bottom_offset = _buffer_used;
  _buffer_used = new_used;
}

// ...

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
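  // initial_length is only an upper bound, because part of fill_bytes is consumed
  // by the array header and alignment. Probe decreasing lengths until the filler
  // array's total byte size matches fill_bytes exactly.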
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

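// Write a dummy object array at the top of the buffer so that the filled gap stays
// parseable as a sequence of well-formed heap objects.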
void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

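  // The two bottoms differ exactly when this allocation (plus the minimal trailing
  // filler) would cross a MIN_GC_REGION_ALIGNMENT boundary.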
  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
// ...

  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
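  // Each bit in the oopmap stands for one (possibly narrow) oop-sized slot, indexed
  // from the bottom of the requested region.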
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

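// Rewrite the header of a buffered object so that it is valid at its requested
// (runtime) address: install the requested narrow klass and, where necessary,
// preserve the identity hash.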
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

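  // fake_oop points into the buffer, not into the real heap, so we restrict
  // ourselves to direct header stores on it.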
  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(fake_oop->mark().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

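  // With compact object headers, the mark word and the narrow klass share one header
  // word; any freshly installed mark must therefore have the narrow klass re-encoded
  // into it, as below.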
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (src_obj != nullptr && !src_obj->fast_no_hash_check()) {
    int src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original %x, archived %x", src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}

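// Closure that visits each oop field embedded in a buffered object so that it can be
// relocated.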
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :