void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  int length = roots->length();
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
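    // With compact object headers, the narrow klass lives in the high bits of
    // the mark word, so the mark and the klass must be installed as one word.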
    if (UseCompactObjectHeaders) {
      narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(k);
      oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      *arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_bottom_offset = _buffer_used;
  _buffer_used = new_used;
}
// ...

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
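  // For example (hypothetical numbers, assuming MIN_GC_REGION_ALIGNMENT == 1 MB):
  // with _buffer_used == 0xfe000 and required_byte_size == 0x3000, new_used ends
  // up past the 0x100000 boundary, so the 0x2000 bytes left in the current region
  // are filled with a dummy object array before the real allocation happens.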
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
// ...

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr + _buffered_to_requested_delta);
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
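  // Each set bit corresponds to one oop-sized (or narrowOop-sized) slot in the
  // requested region; at load time CDS walks these bits to find and patch the
  // embedded pointers.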
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (!UseCompactObjectHeaders) {
    fake_oop->set_narrow_klass(nk);
  }
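  // With compact headers, nk is installed into the mark word together with the
  // identity hash below; for the roots array (src_obj == nullptr) the mark was
  // already fully initialized in copy_roots_to_buffer.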

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    int src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original %x, archived %x", src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}
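// The element type T is chosen by the caller to match the layout of the roots
// array, e.g. relocate_root_at<narrowOop>(...) when UseCompressedOops is on and
// relocate_root_at<oop>(...) otherwise.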

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :