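// Patches a single field of an object that has already been copied into the
// archive buffer. The buffered copy is not a live heap object, but since it is
// a byte-for-byte copy its field layout matches the source object, so
// field_addr<T>() can be applied to it.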
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

static bool needs_explicit_size(oop src_obj) {
  Klass* klass = src_obj->klass();
  int lh = klass->layout_helper();

  // Simple instances and arrays carry enough information in the layout helper
  // for their size to be recomputed, so they don't need an explicit size.
  if (Klass::layout_helper_is_instance(lh)) {
    return Klass::layout_helper_needs_slow_path(lh);
  }

  return !Klass::layout_helper_is_array(lh);
}
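
// For example: a plain instance or an int[] can have its size recomputed from
// the layout helper alone, whereas instances that need the slow path (notably
// java.lang.Class mirrors, whose size varies per class) get an explicit size
// word written into the stream below.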

size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  size_t old_size = src_obj->size();
  // With +UseCompactObjectHeaders the buffered copy may be one word larger than
  // the source object, to make room for the preserved identity hash;
  // copy_size_cds() appears to account for this.
  size_t size = src_obj->copy_size_cds(old_size, src_obj->mark());
  if (needs_explicit_size(src_obj)) {
    // Explicitly write the object size for more complex objects, so that the
    // loader does not have to treat the buffered bytes as real objects just to
    // read back the size. Most of the time, the layout helper of the klass is enough.
    write<size_t>(size);
  }
  size_t byte_size = size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  ensure_buffer_space(new_used);

  if (is_interned_string(src_obj)) {
    java_lang_String::hash_code(src_obj);                   // Sets the hash code field(s)
    java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
    assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
  }

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  // Copy only the overlap of the two sizes; any extra trailing word in the
  // buffered copy is presumably initialized later, when the header is updated.
  memcpy(to, from, MIN2(size, old_size) * HeapWordSize);

  if (java_lang_Module::is_instance(src_obj)) {
    // These native pointers will be restored explicitly at run time.
    Modules::check_archived_module_oop(src_obj);
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
// ...

  } else {
    // Store the DFS index of the referenced object rather than a raw address.
    int dfs_index = *_dfs_order_table->get(obj);
    store_oop_in_buffer(field_addr_in_buffer, dfs_index);
  }

  // Presumably records this field's position in the oopmap, so the loader
  // knows which words of the buffered object hold references to fix up.
  mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}
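
// The object header cannot simply be memcpy'd like the payload: the klass
// pointer must be re-encoded against the requested class-space layout, and the
// hash state preserved, which is what the function below does.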

void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);

  markWord mw = markWord::prototype();
  oopDesc* fake_oop = (oopDesc*)buffered_addr;

  // We need to retain the identity_hash, because it may have been used by some
  // hashtables in the shared heap. This also has the side effect of pre-initializing
  // the identity_hash for all shared objects, so they are less likely to be written
  // to at run time, which increases the potential for memory sharing.
  if (src_obj != nullptr) {
    if (UseCompactObjectHeaders) {
      mw = mw.copy_hashctrl_from(src_obj->mark());
      if (mw.is_hashed_not_expanded()) {
        mw = fake_oop->initialize_hash_if_necessary(src_obj, src_klass, mw);
      } else if (mw.is_not_hashed_expanded()) {
        // If a scratch mirror class has not been hashed by now, reset its
        // hash bits to the initial state.
        mw = mw.set_not_hashed_not_expanded();
      }
    } else {
      intptr_t src_hash = src_obj->identity_hash();
      mw = mw.copy_set_hash(src_hash);
    }
  }

  if (is_interned_string(src_obj)) {
    // Set the marked bit of interned strings so the loader knows to link them
    // into the string table at runtime.
    mw = mw.set_marked();
  }

  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(mw.set_narrow_klass(nk));
  } else {
    fake_oop->set_mark(mw);
    fake_oop->set_narrow_klass(nk);
  }
}
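
// Rough sketch of the two header layouts produced above (bit widths are
// illustrative only, not exact):
//   +UseCompactObjectHeaders: one mark word = [ narrowKlass | hash/ctrl | flags ]
//   -UseCompactObjectHeaders: [ mark word (hash, flags) ][ separate narrow-klass field ]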

// EmbeddedOopMapper appears to iterate the oop fields of a source object and
// record their positions for the archived oopmap (cf. mark_oop_pointer above).
class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

// ... (excerpt skips ahead to StreamedWriterOopIterator)
  StreamedWriterOopIterator(address buffer_start,
                            int num_archived_objects,
                            int num_archived_roots,
                            int* roots)
    : _current(0),
      _next(1),
      _buffer_start(buffer_start),
      _num_archived_objects(num_archived_objects),
      _num_archived_roots(num_archived_roots),
      _roots(roots) {
  }
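
  // _next starts at 1 and runs through _num_archived_objects, which suggests
  // DFS indices are 1-based (0 presumably encoding a null reference).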

  AOTMapLogger::OopData capture(int dfs_index) {
    size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
    address buffered_addr = _buffer_start + buffered_offset;
    oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
    assert(src_obj != nullptr, "must have a source object");
    oopDesc* raw_oop = (oopDesc*)buffered_addr;
    Klass* klass = src_obj->klass();
    size_t size = src_obj->size();
    // Report the size of the buffered copy, which may include an extra hash word.
    size = src_obj->copy_size_cds(size, src_obj->mark());

    intptr_t target_location = (intptr_t)buffered_offset;
    uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);

    address requested_addr = (address)buffered_offset;

    return { buffered_addr,
             requested_addr,
             target_location,
             narrow_location,
             raw_oop,
             klass,
             size,
             false };
  }
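
  // Note: requested_addr is just the buffered offset reinterpreted as an
  // address, and the DFS index doubles as the narrow location; streamed
  // objects evidently have no fixed requested mapping address.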

  bool has_next() override {
    return _next <= _num_archived_objects;
  }
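
  // (Assumption: AOTMapLogger drives this iterator, calling has_next() and
  // capture() to emit one OopData record per archived object, in DFS order.)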