< prev index next >

src/hotspot/share/cds/aotStreamedHeapWriter.cpp

Print this page

276 
277 template <typename T>
278 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
279   T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
280   *field_addr = value;
281 }
282 
283 static bool needs_explicit_size(oop src_obj) {
284   Klass* klass = src_obj->klass();
285   int lh = klass->layout_helper();
286 
287   // Simple instances or arrays don't need explicit size
288   if (Klass::layout_helper_is_instance(lh)) {
289     return Klass::layout_helper_needs_slow_path(lh);
290   }
291 
292   return !Klass::layout_helper_is_array(lh);
293 }
294 
// Copies one source heap object into the output buffer and returns the
// buffer offset at which the copy was placed. Advances _buffer_used, and
// for a few well-known classes scrubs native pointers in the buffered copy.
size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  if (needs_explicit_size(src_obj)) {
    // Explicitly write object size for more complex objects, to avoid having to
    // pretend the buffer objects are objects when loading the objects, in order
    // to read the size. Most of the time, the layout helper of the class is enough.
    write<size_t>(src_obj->size());
  }
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  ensure_buffer_space(new_used);

  if (HeapShared::is_interned_string(src_obj)) {
    java_lang_String::hash_code(src_obj);                   // Sets the hash code field(s)
    java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
    assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
  }

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  if (java_lang_Module::is_instance(src_obj)) {
    // These native pointers will be restored explicitly at run time.
    Modules::check_archived_module_oop(src_obj);
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  // Remember where this object landed, then bump the allocation cursor.
  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;

363   } else {
364     int dfs_index = *_dfs_order_table->get(obj);
365     store_oop_in_buffer(field_addr_in_buffer, dfs_index);
366   }
367 
368   mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
369 }
370 
// Initializes the header (mark word and narrow klass) of the buffered copy at
// buffered_addr, using the source object's identity hash and the requested
// narrow-klass encoding for src_klass.
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj,  Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);

  markWord mw = markWord::prototype();
  oopDesc* fake_oop = (oopDesc*)buffered_addr;

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    intptr_t src_hash = src_obj->identity_hash();
    mw = mw.copy_set_hash(src_hash);
  }

  if (HeapShared::is_interned_string(src_obj)) {
    // Mark the mark word of interned string so the loader knows to link these to
    // the string table at runtime.
    mw = mw.set_marked();
  }

  // With compact headers the narrow klass is stored inside the mark word;
  // otherwise it occupies its own header slot.
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(mw.set_narrow_klass(nk));
  } else {
    fake_oop->set_mark(mw);
    fake_oop->set_narrow_klass(nk);
  }
}
400 
401 class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
402   oop _src_obj;
403   address _buffered_obj;
404   CHeapBitMap* _oopmap;

    int _num_archived_roots;  // number of entries in _roots
    int* _roots;              // borrowed array of root dfs indices (not owned)

  public:
    // buffer_start and num_archived_objects are forwarded to the base
    // iterator; the roots array must outlive this iterator.
    StreamedWriterOopIterator(address buffer_start,
                              int num_archived_objects,
                              int num_archived_roots,
                              int* roots)
      : AOTStreamedHeapOopIterator(buffer_start, num_archived_objects),
        _num_archived_roots(num_archived_roots),
        _roots(roots) {}
511 
    // Builds an OopData record for the archived object at the given dfs
    // index, describing where it lives in the buffer for AOT map logging.
    AOTMapLogger::OopData capture(int dfs_index) override {
      size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
      address buffered_addr = _buffer_start + buffered_offset;
      oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
      assert(src_obj != nullptr, "why is this null?");
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      Klass* klass = src_obj->klass();
      size_t size = src_obj->size();

      // In the streamed format an object is identified by its buffer offset
      // (used as both target and requested location) and its dfs index
      // (used as the narrow location).
      intptr_t target_location = (intptr_t)buffered_offset;
      uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);

      address requested_addr = (address)buffered_offset;

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }
535 
536     GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
537       GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
538 
539       for (int i = 0; i < _num_archived_roots; ++i) {

276 
// Overwrites a single field of an object that has already been copied into
// the output buffer. buffered_obj points at the buffered bytes, which are
// only viewed as an oop to compute the field's address.
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}
282 
// Returns true when the object's size must be written into the stream
// explicitly because it cannot be recomputed from the klass' layout helper.
static bool needs_explicit_size(oop src_obj) {
  Klass* klass = src_obj->klass();
  int lh = klass->layout_helper();

  // Simple instances or arrays don't need explicit size
  if (Klass::layout_helper_is_instance(lh)) {
    return Klass::layout_helper_needs_slow_path(lh);
  }

  return !Klass::layout_helper_is_array(lh);
}
294 
// Copies one source heap object into the output buffer and returns the
// buffer offset at which the copy was placed. Advances _buffer_used, and
// for a few well-known classes scrubs native pointers in the buffered copy.
size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  size_t old_size = src_obj->size();
  // The archived copy's size may differ from the source object's size;
  // copy_size_cds computes the size the buffered copy should occupy,
  // given the source's current mark word.
  size_t size = src_obj->copy_size_cds(old_size, src_obj->mark());
  if (needs_explicit_size(src_obj)) {
    // Explicitly write object size for more complex objects, to avoid having to
    // pretend the buffer objects are objects when loading the objects, in order
    // to read the size. Most of the time, the layout helper of the class is enough.
    write<size_t>(size);
  }
  size_t byte_size = size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  ensure_buffer_space(new_used);

  if (HeapShared::is_interned_string(src_obj)) {
    java_lang_String::hash_code(src_obj);                   // Sets the hash code field(s)
    java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
    assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
  }

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  // Copy only the words that exist in the source object. If the buffered
  // copy is larger (size > old_size), the trailing words are NOT copied
  // here — presumably they are initialized elsewhere (e.g. when the header
  // is updated). NOTE(review): confirm the tail beyond old_size is always
  // initialized before the buffer is written out.
  memcpy(to, from, MIN2(size, old_size) * HeapWordSize);

  if (java_lang_Module::is_instance(src_obj)) {
    // These native pointers will be restored explicitly at run time.
    Modules::check_archived_module_oop(src_obj);
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  // Remember where this object landed, then bump the allocation cursor.
  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;

365   } else {
366     int dfs_index = *_dfs_order_table->get(obj);
367     store_oop_in_buffer(field_addr_in_buffer, dfs_index);
368   }
369 
370   mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
371 }
372 
// Initializes the header (mark word and narrow klass) of the buffered copy at
// buffered_addr, preserving the source object's identity-hash state and using
// the requested narrow-klass encoding for src_klass.
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj,  Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);

  markWord mw = markWord::prototype();
  oopDesc* fake_oop = (oopDesc*)buffered_addr;

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    if (UseCompactObjectHeaders) {
      // Compact headers keep hash state bits in the mark word; carry them
      // over from the source object.
      mw = mw.copy_hashctrl_from(src_obj->mark());
      if (mw.is_hashed_not_expanded()) {
        // Hash was requested but not yet materialized in the object's
        // layout — install it in the buffered copy now.
        mw = fake_oop->initialize_hash_if_necessary(src_obj, src_klass, mw);
      } else if (mw.is_not_hashed_expanded()) {
        // If a scratch mirror class has not been hashed until now, then reset its
        // hash bits to initial state.
        mw = mw.set_not_hashed_not_expanded();
      }
    } else {
      intptr_t src_hash = src_obj->identity_hash();
      mw = mw.copy_set_hash(src_hash);
    }
  }

  if (HeapShared::is_interned_string(src_obj)) {
    // Mark the mark word of interned string so the loader knows to link these to
    // the string table at runtime.
    mw = mw.set_marked();
  }

  // With compact headers the narrow klass is stored inside the mark word;
  // otherwise it occupies its own header slot.
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(mw.set_narrow_klass(nk));
  } else {
    fake_oop->set_mark(mw);
    fake_oop->set_narrow_klass(nk);
  }
}
413 
414 class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
415   oop _src_obj;
416   address _buffered_obj;
417   CHeapBitMap* _oopmap;

    int _num_archived_roots;  // number of entries in _roots
    int* _roots;              // borrowed array of root dfs indices (not owned)

  public:
    // buffer_start and num_archived_objects are forwarded to the base
    // iterator; the roots array must outlive this iterator.
    StreamedWriterOopIterator(address buffer_start,
                              int num_archived_objects,
                              int num_archived_roots,
                              int* roots)
      : AOTStreamedHeapOopIterator(buffer_start, num_archived_objects),
        _num_archived_roots(num_archived_roots),
        _roots(roots) {}
524 
525     AOTMapLogger::OopData capture(int dfs_index) override {
526       size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
527       address buffered_addr = _buffer_start + buffered_offset;
528       oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
529       assert(src_obj != nullptr, "why is this null?");
530       oopDesc* raw_oop = (oopDesc*)buffered_addr;
531       Klass* klass = src_obj->klass();
532       size_t size = src_obj->size();
533       size = src_obj->copy_size_cds(size, src_obj->mark());
534 
535       intptr_t target_location = (intptr_t)buffered_offset;
536       uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);
537 
538       address requested_addr = (address)buffered_offset;
539 
540       return { buffered_addr,
541                requested_addr,
542                target_location,
543                narrow_location,
544                raw_oop,
545                klass,
546                size,
547                false };
548     }
549 
550     GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
551       GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
552 
553       for (int i = 0; i < _num_archived_roots; ++i) {
< prev index next >