
src/hotspot/share/cds/aotStreamedHeapLoader.cpp

Old version:

 148 
 149 int AOTStreamedHeapLoader::archived_string_value_object_index(oopDesc* archive_object) {
 150   assert(archive_object->klass() == vmClasses::String_klass(), "Must be an archived string");
 151   address archive_string_value_addr = (address)archive_object + java_lang_String::value_offset();
 152   return UseCompressedOops ? *(int*)archive_string_value_addr : (int)*(int64_t*)archive_string_value_addr;
 153 }
 154 
 155 static int archive_array_length(oopDesc* archive_array) {
 156   return *(int*)(address(archive_array) + arrayOopDesc::length_offset_in_bytes());
 157 }
 158 
 159 static size_t archive_object_size(oopDesc* archive_object) {
 160   Klass* klass = archive_object->klass();
 161   int lh = klass->layout_helper();
 162 
 163   if (Klass::layout_helper_is_instance(lh)) {
 164     // Instance
 165     if (Klass::layout_helper_needs_slow_path(lh)) {
 166       return ((size_t*)(archive_object))[-1];
 167     } else {
 168       return (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
 169     }
 170   } else if (Klass::layout_helper_is_array(lh)) {
 171     // Array
 172     size_t size_in_bytes;
 173     size_t array_length = (size_t)archive_array_length(archive_object);
 174     size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
 175     size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);
 176 
 177     return align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
 178   } else {
 179     // Other
 180     return ((size_t*)(archive_object))[-1];
 181   }
 182 }
 183 
 184 oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
 185   assert(!archive_object->is_stackChunk(), "no such objects are archived");
 186 
 187   oop heap_object;
 188 
 189   Klass* klass = archive_object->klass();
 190   if (klass->is_mirror_instance_klass()) {
 191     heap_object = Universe::heap()->class_allocate(klass, size, CHECK_NULL);
 192   } else if (klass->is_instance_klass()) {
 193     heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
 194   } else {
 195     assert(klass->is_array_klass(), "must be");
 196     int length = archive_array_length(archive_object);
 197     bool do_zero = klass->is_objArray_klass();
 198     heap_object = Universe::heap()->array_allocate(klass, size, length, do_zero, CHECK_NULL);
 199   }
 200 
 201   heap_object->set_mark(mark);
 202 
 203   return heap_object;
 204 }
 205 
 206 void AOTStreamedHeapLoader::install_root(int root_index, oop heap_object) {
 207   objArrayOop roots = objArrayOop(_roots.resolve());
 208   OrderAccess::release(); // Once the store below publishes an object, it can be concurrently picked up by another thread without using the lock
 209   roots->obj_at_put(root_index, heap_object);
 210 }
 211 

 298       // intended linked object.
 299       oop obj = linker(heap_p_offset, pointee_object_index);
 300       if (obj != nullptr) {
 301         heap_object->obj_field_put(heap_p_offset, obj);
 302       }
 303 
 304       unfinished_bit++;
 305       next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);
 306     }
 307   }
 308 }
 309 
 310 template <bool use_coops, typename LinkerT>
 311 void AOTStreamedHeapLoader::copy_object_impl(oopDesc* archive_object,
 312                                              oop heap_object,
 313                                              size_t size,
 314                                              LinkerT linker) {
 315   if (!_allow_gc) {
 316     // While no concurrent GC can run, we can copy not-yet-linked object references
 317     // and metadata references into the heap object and then fix them up in place.
 318     size_t payload_size = size - 1;
 319     HeapWord* archive_start = ((HeapWord*)archive_object) + 1;
 320     HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object) + 1;
 321 
 322     Copy::disjoint_words(archive_start, heap_start, payload_size);
 323 
 324     // In-place linking walks the reference fields of the heap object, which still
 325     // hold archive object indices, and patches them to point at the corresponding
 326     // runtime objects. This works because we just copied the object's payload,
 327     // including those indices, from the archive. It is only safe to do before GC
 328     // can run: a concurrent GC thread might race with us reading the payload.
 329     InPlaceLinkingOopClosure cl(heap_object, linker);
 330     heap_object->oop_iterate(&cl);
 331     HeapShared::remap_loaded_metadata(heap_object);
 332     return;
 333   }
 334 
 335   // When a concurrent GC may be running, we take care never to expose invalid oops,
 336   // narrowOops or Metadata* in the heap objects: each field transitions only from 0
 337   // to its intended runtime-linked value.
 338   size_t word_scale = use_coops ? 2 : 1;
 339   using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
 340 
 341   // Skip the markWord; it is set at allocation time
 342   size_t header_size = word_scale;
 343 
 344   size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
 345   const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
 346   const BitMap::idx_t start_bit = header_bit + header_size;
 347   const BitMap::idx_t end_bit = header_bit + size * word_scale;
 348 
 349   BitMap::idx_t curr_bit = start_bit;
 350 
 351   // We are a bit paranoid about GC or other safepointing operations observing
 352   // shady metadata fields from the archive that do not point at real metadata.
 353   // We deal with this by explicitly reading the archived value and resolving it
 354   // to real Metadata before writing it into the heap object.
 355   HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
 356     BitMap::idx_t metadata_field_idx = header_bit + (size_t)metadata_offset / sizeof(RawElementT);
 357     BitMap::idx_t skip = word_scale;
 358     assert(metadata_field_idx >= start_bit && metadata_field_idx + skip <= end_bit,
 359            "Metadata field out of bounds");
 360 
 361     // Copy payload before metadata field
 362     copy_payload_carefully<use_coops>(archive_object,
 363                                       heap_object,
 364                                       header_bit,
 365                                       curr_bit,
 366                                       metadata_field_idx,
 367                                       linker);
 368 
 369     // Copy metadata field
 370     Metadata* const archive_metadata = *(Metadata**)(uintptr_t(archive_object) + (size_t)metadata_offset);

New version:

 148 
 149 int AOTStreamedHeapLoader::archived_string_value_object_index(oopDesc* archive_object) {
 150   assert(archive_object->klass() == vmClasses::String_klass(), "Must be an archived string");
 151   address archive_string_value_addr = (address)archive_object + java_lang_String::value_offset();
 152   return UseCompressedOops ? *(int*)archive_string_value_addr : (int)*(int64_t*)archive_string_value_addr;
 153 }
 154 
 155 static int archive_array_length(oopDesc* archive_array) {
 156   return *(int*)(address(archive_array) + arrayOopDesc::length_offset_in_bytes());
 157 }
 158 
 159 static size_t archive_object_size(oopDesc* archive_object) {
 160   Klass* klass = archive_object->klass();
 161   int lh = klass->layout_helper();
 162 
 163   if (Klass::layout_helper_is_instance(lh)) {
 164     // Instance
 165     if (Klass::layout_helper_needs_slow_path(lh)) {
 166       return ((size_t*)(archive_object))[-1];
 167     } else {
 168       size_t size = (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
 169       if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
 170         size = align_object_size(size + 1);
 171       }
 172       return size;
 173     }
 174   } else if (Klass::layout_helper_is_array(lh)) {
 175     // Array
 176     size_t size_in_bytes;
 177     size_t array_length = (size_t)archive_array_length(archive_object);
 178     size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
 179     size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);
 180 
 181     size_t size = align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
 182     if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
 183       size = align_object_size(size + 1);
 184     }
 185     return size;
 186   } else {
 187     // Other
 188     return ((size_t*)(archive_object))[-1];
 189   }
 190 }
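
The array branch above computes the size from the layout helper alone: shift the
length by log2 of the element size, add the header size, align up, and convert to
heap words. A minimal, self-contained sketch of that arithmetic, with hypothetical
constants standing in for HeapWordSize and MinObjAlignmentInBytes (this is not
HotSpot's actual layout-helper encoding):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t kHeapWordSize = 8;           // assumption: 64-bit VM
static const size_t kMinObjAlignmentInBytes = 8; // assumption: default alignment

static size_t align_up(size_t value, size_t alignment) {
  // alignment must be a power of two
  return (value + alignment - 1) & ~(alignment - 1);
}

// Mirrors the array branch: header plus length-scaled elements, rounded up
// to the minimum object alignment, expressed in heap words.
static size_t array_size_in_words(size_t length, int log2_element_size,
                                  size_t header_size_in_bytes) {
  size_t size_in_bytes = (length << log2_element_size) + header_size_in_bytes;
  return align_up(size_in_bytes, kMinObjAlignmentInBytes) / kHeapWordSize;
}

int main() {
  // e.g. a 10-element int array with a 16-byte header:
  // 16 + 10 * 4 = 56 bytes -> 56 / 8 = 7 heap words
  printf("%zu\n", array_size_in_words(10, 2, 16));
  return 0;
}

The compact-headers variant in the new code additionally reserves one extra word
(align_object_size(size + 1)) when the mark word shows the object was expanded to
store its identity hash.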
 191 
 192 oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
 193   assert(!archive_object->is_stackChunk(), "no such objects are archived");
 194 
 195   oop heap_object;
 196 
 197   Klass* klass = archive_object->klass();
 198   assert(!(UseCompactObjectHeaders && mark.is_hashed_not_expanded()), "Must not be hashed/not-expanded");
 199   if (klass->is_mirror_instance_klass()) {
 200     size_t base_size = size;
 201     assert(!(UseCompactObjectHeaders && mark.is_not_hashed_expanded()), "should not happen");
 202     heap_object = Universe::heap()->class_allocate(klass, size, base_size, CHECK_NULL);
 203   } else if (klass->is_instance_klass()) {
 204     heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
 205   } else {
 206     assert(klass->is_array_klass(), "must be");
 207     int length = archive_array_length(archive_object);
 208     bool do_zero = klass->is_objArray_klass();
 209     heap_object = Universe::heap()->array_allocate(klass, size, length, do_zero, CHECK_NULL);
 210   }
 211 
 212   heap_object->set_mark(mark);
 213 
 214   return heap_object;
 215 }
 216 
 217 void AOTStreamedHeapLoader::install_root(int root_index, oop heap_object) {
 218   objArrayOop roots = objArrayOop(_roots.resolve());
 219   OrderAccess::release(); // Once the store below publishes an object, it can be concurrently picked up by another thread without using the lock
 220   roots->obj_at_put(root_index, heap_object);
 221 }
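
install_root publishes roots without the reader holding a lock, so the release
barrier must order the object's initializing stores before the store into the
roots array. A minimal sketch of that publication pattern using standard C++
atomics in place of HotSpot's OrderAccess and oop stores (Payload, g_root,
publish and try_consume are hypothetical names, not HotSpot API):

#include <atomic>

struct Payload { int value; };

std::atomic<Payload*> g_root{nullptr};

void publish(Payload* p) {
  p->value = 42;                               // initialize the payload first
  g_root.store(p, std::memory_order_release);  // then publish the pointer
}

Payload* try_consume() {
  // Pairs with the release store above: a non-null result guarantees the
  // payload initialization is visible to this thread.
  return g_root.load(std::memory_order_acquire);
}

A reader that loads a non-null root therefore never sees a partially initialized
object, which is exactly what the comment in install_root relies on.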
 222 

 309       // intended linked object.
 310       oop obj = linker(heap_p_offset, pointee_object_index);
 311       if (obj != nullptr) {
 312         heap_object->obj_field_put(heap_p_offset, obj);
 313       }
 314 
 315       unfinished_bit++;
 316       next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);
 317     }
 318   }
 319 }
 320 
 321 template <bool use_coops, typename LinkerT>
 322 void AOTStreamedHeapLoader::copy_object_impl(oopDesc* archive_object,
 323                                              oop heap_object,
 324                                              size_t size,
 325                                              LinkerT linker) {
 326   if (!_allow_gc) {
 327     // While no concurrent GC can run, we can copy not-yet-linked object references
 328     // and metadata references into the heap object and then fix them up in place.
 329     size_t offset = 1;
 330     size_t payload_size = size - offset;
 331     HeapWord* archive_start = ((HeapWord*)archive_object);
 332     HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
 333 
 334     Copy::disjoint_words(archive_start + offset, heap_start + offset, payload_size);
 335 
 336     if (UseCompactObjectHeaders) {
 337       // With compact headers, bytes 4-7 of word 0 hold payload or the array length; copy them too.
 338       *(reinterpret_cast<jint*>(heap_start) + 1) = *(reinterpret_cast<jint*>(archive_start) + 1);
 339     }
 340 
 341     // In-place linking walks the reference fields of the heap object, which still
 342     // hold archive object indices, and patches them to point at the corresponding
 343     // runtime objects. This works because we just copied the object's payload,
 344     // including those indices, from the archive. It is only safe to do before GC
 345     // can run: a concurrent GC thread might race with us reading the payload.
 346     InPlaceLinkingOopClosure cl(heap_object, linker);
 347     heap_object->oop_iterate(&cl);
 348     HeapShared::remap_loaded_metadata(heap_object);
 349     return;
 350   }
 351 
 352   // When a concurrent GC may be running, we take care never to expose invalid oops,
 353   // narrowOops or Metadata* in the heap objects: each field transitions only from 0
 354   // to its intended runtime-linked value.
 355   size_t word_scale = use_coops ? 2 : 1;
 356   using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
 357 
 358   // Skip the markWord; it is set at allocation time
 359   size_t header_size = (UseCompactObjectHeaders && use_coops) ? 1 : word_scale;
 360 
 361   size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
 362   const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
 363   const BitMap::idx_t start_bit = header_bit + header_size;
 364   const BitMap::idx_t end_bit = header_bit + size * word_scale;
 365 
 366   BitMap::idx_t curr_bit = start_bit;
 367 
 368   if (UseCompactObjectHeaders && !use_coops) {
 369     // Without coops, the bitmap walk below starts at word 1; copy bytes 4-7 (primitive payload/array length) here.
 370     jint* archive_start = reinterpret_cast<jint*>(archive_object);
 371     HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
 372     *(reinterpret_cast<jint*>(heap_start) + 1) = *(archive_start + 1);
 373   }
 374 
 375   // We are a bit paranoid about GC or other safepointing operations observing
 376   // shady metadata fields from the archive that do not point at real metadata.
 377   // We deal with this by explicitly reading the archived value and resolving it
 378   // to real Metadata before writing it into the heap object.
 379   HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
 380     BitMap::idx_t metadata_field_idx = header_bit + (size_t)metadata_offset / sizeof(RawElementT);
 381     BitMap::idx_t skip = word_scale;
 382     assert(metadata_field_idx >= start_bit && metadata_field_idx + skip <= end_bit,
 383            "Metadata field out of bounds");
 384 
 385     // Copy payload before metadata field
 386     copy_payload_carefully<use_coops>(archive_object,
 387                                       heap_object,
 388                                       header_bit,
 389                                       curr_bit,
 390                                       metadata_field_idx,
 391                                       linker);
 392 
 393     // Copy metadata field
 394     Metadata* const archive_metadata = *(Metadata**)(uintptr_t(archive_object) + (size_t)metadata_offset);
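
The rest of copy_object_impl is truncated here, but the shape of the slow path is
visible: primitive payload is copied in bitmap-delimited runs, while reference and
metadata slots are resolved before anything is written. A minimal sketch of that
discipline with hypothetical types and a hypothetical linker callback (not
HotSpot's BitMap or oop API): every slot either copies verbatim (primitive) or
transitions straight from 0 to its final linked value (reference), so a concurrent
GC thread never observes a garbage pointer.

#include <cstddef>
#include <cstdint>
#include <vector>

using Slot = uint64_t;

// Copy nslots payload slots from the archive image into the heap object.
// is_reference marks slots holding archive object indices rather than
// primitive data; link() resolves an index to a runtime object's address.
void copy_carefully(const Slot* archive, Slot* heap, size_t nslots,
                    const std::vector<bool>& is_reference,
                    Slot (*link)(Slot object_index)) {
  for (size_t i = 0; i < nslots; i++) {
    if (is_reference[i]) {
      Slot linked = link(archive[i]);  // resolve index -> real pointer
      if (linked != 0) {
        heap[i] = linked;              // 0 -> final value, nothing in between
      }                                // (a real implementation would use a release store)
    } else {
      heap[i] = archive[i];            // primitive payload copies verbatim
    }
  }
}

By contrast, the !_allow_gc fast path earlier in the function can bulk-copy the
whole payload and patch it in place, because no other thread can observe the
intermediate state.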