
src/hotspot/share/cds/archiveBuilder.cpp

@@ -212,12 +212,14 @@
      Klass* klass = (Klass*)ref->obj();
      assert(klass->is_klass(), "must be");
      if (!is_excluded(klass)) {
        _klasses->append(klass);
      }
-     // See RunTimeClassInfo::get_for()
-     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
+     // See RunTimeClassInfo::get_for(): make sure we have enough space for both the maximum
+     // Klass alignment and the RunTimeClassInfo* pointer we will embed in front of a Klass.
+     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, CompressedKlassPointers::klass_alignment_in_bytes()) +
+         align_up(sizeof(void*), SharedSpaceObjectAlignment);
    } else if (ref->msotype() == MetaspaceObj::SymbolType) {
      // Make sure the symbol won't be GC'ed while we are dumping the archive.
      Symbol* sym = (Symbol*)ref->obj();
      sym->increment_refcount();
      _symbols->append(sym);

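The new estimate reserves, for every Klass, a worst-case alignment gap plus the slot for the embedded RunTimeClassInfo* pointer. A standalone sketch of that arithmetic (the 1 KiB Klass alignment and the 8-byte word/alignment values are assumptions for illustration, not taken from this change):

    #include <cstddef>
    #include <cstdio>

    // align_up as in HotSpot's utilities/align.hpp: round x up to a power-of-two boundary.
    static size_t align_up(size_t x, size_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t BytesPerWord = 8;                   // 64-bit word (assumed)
      const size_t SharedSpaceObjectAlignment = 8;     // assumed default
      const size_t klass_alignment = (size_t)1 << 10;  // hypothetical: shift 10 -> 1 KiB
      // Worst-case per-Klass overhead reserved by the estimate: the alignment gap
      // that may precede a Klass, plus the RunTimeClassInfo* slot in front of it.
      size_t overhead = align_up(BytesPerWord, klass_alignment)
                      + align_up(sizeof(void*), SharedSpaceObjectAlignment);
      printf("estimated per-Klass overhead: %zu bytes\n", overhead);  // 1024 + 8 == 1032
      return 0;
    }
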
@@ -626,21 +628,32 @@
    char* oldtop;
    char* newtop;
  
    oldtop = dump_region->top();
    if (src_info->msotype() == MetaspaceObj::ClassType) {
-     // Save a pointer immediate in front of an InstanceKlass, so
+     // Allocate space for a pointer directly in front of the future InstanceKlass, so
      // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
      // without building another hashtable. See RunTimeClassInfo::get_for()
      // in systemDictionaryShared.cpp.
      Klass* klass = (Klass*)src;
      if (klass->is_instance_klass()) {
        SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
        dump_region->allocate(sizeof(address));
      }
+     // Allocate space for the future InstanceKlass with proper alignment
+     const size_t alignment =
+ #ifdef _LP64
+       UseCompressedClassPointers ?
+         nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()) :
+         SharedSpaceObjectAlignment;
+ #else
+       SharedSpaceObjectAlignment;
+ #endif
+     dest = dump_region->allocate(bytes, alignment);
+   } else {
+     dest = dump_region->allocate(bytes);
    }
-   dest = dump_region->allocate(bytes);
    newtop = dump_region->top();
  
    memcpy(dest, src, bytes);
  
    // Update the hash of buffered sorted symbols for static dump so that the symbols have deterministic contents

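The alignment chosen above is what makes the narrow-Klass encoding exact: with a shift of s, a narrowKlass addresses units of 2^s bytes, so a buffered Klass must start on a 2^s boundary. A minimal model of the selection logic, with assumed stand-in values for the flag and constants:

    #include <cstddef>
    #include <cstdio>

    // nth_bit as in HotSpot: a value with only bit n set, i.e. 1 << n.
    static size_t nth_bit(int n) { return (size_t)1 << n; }

    // Hypothetical stand-ins for the flag and constants consulted in allocate():
    static const bool   UseCompressedClassPointers     = true;
    static const int    precomputed_narrow_klass_shift = 10;  // assumed value
    static const size_t SharedSpaceObjectAlignment     = 8;

    static size_t klass_allocation_alignment() {
      // Under compressed class pointers the Klass must be aligned to the
      // granularity the shift implies; otherwise the ordinary alignment suffices.
      return UseCompressedClassPointers
                 ? nth_bit(precomputed_narrow_klass_shift)   // 1024
                 : SharedSpaceObjectAlignment;               // 8
    }

    int main() {
      printf("Klass allocation alignment: %zu\n", klass_allocation_alignment());
      return 0;
    }
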
@@ -667,10 +680,12 @@
  
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
    src_info->set_buffered_addr((address)dest);
  
    _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
+ 
+   DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
  }
  
  // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
  // not handled by MetaspaceClosure.
  void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {

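The new DEBUG_ONLY check cross-verifies the statistics against the region: every byte the dump region has handed out must have been recorded. A hypothetical model of that invariant (not the real DumpAllocStats API):

    #include <cassert>

    // Minimal model of the invariant _alloc_stats.verify() checks above: the
    // byte counts recorded via record() must sum to exactly what the rw/ro
    // dump region has allocated so far.
    struct AllocStatsModel {
      int _rw_bytes = 0;
      int _ro_bytes = 0;
      void record(int bytes, bool read_only) {
        (read_only ? _ro_bytes : _rw_bytes) += bytes;
      }
      void verify(int region_used_bytes, bool read_only) const {
        assert((read_only ? _ro_bytes : _rw_bytes) == region_used_bytes &&
               "every allocated byte must be accounted for");
      }
    };

    int main() {
      AllocStatsModel stats;
      stats.record(64, /*read_only=*/false);
      stats.verify(64, /*read_only=*/false);  // passes: totals match region usage
      return 0;
    }
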
@@ -727,10 +742,19 @@
      const char* unlinked = "";
      const char* hidden = "";
      const char* generated = "";
      Klass* k = get_buffered_addr(klasses()->at(i));
      k->remove_java_mirror();
+ #ifdef _LP64
+     if (UseCompactObjectHeaders) {
+       Klass* requested_k = to_requested(k);
+       address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
+       const int narrow_klass_shift = precomputed_narrow_klass_shift();
+       narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
+       k->set_prototype_header(markWord::prototype().set_narrow_klass(nk));
+     }
+ #endif // _LP64
      if (k->is_objArray_klass()) {
        // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
        // on their array classes.
        num_obj_array_klasses ++;
        type = "array";

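With compact object headers, the narrow Klass ID is precomputed into each archived Klass's prototype mark word using the requested (runtime) address, not the dump-time one. The raw encoding reduces to offset-and-shift arithmetic; a sketch with invented 64-bit addresses:

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowKlass;

    // The raw encoding used above (as in CompressedKlassPointers::
    // encode_not_null_without_asserts): offset from the encoding base,
    // scaled down by the shift.
    static narrowKlass encode(uintptr_t klass_addr, uintptr_t base, int shift) {
      return (narrowKlass)((klass_addr - base) >> shift);
    }

    int main() {
      // All values below are invented for illustration.
      const uintptr_t requested_base = 0x0000000800000000ULL;      // archive mapping start
      const uintptr_t requested_k    = requested_base + 19 * 1024; // a 1 KiB-aligned Klass
      const int shift = 10;                                        // assumed compact-header shift
      narrowKlass nk = encode(requested_k, requested_base, shift);
      printf("narrowKlass = %u\n", nk);  // (19 * 1024) >> 10 == 19
      return 0;
    }
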
@@ -824,13 +848,19 @@
  #if INCLUDE_CDS_JAVA_HEAP
  narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
    assert(CDSConfig::is_dumping_heap(), "sanity");
    k = get_buffered_klass(k);
    Klass* requested_k = to_requested(k);
+   const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
+ #ifdef ASSERT
+   const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift));
+   assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k));
+ #endif
    address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
-   const int narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift;
-   return CompressedKlassPointers::encode_not_null(requested_k, narrow_klass_base, narrow_klass_shift);
+   // Note: use the "raw" version of encode that takes an explicit narrow klass base and shift. Don't use any
+   // of the variants that do sanity checks, nor any that use the encoding settings of the current (dump-time) JVM.
+   return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
  }
  #endif // INCLUDE_CDS_JAVA_HEAP
  
  // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
  // so that the archive can be mapped to the "requested" location without runtime relocation.

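The new assert checks the invariant established by the aligned allocation earlier: the buffered Klass must satisfy the stricter of the general shared-space alignment and the granularity implied by the narrow-Klass shift. In outline (alignment and shift values assumed):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static bool is_aligned(uintptr_t p, size_t alignment) {
      return (p & (uintptr_t)(alignment - 1)) == 0;
    }

    static void check_buffered_klass_alignment(uintptr_t k) {
      const size_t SharedSpaceObjectAlignment = 8;   // assumed
      const int    narrow_klass_shift = 10;          // assumed precomputed shift
      const size_t shift_granularity = (size_t)1 << narrow_klass_shift;
      // The stricter of the two requirements must hold, mirroring the
      // MAX2(...) expression in the assert above.
      const size_t klass_alignment =
          shift_granularity > SharedSpaceObjectAlignment ? shift_granularity
                                                         : SharedSpaceObjectAlignment;
      assert(is_aligned(k, klass_alignment) && "Klass misaligned");
    }

    int main() {
      check_buffered_klass_alignment(0x800004C00ULL);  // 1 KiB-aligned: passes
      return 0;
    }
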
@@ -1360,10 +1390,24 @@
  
    log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
    MetaspaceShared::unrecoverable_writing_error();
  }
  
+ #ifdef _LP64
+ int ArchiveBuilder::precomputed_narrow_klass_shift() {
+   // Legacy Mode:
+   //    We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
+   // CompactObjectHeader Mode:
+   //    narrowKlass is much smaller, so we use the highest possible shift value to maximize the
+   //    resulting Klass encoding range.
+   //
+   // Note that all of this may change in the future, if we decide to correct the pre-calculated
+   // narrow Klass IDs at archive load time.
+   assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
+   return CompressedKlassPointers::tiny_classpointer_mode() ? CompressedKlassPointers::max_shift() : 0;
+ }
+ #endif // _LP64
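
A worked comparison of the two modes described above, using assumed bit widths (the legacy narrowKlass is 32 bits; a compact-header narrowKlass of e.g. 22 bits with a maximum shift of e.g. 10 reaches the same range):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Legacy mode: 32-bit narrowKlass, shift 0 -> 2^32 bytes (4 GiB) of range.
      uint64_t legacy_range = (uint64_t)1 << (32 + 0);

      // Compact-header mode (assumed widths): a much narrower narrowKlass
      // combined with the maximum shift recovers the same 2^(22+10) range.
      uint64_t compact_range = (uint64_t)1 << (22 + 10);

      printf("legacy: %llu GiB, compact: %llu GiB\n",
             (unsigned long long)(legacy_range >> 30),
             (unsigned long long)(compact_range >> 30));
      return 0;
    }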
  
  #ifndef PRODUCT
  void ArchiveBuilder::assert_is_vm_thread() {
    assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
  }