src/hotspot/share/cds/archiveBuilder.cpp

 210   GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
 211 
 212   virtual bool do_unique_ref(Ref* ref, bool read_only) {
 213     return _builder->gather_klass_and_symbol(ref, read_only);
 214   }
 215 };
 216 
 217 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
 218   if (ref->obj() == nullptr) {
 219     return false;
 220   }
 221   if (get_follow_mode(ref) != make_a_copy) {
 222     return false;
 223   }
 224   if (ref->msotype() == MetaspaceObj::ClassType) {
 225     Klass* klass = (Klass*)ref->obj();
 226     assert(klass->is_klass(), "must be");
 227     if (!is_excluded(klass)) {
 228       _klasses->append(klass);
 229     }
 230     // See RunTimeClassInfo::get_for()
 231     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
 232   } else if (ref->msotype() == MetaspaceObj::SymbolType) {
 233     // Make sure the symbol won't be GC'ed while we are dumping the archive.
 234     Symbol* sym = (Symbol*)ref->obj();
 235     sym->increment_refcount();
 236     _symbols->append(sym);
 237   }
 238 
 239   int bytes = ref->size() * BytesPerWord;
 240   _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);
 241 
 242   return true; // recurse
 243 }
 244 
 245 void ArchiveBuilder::gather_klasses_and_symbols() {
 246   ResourceMark rm;
 247   log_info(cds)("Gathering classes and symbols ... ");
 248   GatherKlassesAndSymbols doit(this);
 249   iterate_roots(&doit);
 250 #if INCLUDE_CDS_JAVA_HEAP
 251   if (CDSConfig::is_dumping_full_module_graph()) {

 644   RegeneratedClasses::record_regenerated_objects();
 645 }
 646 
 647 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
 648                                          const ArchiveBuilder::SourceObjList* src_objs) {
 649   for (int i = 0; i < src_objs->objs()->length(); i++) {
 650     make_shallow_copy(dump_region, src_objs->objs()->at(i));
 651   }
 652   log_info(cds)("done (%d objects)", src_objs->objs()->length());
 653 }
 654 
 655 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
 656   address src = src_info->source_addr();
 657   int bytes = src_info->size_in_bytes();
 658   char* dest;
 659   char* oldtop;
 660   char* newtop;
 661 
 662   oldtop = dump_region->top();
 663   if (src_info->msotype() == MetaspaceObj::ClassType) {
 664     // Save a pointer immediately in front of an InstanceKlass, so
 665     // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
 666     // without building another hashtable. See RunTimeClassInfo::get_for()
 667     // in systemDictionaryShared.cpp.
 668     Klass* klass = (Klass*)src;
 669     if (klass->is_instance_klass()) {
 670       SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 671       dump_region->allocate(sizeof(address));
 672     }
 673   }
 674   dest = dump_region->allocate(bytes);
 675   newtop = dump_region->top();
 676 
 677   memcpy(dest, src, bytes);
 678 
 679   // For a static dump, update the identity hash of the buffered symbol so that the archived symbols have deterministic contents
 680   if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
 681     Symbol* buffered_symbol = (Symbol*)dest;
 682     assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
 683     buffered_symbol->update_identity_hash();
 684   }
 685 
 686   {
 687     bool created;
 688     _buffered_to_src_table.put_if_absent((address)dest, src, &created);
 689     assert(created, "must be");
 690     if (_buffered_to_src_table.maybe_grow()) {
 691       log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
 692     }
 693   }
 694 
 695   intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
 696   if (archived_vtable != nullptr) {
 697     *(address*)dest = (address)archived_vtable;
 698     ArchivePtrMarker::mark_pointer((address*)dest);
 699   }
 700 
 701   log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
 702   src_info->set_buffered_addr((address)dest);
 703 
 704   _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
 705 }
 706 
 707 // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
 708 // not handled by MetaspaceClosure.
 709 void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
 710   assert(is_in_buffer_space(ptr_location), "must be");
 711   if (src_addr == nullptr) {
 712     *ptr_location = nullptr;
 713     ArchivePtrMarker::clear_pointer(ptr_location);
 714   } else {
 715     *ptr_location = get_buffered_addr(src_addr);
 716     ArchivePtrMarker::mark_pointer(ptr_location);
 717   }
 718 }
 719 
 720 void ArchiveBuilder::mark_and_relocate_to_buffered_addr(address* ptr_location) {
 721   assert(*ptr_location != nullptr, "sanity");
 722   if (!is_in_mapped_static_archive(*ptr_location)) {
 723     *ptr_location = get_buffered_addr(*ptr_location);
 724   }

 763   int num_obj_array_klasses = 0;
 764   int num_type_array_klasses = 0;
 765 
 766   for (int i = 0; i < klasses()->length(); i++) {
 767     // Some of the code in ConstantPool::remove_unshareable_info() requires the classes
 768     // to be in the linked state, so it must be called here, before the next loop,
 769     // which returns all classes to the unlinked state.
 770     Klass* k = get_buffered_addr(klasses()->at(i));
 771     if (k->is_instance_klass()) {
 772       InstanceKlass::cast(k)->constants()->remove_unshareable_info();
 773     }
 774   }
 775 
 776   for (int i = 0; i < klasses()->length(); i++) {
 777     const char* type;
 778     const char* unlinked = "";
 779     const char* hidden = "";
 780     const char* generated = "";
 781     Klass* k = get_buffered_addr(klasses()->at(i));
 782     k->remove_java_mirror();
 783     if (k->is_objArray_klass()) {
 784       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 785       // on their array classes.
 786       num_obj_array_klasses ++;
 787       type = "array";
 788     } else if (k->is_typeArray_klass()) {
 789       num_type_array_klasses ++;
 790       type = "array";
 791       k->remove_unshareable_info();
 792     } else {
 793       assert(k->is_instance_klass(), "must be");
 794       num_instance_klasses ++;
 795       InstanceKlass* ik = InstanceKlass::cast(k);
 796       if (ik->is_shared_boot_class()) {
 797         type = "boot";
 798         num_boot_klasses ++;
 799       } else if (ik->is_shared_platform_class()) {
 800         type = "plat";
 801         num_platform_klasses ++;
 802       } else if (ik->is_shared_app_class()) {

 860   return requested_p - _requested_static_archive_bottom;
 861 }
 862 
 863 uintx ArchiveBuilder::any_to_offset(address p) const {
 864   if (is_in_mapped_static_archive(p)) {
 865     assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
 866     return p - _mapped_static_archive_bottom;
 867   }
 868   if (!is_in_buffer_space(p)) {
 869     // p must be a "source" address
 870     p = get_buffered_addr(p);
 871   }
 872   return buffer_to_offset(p);
 873 }
 874 
 875 #if INCLUDE_CDS_JAVA_HEAP
 876 narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
 877   assert(CDSConfig::is_dumping_heap(), "sanity");
 878   k = get_buffered_klass(k);
 879   Klass* requested_k = to_requested(k);
 880   address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 881   const int narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift;
 882   return CompressedKlassPointers::encode_not_null(requested_k, narrow_klass_base, narrow_klass_shift);
 883 }
 884 #endif // INCLUDE_CDS_JAVA_HEAP
 885 
 886 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
 887 // so that the archive can be mapped to the "requested" location without runtime relocation.
 888 //
 889 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
 890 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
 891 // - Every pointer must have one of the following values:
 892 //   [a] nullptr:
 893 //       No relocation is needed. Remove this pointer from ptrmap so we don't need to
 894 //       consider it at runtime.
 895 //   [b] Points into an object X which is inside the buffer:
 896 //       Adjust this pointer by _buffer_to_requested_delta, so it points to X
 897 //       when the archive is mapped at the requested location.
 898 //   [c] Points into an object Y which is inside mapped static archive:
 899 //       - This happens only during dynamic dump
 900 //       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
 901 //         so it points to Y when the static archive is mapped at the requested location.
 902 template <bool STATIC_DUMP>

1396 
1397 void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
1398   char* start = info->buffer_start();
1399   size_t size = info->buffer_byte_size();
1400   char* top = start + size;
1401   log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1402                      size, size/double(total_size)*100.0, size, p2i(start));
1403 }
1404 
1405 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
1406   // This is highly unlikely to happen on 64-bit because we have reserved a 4GB space.
1407   // On 32-bit we reserve only 256MB, so you could run out of space with roughly
1408   // 100,000 classes.
1409   _rw_region.print_out_of_space_msg(name, needed_bytes);
1410   _ro_region.print_out_of_space_msg(name, needed_bytes);
1411 
1412   log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
1413   MetaspaceShared::unrecoverable_writing_error();
1414 }
1415 
1416 
1417 #ifndef PRODUCT
1418 void ArchiveBuilder::assert_is_vm_thread() {
1419   assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
1420 }
1421 #endif

 210   GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
 211 
 212   virtual bool do_unique_ref(Ref* ref, bool read_only) {
 213     return _builder->gather_klass_and_symbol(ref, read_only);
 214   }
 215 };
 216 
 217 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
 218   if (ref->obj() == nullptr) {
 219     return false;
 220   }
 221   if (get_follow_mode(ref) != make_a_copy) {
 222     return false;
 223   }
 224   if (ref->msotype() == MetaspaceObj::ClassType) {
 225     Klass* klass = (Klass*)ref->obj();
 226     assert(klass->is_klass(), "must be");
 227     if (!is_excluded(klass)) {
 228       _klasses->append(klass);
 229     }
 230     // See RunTimeClassInfo::get_for(): make sure we have enough space for both the maximum
 231     // Klass alignment and the RunTimeClassInfo* pointer we will embed in front of a Klass.
 232     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, CompressedKlassPointers::klass_alignment_in_bytes()) +
 233         align_up(sizeof(void*), SharedSpaceObjectAlignment);
 234   } else if (ref->msotype() == MetaspaceObj::SymbolType) {
 235     // Make sure the symbol won't be GC'ed while we are dumping the archive.
 236     Symbol* sym = (Symbol*)ref->obj();
 237     sym->increment_refcount();
 238     _symbols->append(sym);
 239   }
 240 
 241   int bytes = ref->size() * BytesPerWord;
 242   _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);
 243 
 244   return true; // recurse
 245 }
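
To make the estimate above concrete, here is the arithmetic with illustrative values; the real numbers depend on the JVM configuration, and klass_alignment_in_bytes() and SharedSpaceObjectAlignment come from the code above:

    // Illustrative arithmetic only, assuming LP64 with compact object headers:
    //   klass_alignment_in_bytes() == 1024, SharedSpaceObjectAlignment == 8
    //   per-class reservation = align_up(8, 1024) + align_up(8, 8)
    //                         = 1024 + 8 = 1032 bytes
    // on top of align_up(ref->size() * BytesPerWord, SharedSpaceObjectAlignment)
    // for the object itself.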
 246 
 247 void ArchiveBuilder::gather_klasses_and_symbols() {
 248   ResourceMark rm;
 249   log_info(cds)("Gathering classes and symbols ... ");
 250   GatherKlassesAndSymbols doit(this);
 251   iterate_roots(&doit);
 252 #if INCLUDE_CDS_JAVA_HEAP
 253   if (CDSConfig::is_dumping_full_module_graph()) {

 646   RegeneratedClasses::record_regenerated_objects();
 647 }
 648 
 649 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
 650                                          const ArchiveBuilder::SourceObjList* src_objs) {
 651   for (int i = 0; i < src_objs->objs()->length(); i++) {
 652     make_shallow_copy(dump_region, src_objs->objs()->at(i));
 653   }
 654   log_info(cds)("done (%d objects)", src_objs->objs()->length());
 655 }
 656 
 657 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
 658   address src = src_info->source_addr();
 659   int bytes = src_info->size_in_bytes();
 660   char* dest;
 661   char* oldtop;
 662   char* newtop;
 663 
 664   oldtop = dump_region->top();
 665   if (src_info->msotype() == MetaspaceObj::ClassType) {
 666     // Allocate space for a pointer directly in front of the future InstanceKlass, so
 667     // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
 668     // without building another hashtable. See RunTimeClassInfo::get_for()
 669     // in systemDictionaryShared.cpp.
 670     Klass* klass = (Klass*)src;
 671     if (klass->is_instance_klass()) {
 672       SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 673       dump_region->allocate(sizeof(address));
 674     }
 675     // Allocate space for the future InstanceKlass with proper alignment
 676     const size_t alignment =
 677 #ifdef _LP64
 678       UseCompressedClassPointers ?
 679         nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()) :
 680         SharedSpaceObjectAlignment;
 681 #else
 682       SharedSpaceObjectAlignment;
 683 #endif
 684     dest = dump_region->allocate(bytes, alignment);
 685   } else {
 686     dest = dump_region->allocate(bytes);
 687   }
 688   newtop = dump_region->top();
 689 
 690   memcpy(dest, src, bytes);
 691 
 692   // For a static dump, update the identity hash of the buffered symbol so that the archived symbols have deterministic contents
 693   if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
 694     Symbol* buffered_symbol = (Symbol*)dest;
 695     assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
 696     buffered_symbol->update_identity_hash();
 697   }
 698 
 699   {
 700     bool created;
 701     _buffered_to_src_table.put_if_absent((address)dest, src, &created);
 702     assert(created, "must be");
 703     if (_buffered_to_src_table.maybe_grow()) {
 704       log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
 705     }
 706   }
 707 
 708   intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
 709   if (archived_vtable != nullptr) {
 710     *(address*)dest = (address)archived_vtable;
 711     ArchivePtrMarker::mark_pointer((address*)dest);
 712   }
 713 
 714   log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
 715   src_info->set_buffered_addr((address)dest);
 716 
 717   _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
 718 
 719   DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
 720 }
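
The consumer of the word reserved in front of each InstanceKlass is RunTimeClassInfo::get_for() in systemDictionaryShared.cpp; a minimal sketch of the idea (not the exact JDK code):

    // Sketch only: read the RunTimeClassInfo* stored one word before the
    // InstanceKlass that make_shallow_copy() buffered above.
    static RunTimeClassInfo* get_for_sketch(InstanceKlass* klass) {
      RunTimeClassInfo** info_addr = &((RunTimeClassInfo**)klass)[-1];
      return *info_addr;
    }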
 721 
 722 // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
 723 // not handled by MetaspaceClosure.
 724 void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
 725   assert(is_in_buffer_space(ptr_location), "must be");
 726   if (src_addr == nullptr) {
 727     *ptr_location = nullptr;
 728     ArchivePtrMarker::clear_pointer(ptr_location);
 729   } else {
 730     *ptr_location = get_buffered_addr(src_addr);
 731     ArchivePtrMarker::mark_pointer(ptr_location);
 732   }
 733 }
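
For illustration, a hypothetical caller that hand-assembles an archived record might use it like this ('record' and '_klass' are made-up names, not from the JDK sources):

    // Hypothetical usage sketch: 'record' already lives in the buffer;
    // 'src_klass' is a source-space pointer that must be archived.
    ArchiveBuilder::current()->write_pointer_in_buffer(
        (address*)&record->_klass, (address)src_klass);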
 734 
 735 void ArchiveBuilder::mark_and_relocate_to_buffered_addr(address* ptr_location) {
 736   assert(*ptr_location != nullptr, "sanity");
 737   if (!is_in_mapped_static_archive(*ptr_location)) {
 738     *ptr_location = get_buffered_addr(*ptr_location);
 739   }

 778   int num_obj_array_klasses = 0;
 779   int num_type_array_klasses = 0;
 780 
 781   for (int i = 0; i < klasses()->length(); i++) {
 782     // Some of the code in ConstantPool::remove_unshareable_info() requires the classes
 783     // to be in the linked state, so it must be called here, before the next loop,
 784     // which returns all classes to the unlinked state.
 785     Klass* k = get_buffered_addr(klasses()->at(i));
 786     if (k->is_instance_klass()) {
 787       InstanceKlass::cast(k)->constants()->remove_unshareable_info();
 788     }
 789   }
 790 
 791   for (int i = 0; i < klasses()->length(); i++) {
 792     const char* type;
 793     const char* unlinked = "";
 794     const char* hidden = "";
 795     const char* generated = "";
 796     Klass* k = get_buffered_addr(klasses()->at(i));
 797     k->remove_java_mirror();
 798 #ifdef _LP64
 799     if (UseCompactObjectHeaders) {
 800       Klass* requested_k = to_requested(k);
 801       address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 802       const int narrow_klass_shift = precomputed_narrow_klass_shift();
 803       narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
 804       k->set_prototype_header(markWord::prototype().set_narrow_klass(nk));
 805     }
 806 #endif //_LP64
 807     if (k->is_objArray_klass()) {
 808       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 809       // on their array classes.
 810       num_obj_array_klasses ++;
 811       type = "array";
 812     } else if (k->is_typeArray_klass()) {
 813       num_type_array_klasses ++;
 814       type = "array";
 815       k->remove_unshareable_info();
 816     } else {
 817       assert(k->is_instance_klass(), "must be");
 818       num_instance_klasses ++;
 819       InstanceKlass* ik = InstanceKlass::cast(k);
 820       if (ik->is_shared_boot_class()) {
 821         type = "boot";
 822         num_boot_klasses ++;
 823       } else if (ik->is_shared_platform_class()) {
 824         type = "plat";
 825         num_platform_klasses ++;
 826       } else if (ik->is_shared_app_class()) {

 884   return requested_p - _requested_static_archive_bottom;
 885 }
 886 
 887 uintx ArchiveBuilder::any_to_offset(address p) const {
 888   if (is_in_mapped_static_archive(p)) {
 889     assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
 890     return p - _mapped_static_archive_bottom;
 891   }
 892   if (!is_in_buffer_space(p)) {
 893     // p must be a "source" address
 894     p = get_buffered_addr(p);
 895   }
 896   return buffer_to_offset(p);
 897 }
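
In other words, any_to_offset() folds three kinds of input addresses into one offset scheme (illustrative summary of the code above):

    //   p inside the mapped static archive: p - _mapped_static_archive_bottom
    //   p inside the buffer:                buffer_to_offset(p)
    //   p is a "source" address:            buffer_to_offset(get_buffered_addr(p))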
 898 
 899 #if INCLUDE_CDS_JAVA_HEAP
 900 narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
 901   assert(CDSConfig::is_dumping_heap(), "sanity");
 902   k = get_buffered_klass(k);
 903   Klass* requested_k = to_requested(k);
 904   const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
 905 #ifdef ASSERT
 906   const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift));
 907   assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k));
 908 #endif
 909   address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 910   // Note: use the "raw" version of encode that takes an explicit narrow klass base and shift. Don't use any
 911   // of the variants that do sanity checks, nor any that use the current (dump-time) JVM's encoding settings.
 912   return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
 913 }
 914 #endif // INCLUDE_CDS_JAVA_HEAP
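
For reference, the raw encoding used above reduces to shifting the offset from the encoding base; a sketch of the usual compressed-Klass arithmetic, not a quote of the JDK implementation:

    // narrowKlass nk =
    //     (narrowKlass)((address(requested_k) - narrow_klass_base) >> narrow_klass_shift);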
 915 
 916 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
 917 // so that the archive can be mapped to the "requested" location without runtime relocation.
 918 //
 919 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
 920 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
 921 // - Every pointer must have one of the following values:
 922 //   [a] nullptr:
 923 //       No relocation is needed. Remove this pointer from ptrmap so we don't need to
 924 //       consider it at runtime.
 925 //   [b] Points into an object X which is inside the buffer:
 926 //       Adjust this pointer by _buffer_to_requested_delta, so it points to X
 927 //       when the archive is mapped at the requested location.
 928 //   [c] Points into an object Y which is inside mapped static archive:
 929 //       - This happens only during dynamic dump
 930 //       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
 931 //         so it points to Y when the static archive is mapped at the requested location.
 932 template <bool STATIC_DUMP>
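
A hedged sketch of how one marked pointer is adjusted under cases [a] through [c]; the real work happens in the RelocateBufferToRequested closure whose declaration starts above, and the names below are taken from the surrounding code:

    // Sketch only, not the actual closure body:
    void relocate_one_pointer(address* p) {
      address old_value = *p;
      if (old_value == nullptr) {
        // [a] no relocation needed; the bit is removed from ptrmap
      } else if (is_in_buffer_space(old_value)) {
        *p = old_value + _buffer_to_requested_delta;                 // [b]
      } else {
        // [c] dynamic dump only: target is in the mapped static archive
        *p = old_value + _mapped_to_requested_static_archive_delta;
      }
    }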

1426 
1427 void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
1428   char* start = info->buffer_start();
1429   size_t size = info->buffer_byte_size();
1430   char* top = start + size;
1431   log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1432                      size, size/double(total_size)*100.0, size, p2i(start));
1433 }
1434 
1435 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
1436   // This is highly unlikely to happen on 64-bit because we have reserved a 4GB space.
1437   // On 32-bit we reserve only 256MB, so you could run out of space with roughly
1438   // 100,000 classes.
1439   _rw_region.print_out_of_space_msg(name, needed_bytes);
1440   _ro_region.print_out_of_space_msg(name, needed_bytes);
1441 
1442   log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
1443   MetaspaceShared::unrecoverable_writing_error();
1444 }
1445 
1446 #ifdef _LP64
1447 int ArchiveBuilder::precomputed_narrow_klass_shift() {
1448   // Legacy Mode:
1449   //    We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
1450   // CompactObjectHeader Mode:
1451   //    narrowKlass is much smaller, and we use the highest possible shift value so that
1452   //    we later get the maximum Klass encoding range.
1453   //
1454   // Note that all of this may change in the future, if we decide to correct the pre-calculated
1455   // narrow Klass IDs at archive load time.
1456   assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
1457   return CompressedKlassPointers::tiny_classpointer_mode() ? CompressedKlassPointers::max_shift() : 0;
1458 }
1459 #endif // _LP64
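
To make the two modes concrete (illustrative numbers, not guaranteed by this code; the actual values come from CompressedKlassPointers):

    // Legacy mode:  shift == 0 with a 32-bit narrowKlass -> 4 GB encoding range.
    // Compact mode: e.g. a 22-bit narrowKlass with max_shift() == 10 still
    //               covers 2^22 << 10 = 4 GB of Klass range.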
1460 
1461 #ifndef PRODUCT
1462 void ArchiveBuilder::assert_is_vm_thread() {
1463   assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
1464 }
1465 #endif