
src/hotspot/share/cds/archiveBuilder.cpp

Old version (before the change):

 197   GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
 198 
 199   virtual bool do_unique_ref(Ref* ref, bool read_only) {
 200     return _builder->gather_klass_and_symbol(ref, read_only);
 201   }
 202 };
 203 
 204 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
 205   if (ref->obj() == nullptr) {
 206     return false;
 207   }
 208   if (get_follow_mode(ref) != make_a_copy) {
 209     return false;
 210   }
 211   if (ref->msotype() == MetaspaceObj::ClassType) {
 212     Klass* klass = (Klass*)ref->obj();
 213     assert(klass->is_klass(), "must be");
 214     if (!is_excluded(klass)) {
 215       _klasses->append(klass);
 216     }
 217     // See RunTimeClassInfo::get_for()
 218     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
 219   } else if (ref->msotype() == MetaspaceObj::SymbolType) {
 220     // Make sure the symbol won't be GC'ed while we are dumping the archive.
 221     Symbol* sym = (Symbol*)ref->obj();
 222     sym->increment_refcount();
 223     _symbols->append(sym);
 224   }
 225 
 226   int bytes = ref->size() * BytesPerWord;
 227   _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);
 228 
 229   return true; // recurse
 230 }
 231 
 232 void ArchiveBuilder::gather_klasses_and_symbols() {
 233   ResourceMark rm;
 234   log_info(cds)("Gathering classes and symbols ... ");
 235   GatherKlassesAndSymbols doit(this);
 236   iterate_roots(&doit);
 237 #if INCLUDE_CDS_JAVA_HEAP
 238   if (CDSConfig::is_dumping_full_module_graph()) {

 611   RegeneratedClasses::record_regenerated_objects();
 612 }
 613 
 614 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
 615                                          const ArchiveBuilder::SourceObjList* src_objs) {
 616   for (int i = 0; i < src_objs->objs()->length(); i++) {
 617     make_shallow_copy(dump_region, src_objs->objs()->at(i));
 618   }
 619   log_info(cds)("done (%d objects)", src_objs->objs()->length());
 620 }
 621 
 622 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
 623   address src = src_info->source_addr();
 624   int bytes = src_info->size_in_bytes();
 625   char* dest;
 626   char* oldtop;
 627   char* newtop;
 628 
 629   oldtop = dump_region->top();
 630   if (src_info->msotype() == MetaspaceObj::ClassType) {
 631     // Save a pointer immediately in front of an InstanceKlass, so
 632     // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
 633     // without building another hashtable. See RunTimeClassInfo::get_for()
 634     // in systemDictionaryShared.cpp.
 635     Klass* klass = (Klass*)src;
 636     if (klass->is_instance_klass()) {
 637       SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 638       dump_region->allocate(sizeof(address));
 639     }
 640   }
 641   dest = dump_region->allocate(bytes);
 642   newtop = dump_region->top();
 643 
 644   memcpy(dest, src, bytes);
 645 
 646   // Update the identity hash of the buffered, sorted symbols during a static dump, so that the archived symbols have deterministic contents.
 647   if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
 648     Symbol* buffered_symbol = (Symbol*)dest;
 649     assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
 650     buffered_symbol->update_identity_hash();
 651   }
 652 
 653   {
 654     bool created;
 655     _buffered_to_src_table.put_if_absent((address)dest, src, &created);
 656     assert(created, "must be");
 657     if (_buffered_to_src_table.maybe_grow()) {
 658       log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
 659     }
 660   }
 661 
 662   intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
 663   if (archived_vtable != nullptr) {
 664     *(address*)dest = (address)archived_vtable;
 665     ArchivePtrMarker::mark_pointer((address*)dest);
 666   }
 667 
 668   log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
 669   src_info->set_buffered_addr((address)dest);
 670 
 671   _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
 672 }
 673 
 674 // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
 675 // not handled by MetaspaceClosure.
 676 void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
 677   assert(is_in_buffer_space(ptr_location), "must be");
 678   if (src_addr == nullptr) {
 679     *ptr_location = nullptr;
 680     ArchivePtrMarker::clear_pointer(ptr_location);
 681   } else {
 682     *ptr_location = get_buffered_addr(src_addr);
 683     ArchivePtrMarker::mark_pointer(ptr_location);
 684   }
 685 }
 686 
 687 address ArchiveBuilder::get_buffered_addr(address src_addr) const {
 688   SourceObjInfo* p = _src_obj_table.get(src_addr);
 689   assert(p != nullptr, "src_addr " INTPTR_FORMAT " is used but has not been archived",
 690          p2i(src_addr));
 691 

 712 }
 713 
 714 void ArchiveBuilder::make_klasses_shareable() {
 715   int num_instance_klasses = 0;
 716   int num_boot_klasses = 0;
 717   int num_platform_klasses = 0;
 718   int num_app_klasses = 0;
 719   int num_hidden_klasses = 0;
 720   int num_unlinked_klasses = 0;
 721   int num_unregistered_klasses = 0;
 722   int num_obj_array_klasses = 0;
 723   int num_type_array_klasses = 0;
 724 
 725   for (int i = 0; i < klasses()->length(); i++) {
 726     const char* type;
 727     const char* unlinked = "";
 728     const char* hidden = "";
 729     const char* generated = "";
 730     Klass* k = get_buffered_addr(klasses()->at(i));
 731     k->remove_java_mirror();
 732     if (k->is_objArray_klass()) {
 733       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 734       // on their array classes.
 735       num_obj_array_klasses++;
 736       type = "array";
 737     } else if (k->is_typeArray_klass()) {
 738       num_type_array_klasses++;
 739       type = "array";
 740       k->remove_unshareable_info();
 741     } else {
 742       assert(k->is_instance_klass(), "must be");
 743       num_instance_klasses++;
 744       InstanceKlass* ik = InstanceKlass::cast(k);
 745       if (ik->is_shared_boot_class()) {
 746         type = "boot";
 747         num_boot_klasses++;
 748       } else if (ik->is_shared_platform_class()) {
 749         type = "plat";
 750         num_platform_klasses++;
 751       } else if (ik->is_shared_app_class()) {

 809   return requested_p - _requested_static_archive_bottom;
 810 }
 811 
 812 uintx ArchiveBuilder::any_to_offset(address p) const {
 813   if (is_in_mapped_static_archive(p)) {
 814     assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
 815     return p - _mapped_static_archive_bottom;
 816   }
 817   if (!is_in_buffer_space(p)) {
 818     // p must be a "source" address
 819     p = get_buffered_addr(p);
 820   }
 821   return buffer_to_offset(p);
 822 }
 823 
 824 #if INCLUDE_CDS_JAVA_HEAP
 825 narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
 826   assert(CDSConfig::is_dumping_heap(), "sanity");
 827   k = get_buffered_klass(k);
 828   Klass* requested_k = to_requested(k);
 829   address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 830   const int narrow_klass_shift = ArchiveHeapWriter::precomputed_narrow_klass_shift;
 831   return CompressedKlassPointers::encode_not_null(requested_k, narrow_klass_base, narrow_klass_shift);
 832 }
 833 #endif // INCLUDE_CDS_JAVA_HEAP
 834 
 835 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
 836 // so that the archive can be mapped to the "requested" location without runtime relocation.
 837 //
 838 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
 839 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
 840 // - Every pointer must have one of the following values:
 841 //   [a] nullptr:
 842 //       No relocation is needed. Remove this pointer from ptrmap so we don't need to
 843 //       consider it at runtime.
 844 //   [b] Points into an object X which is inside the buffer:
 845 //       Adjust this pointer by _buffer_to_requested_delta, so it points to X
 846 //       when the archive is mapped at the requested location.
 847 //   [c] Points into an object Y which is inside mapped static archive:
 848 //       - This happens only during dynamic dump
 849 //       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
 850 //         so it points to Y when the static archive is mapped at the requested location.
 851 template <bool STATIC_DUMP>

1345 
1346 void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
1347   char* start = info->buffer_start();
1348   size_t size = info->buffer_byte_size();
1349   char* top = start + size;
1350   log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1351                      size, size/double(total_size)*100.0, size, p2i(start));
1352 }
1353 
1354 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
1355   // This is highly unlikely to happen on 64-bit platforms, because we have reserved a 4GB space.
1356   // On 32-bit platforms we reserve only 256MB, so you could run out of space with 100,000
1357   // classes or so.
1358   _rw_region.print_out_of_space_msg(name, needed_bytes);
1359   _ro_region.print_out_of_space_msg(name, needed_bytes);
1360 
1361   log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
1362   MetaspaceShared::unrecoverable_writing_error();
1363 }
1364 
1365 
1366 #ifndef PRODUCT
1367 void ArchiveBuilder::assert_is_vm_thread() {
1368   assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
1369 }
1370 #endif

New version (after the change):

 197   GatherKlassesAndSymbols(ArchiveBuilder* builder) : _builder(builder) {}
 198 
 199   virtual bool do_unique_ref(Ref* ref, bool read_only) {
 200     return _builder->gather_klass_and_symbol(ref, read_only);
 201   }
 202 };
 203 
 204 bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only) {
 205   if (ref->obj() == nullptr) {
 206     return false;
 207   }
 208   if (get_follow_mode(ref) != make_a_copy) {
 209     return false;
 210   }
 211   if (ref->msotype() == MetaspaceObj::ClassType) {
 212     Klass* klass = (Klass*)ref->obj();
 213     assert(klass->is_klass(), "must be");
 214     if (!is_excluded(klass)) {
 215       _klasses->append(klass);
 216     }
 217     // See RunTimeClassInfo::get_for(): make sure we have enough space for both the maximum
 218     // Klass alignment and the RunTimeClassInfo* pointer we will embed in front of a Klass.
 219     _estimated_metaspaceobj_bytes += align_up(BytesPerWord, CompressedKlassPointers::klass_alignment_in_bytes()) +
 220         align_up(sizeof(void*), SharedSpaceObjectAlignment);
 221   } else if (ref->msotype() == MetaspaceObj::SymbolType) {
 222     // Make sure the symbol won't be GC'ed while we are dumping the archive.
 223     Symbol* sym = (Symbol*)ref->obj();
 224     sym->increment_refcount();
 225     _symbols->append(sym);
 226   }
 227 
 228   int bytes = ref->size() * BytesPerWord;
 229   _estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);
 230 
 231   return true; // recurse
 232 }
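
The estimate above adds two aligned terms: one word padded up to the maximum Klass alignment, plus one pointer-sized slot (the RunTimeClassInfo* embedded in front of the Klass) padded to the object alignment. A minimal standalone sketch of the arithmetic, assuming a 64-bit build, an 8-byte SharedSpaceObjectAlignment, and an illustrative 10-bit narrow-klass shift (the nth_bit/align_up stand-ins below are ours, not the VM's definitions):

    #include <cassert>
    #include <cstddef>
    using std::size_t;

    // Illustration-only stand-ins for HotSpot's nth_bit and align_up.
    static size_t nth_bit(int n)                   { return size_t(1) << n; }
    static size_t align_up(size_t v, size_t align) { return (v + align - 1) & ~(align - 1); }

    int main() {
      const size_t BytesPerWord = 8;                // 64-bit word size
      const size_t SharedSpaceObjectAlignment = 8;  // assumed value, for illustration
      const size_t klass_alignment = nth_bit(10);   // 10-bit shift -> 1 KB alignment

      // One word padded to the Klass alignment, plus the embedded pointer slot
      // padded to the object alignment, as in gather_klass_and_symbol().
      size_t estimate = align_up(BytesPerWord, klass_alignment) +
                        align_up(sizeof(void*), SharedSpaceObjectAlignment);
      assert(estimate == 1024 + 8);
      return 0;
    }
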
 233 
 234 void ArchiveBuilder::gather_klasses_and_symbols() {
 235   ResourceMark rm;
 236   log_info(cds)("Gathering classes and symbols ... ");
 237   GatherKlassesAndSymbols doit(this);
 238   iterate_roots(&doit);
 239 #if INCLUDE_CDS_JAVA_HEAP
 240   if (CDSConfig::is_dumping_full_module_graph()) {

 613   RegeneratedClasses::record_regenerated_objects();
 614 }
 615 
 616 void ArchiveBuilder::make_shallow_copies(DumpRegion *dump_region,
 617                                          const ArchiveBuilder::SourceObjList* src_objs) {
 618   for (int i = 0; i < src_objs->objs()->length(); i++) {
 619     make_shallow_copy(dump_region, src_objs->objs()->at(i));
 620   }
 621   log_info(cds)("done (%d objects)", src_objs->objs()->length());
 622 }
 623 
 624 void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info) {
 625   address src = src_info->source_addr();
 626   int bytes = src_info->size_in_bytes();
 627   char* dest;
 628   char* oldtop;
 629   char* newtop;
 630 
 631   oldtop = dump_region->top();
 632   if (src_info->msotype() == MetaspaceObj::ClassType) {
 633     // Allocate space for a pointer directly in front of the future InstanceKlass, so
 634     // we can do a quick lookup from InstanceKlass* -> RunTimeClassInfo*
 635     // without building another hashtable. See RunTimeClassInfo::get_for()
 636     // in systemDictionaryShared.cpp.
 637     Klass* klass = (Klass*)src;
 638     if (klass->is_instance_klass()) {
 639       SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass));
 640       dump_region->allocate(sizeof(address));
 641     }
 642     // Allocate space for the future InstanceKlass with proper alignment
 643     const size_t alignment =
 644 #ifdef _LP64
 645       UseCompressedClassPointers ?
 646         nth_bit(ArchiveBuilder::precomputed_narrow_klass_shift()) :
 647         SharedSpaceObjectAlignment;
 648 #else
 649       SharedSpaceObjectAlignment;
 650 #endif
 651     dest = dump_region->allocate(bytes, alignment);
 652   } else {
 653     dest = dump_region->allocate(bytes);
 654   }
 655   newtop = dump_region->top();
 656 
 657   memcpy(dest, src, bytes);
 658 
 659   // Update the identity hash of the buffered, sorted symbols during a static dump, so that the archived symbols have deterministic contents.
 660   if (CDSConfig::is_dumping_static_archive() && (src_info->msotype() == MetaspaceObj::SymbolType)) {
 661     Symbol* buffered_symbol = (Symbol*)dest;
 662     assert(((Symbol*)src)->is_permanent(), "archived symbols must be permanent");
 663     buffered_symbol->update_identity_hash();
 664   }
 665 
 666   {
 667     bool created;
 668     _buffered_to_src_table.put_if_absent((address)dest, src, &created);
 669     assert(created, "must be");
 670     if (_buffered_to_src_table.maybe_grow()) {
 671       log_info(cds, hashtables)("Expanded _buffered_to_src_table table to %d", _buffered_to_src_table.table_size());
 672     }
 673   }
 674 
 675   intptr_t* archived_vtable = CppVtables::get_archived_vtable(src_info->msotype(), (address)dest);
 676   if (archived_vtable != nullptr) {
 677     *(address*)dest = (address)archived_vtable;
 678     ArchivePtrMarker::mark_pointer((address*)dest);
 679   }
 680 
 681   log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(src), p2i(dest), bytes);
 682   src_info->set_buffered_addr((address)dest);
 683 
 684   _alloc_stats.record(src_info->msotype(), int(newtop - oldtop), src_info->read_only());
 685 
 686   DEBUG_ONLY(_alloc_stats.verify((int)dump_region->used(), src_info->read_only()));
 687 }
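
The pointer slot allocated in front of each future InstanceKlass is what enables the quick InstanceKlass* -> RunTimeClassInfo* lookup mentioned in the comment, with no extra hashtable. A sketch of the idea, assuming the record pointer is filled in later by the dump code (the types and helper names here are illustrative; the real lookup is RunTimeClassInfo::get_for() in systemDictionaryShared.cpp):

    #include <cassert>
    #include <cstring>

    struct RunTimeClassInfo {};             // stand-in for the real record
    struct InstanceKlass   { int dummy; };  // stand-in; never dereferenced here

    static char buffer[64];                 // pretend dump region

    // Writer: reserve one pointer-sized slot, then place the object right after it.
    static InstanceKlass* place(RunTimeClassInfo* info) {
      std::memcpy(buffer, &info, sizeof(info));  // slot directly in front of the object
      return reinterpret_cast<InstanceKlass*>(buffer + sizeof(void*));
    }

    // Reader: step back one pointer width from the object to recover the record.
    static RunTimeClassInfo* get_for(InstanceKlass* ik) {
      RunTimeClassInfo* info;
      std::memcpy(&info, reinterpret_cast<char*>(ik) - sizeof(void*), sizeof(info));
      return info;
    }

    int main() {
      RunTimeClassInfo rtci;
      assert(get_for(place(&rtci)) == &rtci);
      return 0;
    }
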
 688 
 689 // This is used by code that hand-assembles data structures, such as the LambdaProxyClassKey, that are
 690 // not handled by MetaspaceClosure.
 691 void ArchiveBuilder::write_pointer_in_buffer(address* ptr_location, address src_addr) {
 692   assert(is_in_buffer_space(ptr_location), "must be");
 693   if (src_addr == nullptr) {
 694     *ptr_location = nullptr;
 695     ArchivePtrMarker::clear_pointer(ptr_location);
 696   } else {
 697     *ptr_location = get_buffered_addr(src_addr);
 698     ArchivePtrMarker::mark_pointer(ptr_location);
 699   }
 700 }
 701 
 702 address ArchiveBuilder::get_buffered_addr(address src_addr) const {
 703   SourceObjInfo* p = _src_obj_table.get(src_addr);
 704   assert(p != nullptr, "src_addr " INTPTR_FORMAT " is used but has not been archived",
 705          p2i(src_addr));
 706 

 727 }
 728 
 729 void ArchiveBuilder::make_klasses_shareable() {
 730   int num_instance_klasses = 0;
 731   int num_boot_klasses = 0;
 732   int num_platform_klasses = 0;
 733   int num_app_klasses = 0;
 734   int num_hidden_klasses = 0;
 735   int num_unlinked_klasses = 0;
 736   int num_unregistered_klasses = 0;
 737   int num_obj_array_klasses = 0;
 738   int num_type_array_klasses = 0;
 739 
 740   for (int i = 0; i < klasses()->length(); i++) {
 741     const char* type;
 742     const char* unlinked = "";
 743     const char* hidden = "";
 744     const char* generated = "";
 745     Klass* k = get_buffered_addr(klasses()->at(i));
 746     k->remove_java_mirror();
 747 #ifdef _LP64
 748     if (UseCompactObjectHeaders) {
 749       Klass* requested_k = to_requested(k);
 750       address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 751       const int narrow_klass_shift = precomputed_narrow_klass_shift();
 752       narrowKlass nk = CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
 753       k->set_prototype_header(markWord::prototype().set_narrow_klass(nk));
 754     }
 755 #endif //_LP64
 756     if (k->is_objArray_klass()) {
 757       // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
 758       // on their array classes.
 759       num_obj_array_klasses++;
 760       type = "array";
 761     } else if (k->is_typeArray_klass()) {
 762       num_type_array_klasses++;
 763       type = "array";
 764       k->remove_unshareable_info();
 765     } else {
 766       assert(k->is_instance_klass(), "must be");
 767       num_instance_klasses++;
 768       InstanceKlass* ik = InstanceKlass::cast(k);
 769       if (ik->is_shared_boot_class()) {
 770         type = "boot";
 771         num_boot_klasses++;
 772       } else if (ik->is_shared_platform_class()) {
 773         type = "plat";
 774         num_platform_klasses++;
 775       } else if (ik->is_shared_app_class()) {

 833   return requested_p - _requested_static_archive_bottom;
 834 }
 835 
 836 uintx ArchiveBuilder::any_to_offset(address p) const {
 837   if (is_in_mapped_static_archive(p)) {
 838     assert(CDSConfig::is_dumping_dynamic_archive(), "must be");
 839     return p - _mapped_static_archive_bottom;
 840   }
 841   if (!is_in_buffer_space(p)) {
 842     // p must be a "source" address
 843     p = get_buffered_addr(p);
 844   }
 845   return buffer_to_offset(p);
 846 }
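
As a worked example of the two branches in any_to_offset(), assuming a 64-bit build (all addresses and deltas below are invented; the real values are fields of the ArchiveBuilder):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t mapped_static_bottom    = 0x7f0000000ULL; // where the static archive happens to be mapped
      const uintptr_t requested_static_bottom = 0x800000000ULL; // where it asks to be mapped at runtime
      const uintptr_t buffer_bottom           = 0x900000000ULL; // dump-time buffer
      const intptr_t  buffer_to_requested_delta =
          (intptr_t)(requested_static_bottom - buffer_bottom);

      // Branch 1: p points into the mapped static archive (dynamic dump only);
      // the offset is taken directly from the mapped bottom.
      uintptr_t p = mapped_static_bottom + 0x4321;
      assert(p - mapped_static_bottom == 0x4321);

      // Branch 2: p points into the buffer; buffer_to_offset() first translates
      // it to its "requested" address, then subtracts the requested bottom.
      uintptr_t q = buffer_bottom + 0x100;
      uintptr_t requested_q = q + buffer_to_requested_delta;
      assert(requested_q - requested_static_bottom == 0x100);
      return 0;
    }
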
 847 
 848 #if INCLUDE_CDS_JAVA_HEAP
 849 narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
 850   assert(CDSConfig::is_dumping_heap(), "sanity");
 851   k = get_buffered_klass(k);
 852   Klass* requested_k = to_requested(k);
 853   const int narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
 854 #ifdef ASSERT
 855   const size_t klass_alignment = MAX2(SharedSpaceObjectAlignment, (size_t)nth_bit(narrow_klass_shift));
 856   assert(is_aligned(k, klass_alignment), "Klass " PTR_FORMAT " misaligned.", p2i(k));
 857 #endif
 858   address narrow_klass_base = _requested_static_archive_bottom; // runtime encoding base == runtime mapping start
 859   // Note: use the "raw" version of encode that takes explicit narrow klass base and shift. Don't use any
 860   // of the variants that do sanity checks, nor any of those that use the current - dump - JVM's encoding setting.
 861   return CompressedKlassPointers::encode_not_null_without_asserts(requested_k, narrow_klass_base, narrow_klass_shift);
 862 }
 863 #endif // INCLUDE_CDS_JAVA_HEAP
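
The "raw" encoding used above is just a base subtraction followed by a shift. A compact sketch with an invented base, shift, and Klass address (the real entry point is CompressedKlassPointers::encode_not_null_without_asserts; the encode() helper below is ours):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t narrowKlass;

    // Illustration-only version of the raw encode with explicit base and shift.
    static narrowKlass encode(uintptr_t klass, uintptr_t base, int shift) {
      return (narrowKlass)((klass - base) >> shift);
    }

    int main() {
      const uintptr_t requested_base = 0x800000000ULL;  // requested mapping start
      const int shift = 10;                             // e.g. the precomputed shift
      uintptr_t requested_k = requested_base + (uintptr_t(7) << shift); // aligned Klass
      assert(encode(requested_k, requested_base, shift) == 7);
      return 0;
    }
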
 864 
 865 // RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
 866 // so that the archive can be mapped to the "requested" location without runtime relocation.
 867 //
 868 // - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
 869 // - ArchivePtrMarker::ptrmap() marks all the pointers in the rw/ro regions
 870 // - Every pointer must have one of the following values:
 871 //   [a] nullptr:
 872 //       No relocation is needed. Remove this pointer from ptrmap so we don't need to
 873 //       consider it at runtime.
 874 //   [b] Points into an object X which is inside the buffer:
 875 //       Adjust this pointer by _buffer_to_requested_delta, so it points to X
 876 //       when the archive is mapped at the requested location.
 877 //   [c] Points into an object Y which is inside mapped static archive:
 878 //       - This happens only during dynamic dump
 879 //       - Adjust this pointer by _mapped_to_requested_static_archive_delta,
 880 //         so it points to Y when the static archive is mapped at the requested location.
 881 template <bool STATIC_DUMP>
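
A sketch of the three cases [a]-[c] as straight-line code (the region bounds, deltas, and helper names are invented for illustration; the real relocation walks ArchivePtrMarker::ptrmap() and is templatized on STATIC_DUMP):

    #include <cassert>
    #include <cstdint>

    static const uintptr_t buffer_lo = 0x900000000ULL, buffer_hi = 0x910000000ULL;
    static const uintptr_t mapped_lo = 0x7f0000000ULL, mapped_hi = 0x800000000ULL;
    static const intptr_t  buffer_to_requested_delta = -0x100000000LL;
    static const intptr_t  mapped_to_requested_delta = 0x10000000LL;

    static bool in_buffer(uintptr_t p)        { return buffer_lo <= p && p < buffer_hi; }
    static bool in_mapped_static(uintptr_t p) { return mapped_lo <= p && p < mapped_hi; }

    static void relocate_one(uintptr_t* loc) {
      uintptr_t p = *loc;
      if (p == 0) {
        // [a] nullptr: nothing to adjust; the real code also clears the ptrmap bit.
      } else if (in_buffer(p)) {
        *loc = p + buffer_to_requested_delta;   // [b] buffer -> requested
      } else if (in_mapped_static(p)) {
        *loc = p + mapped_to_requested_delta;   // [c] dynamic dump only
      }
    }

    int main() {
      uintptr_t slot = buffer_lo + 0x40;        // a pointer into the buffer: case [b]
      relocate_one(&slot);
      assert(slot == buffer_lo + 0x40 + buffer_to_requested_delta);
      return 0;
    }
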

1375 
1376 void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
1377   char* start = info->buffer_start();
1378   size_t size = info->buffer_byte_size();
1379   char* top = start + size;
1380   log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
1381                      size, size/double(total_size)*100.0, size, p2i(start));
1382 }
1383 
1384 void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {
1385   // This is highly unlikely to happen on 64-bit platforms, because we have reserved a 4GB space.
1386   // On 32-bit platforms we reserve only 256MB, so you could run out of space with 100,000
1387   // classes or so.
1388   _rw_region.print_out_of_space_msg(name, needed_bytes);
1389   _ro_region.print_out_of_space_msg(name, needed_bytes);
1390 
1391   log_error(cds)("Unable to allocate from '%s' region: Please reduce the number of shared classes.", name);
1392   MetaspaceShared::unrecoverable_writing_error();
1393 }
1394 
1395 #ifdef _LP64
1396 int ArchiveBuilder::precomputed_narrow_klass_shift() {
1397   // Legacy Mode:
1398   //    We use 32 bits for narrowKlass, which should cover the full 4G Klass range. Shift can be 0.
1399   // CompactObjectHeader Mode:
1400   //    narrowKlass is much smaller, so we use the highest possible shift value to get the
1401   //    maximum Klass encoding range later.
1402   //
1403   // Note that all of this may change in the future, if we decide to correct the pre-calculated
1404   // narrow Klass IDs at archive load time.
1405   assert(UseCompressedClassPointers, "Only needed for compressed class pointers");
1406   return CompressedKlassPointers::tiny_classpointer_mode() ? CompressedKlassPointers::max_shift() : 0;
1407 }
1408 #endif // _LP64
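
To see why the shift choice matters: with an n-bit narrowKlass field and shift s, the encodable range is (1 << n) << s bytes. A quick check of the two modes described in the comment above (the field widths below are illustrative assumptions, not values read from the VM):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t GB = 1024ULL * 1024 * 1024;

      // Legacy mode: a 32-bit narrowKlass with shift 0 already covers 4 GB.
      assert(((uint64_t(1) << 32) << 0) == 4 * GB);

      // Compact-object-header mode: a much narrower field (say 22 bits) needs
      // the maximum shift (say 10) to still cover a 4 GB encoding range.
      assert(((uint64_t(1) << 22) << 10) == 4 * GB);
      return 0;
    }
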
1409 
1410 #ifndef PRODUCT
1411 void ArchiveBuilder::assert_is_vm_thread() {
1412   assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
1413 }
1414 #endif