src/hotspot/share/runtime/arguments.cpp

1410 void Arguments::set_use_compressed_oops() {
1411 #ifdef _LP64
1412   // MaxHeapSize is not set up properly at this point, but
1413   // the only values that can override MaxHeapSize if we are
1414   // to use UseCompressedOops are InitialHeapSize and MinHeapSize.
1415   size_t max_heap_size = MAX3(MaxHeapSize, InitialHeapSize, MinHeapSize);
1416 
1417   if (max_heap_size <= max_heap_for_compressed_oops()) {
1418     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
1419       FLAG_SET_ERGO(UseCompressedOops, true);
1420     }
1421   } else {
1422     if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
1423       warning("Max heap size too large for Compressed Oops");
1424       FLAG_SET_DEFAULT(UseCompressedOops, false);
1425     }
1426   }
1427 #endif // _LP64
1428 }
1429 
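
The pivot here, max_heap_for_compressed_oops(), follows from the narrow-oop encoding: a compressed oop is a 32-bit value scaled by the object alignment, so the addressable heap tops out near 32 GB at the default 8-byte alignment (HotSpot's actual limit is somewhat lower, since room is reserved for the heap base and null page). A minimal standalone sketch of that arithmetic, assuming the default -XX:ObjectAlignmentInBytes=8:

    #include <cstdint>
    #include <cstdio>

    // Sketch only, not HotSpot code: 2^32 narrow-oop values, each scaled by
    // the object alignment, bound the compressed-oops heap size.
    int main() {
      const uint64_t narrow_oop_bits  = 32; // width of a compressed oop
      const uint64_t object_alignment = 8;  // assumed ObjectAlignmentInBytes default
      const uint64_t max_heap = (UINT64_C(1) << narrow_oop_bits) * object_alignment;
      printf("max compressed-oops heap: ~%llu GB\n",
             (unsigned long long)(max_heap >> 30)); // prints ~32 GB
      return 0;
    }
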
1430 void Arguments::set_use_compressed_klass_ptrs() {
1431 #ifdef _LP64
1432   assert(!UseCompressedClassPointers || CompressedClassSpaceSize <= KlassEncodingMetaspaceMax,
1433          "CompressedClassSpaceSize is too large for UseCompressedClassPointers");
1434 #endif // _LP64
1435 }
1436 
1437 void Arguments::set_conservative_max_heap_alignment() {
1438   // The conservative maximum required alignment for the heap is the maximum of
1439   // the alignments imposed by several sources: any requirements from the heap
1440   // itself and the maximum page size we may run the VM with.
1441   size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
1442   _conservative_max_heap_alignment = MAX4(heap_alignment,
1443                                           os::vm_allocation_granularity(),
1444                                           os::max_page_size(),
1445                                           GCArguments::compute_heap_alignment());
1446 }
1447 
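
The MAX4 above simply takes the largest of the candidate alignments. A trivial equivalent, with hypothetical values standing in for the os:: and GC queries:

    #include <algorithm>
    #include <cstddef>

    // Illustration only: the conservative alignment must satisfy every
    // source's requirement, i.e. it is the maximum over all of them.
    size_t conservative_alignment(size_t heap_align, size_t granularity,
                                  size_t max_page, size_t gc_align) {
      return std::max({heap_align, granularity, max_page, gc_align});
    }
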
1448 jint Arguments::set_ergonomics_flags() {
1449   GCConfig::initialize();
1450 
1451   set_conservative_max_heap_alignment();
1452 
1453 #ifdef _LP64
1454   set_use_compressed_oops();
1455   set_use_compressed_klass_ptrs();
1456 
1457   // The vm_version initialization code also checks that certain
1458   // machines are slower with compressed oops.
1459 #endif // _LP64
1460 
1461   return JNI_OK;
1462 }
1463 
1464 size_t Arguments::limit_heap_by_allocatable_memory(size_t limit) {
1465   size_t max_allocatable;
1466   size_t result = limit;
1467   if (os::has_allocatable_memory_limit(&max_allocatable)) {
1468     // The AggressiveHeap check is a temporary workaround to avoid calling
1469     // GCArguments::heap_virtual_to_physical_ratio() before a GC has been
1470     // selected. This works because AggressiveHeap implies UseParallelGC
1471     // where we know the ratio will be 1. Once the AggressiveHeap option is
1472     // removed, this can be cleaned up.
1473     size_t heap_virtual_to_physical_ratio = (AggressiveHeap ? 1 : GCConfig::arguments()->heap_virtual_to_physical_ratio());
1474     size_t fraction = MaxVirtMemFraction * heap_virtual_to_physical_ratio;
1475     result = MIN2(result, max_allocatable / fraction);

1815       }
1816     }
1817   }
1818 #endif
1819 
1820 #if INCLUDE_JFR
1821   if (status && (FlightRecorderOptions || StartFlightRecording)) {
1822     if (!create_numbered_module_property("jdk.module.addmods", "jdk.jfr", addmods_count++)) {
1823       return false;
1824     }
1825   }
1826 #endif
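
The helper above publishes numbered system properties; the sketch below shows the naming scheme this implies, assuming the counter is appended to the base name (so the first call would yield jdk.module.addmods.0=jdk.jfr):

    #include <cstdio>

    // Assumed naming scheme for numbered module properties; the real
    // create_numbered_module_property() lives elsewhere in this file.
    int main() {
      unsigned addmods_count = 0;
      char key[64];
      snprintf(key, sizeof(key), "jdk.module.addmods.%u", addmods_count++);
      printf("%s=jdk.jfr\n", key); // jdk.module.addmods.0=jdk.jfr
      return 0;
    }
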
1827 
1828 #ifndef SUPPORT_RESERVED_STACK_AREA
1829   if (StackReservedPages != 0) {
1830     FLAG_SET_CMDLINE(StackReservedPages, 0);
1831     warning("Reserved Stack Area not supported on this platform");
1832   }
1833 #endif
1834 
1835 #if !defined(X86) && !defined(AARCH64) && !defined(RISCV64) && !defined(ARM) && !defined(PPC64) && !defined(S390)
1836   if (LockingMode == LM_LIGHTWEIGHT) {
1837     FLAG_SET_CMDLINE(LockingMode, LM_LEGACY);
1838     warning("New lightweight locking not supported on this platform");
1839   }
1840 #endif
1841 
1842 #if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64) && !defined(S390)
1843   if (LockingMode == LM_MONITOR) {
1844     jio_fprintf(defaultStream::error_stream(),
1845                 "LockingMode == 0 (LM_MONITOR) is not fully implemented on this architecture\n");
1846     return false;
1847   }
1848 #endif
1849 #if defined(X86) && !defined(ZERO)
1850   if (LockingMode == LM_MONITOR && UseRTMForStackLocks) {
1851     jio_fprintf(defaultStream::error_stream(),
1852                 "LockingMode == 0 (LM_MONITOR) and -XX:+UseRTMForStackLocks are mutually exclusive\n");
1853 
1854     return false;
1855   }
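
For reference, the LockingMode checks above compare against small integer constants. Only LM_MONITOR == 0 is confirmed by the error messages in this hunk; the other two values below are assumptions based on the flag's three documented modes:

    // Assumed constants (only LM_MONITOR == 0 is confirmed above):
    enum LockingModeValues {
      LM_MONITOR     = 0, // always inflate to full monitors
      LM_LEGACY      = 1, // classic stack locking
      LM_LIGHTWEIGHT = 2  // new lightweight locking
    };
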

2942   UNSUPPORTED_OPTION(ProfileInterpreter);
2943 #endif
2944 
2945   // Parse the CompilationMode flag
2946   if (!CompilationModeFlag::initialize()) {
2947     return JNI_ERR;
2948   }
2949 
2950   if (!check_vm_args_consistency()) {
2951     return JNI_ERR;
2952   }
2953 
2954   if (!CDSConfig::check_vm_args_consistency(patch_mod_javabase, mode_flag_cmd_line)) {
2955     return JNI_ERR;
2956   }
2957 
2958 #ifndef CAN_SHOW_REGISTERS_ON_ASSERT
2959   UNSUPPORTED_OPTION(ShowRegistersOnAssert);
2960 #endif // CAN_SHOW_REGISTERS_ON_ASSERT
2961 
2962   return JNI_OK;
2963 }
2964 
2965 // Helper class for controlling the lifetime of JavaVMInitArgs
2966 // objects.  The contents of the JavaVMInitArgs are guaranteed to be
2967 // deleted on the destruction of the ScopedVMInitArgs object.
2968 class ScopedVMInitArgs : public StackObj {
2969  private:
2970   JavaVMInitArgs _args;
2971   char*          _container_name;
2972   bool           _is_set;
2973   char*          _vm_options_file_arg;
2974 
2975  public:
2976   ScopedVMInitArgs(const char *container_name) {
2977     _args.version = JNI_VERSION_1_2;
2978     _args.nOptions = 0;
2979     _args.options = nullptr;
2980     _args.ignoreUnrecognized = false;
2981     _container_name = (char *)container_name;
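
ScopedVMInitArgs is a stack-allocated RAII guard: the JavaVMInitArgs it owns are freed when the guard goes out of scope. A hypothetical usage sketch (it relies on the declarations in this file; the actual call sites are not shown in this hunk):

    {
      ScopedVMInitArgs expanded("expanded from -XX:VMOptionsFile");
      // ... populate the guard's JavaVMInitArgs and hand it to the parser ...
    } // storage owned by 'expanded' is reclaimed here, when the guard is destroyed
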

3646   apply_debugger_ergo();
3647 
3648   if (log_is_enabled(Info, arguments)) {
3649     LogStream st(Log(arguments)::info());
3650     Arguments::print_on(&st);
3651   }
3652 
3653   return JNI_OK;
3654 }
3655 
3656 jint Arguments::apply_ergo() {
3657   // Set flags based on ergonomics.
3658   jint result = set_ergonomics_flags();
3659   if (result != JNI_OK) return result;
3660 
3661   // Set heap size based on available physical memory
3662   set_heap_size();
3663 
3664   GCConfig::arguments()->initialize();
3665 
3666   CDSConfig::initialize();
3667 
3668   // Initialize Metaspace flags and alignments
3669   Metaspace::ergo_initialize();
3670 
3671   if (!StringDedup::ergo_initialize()) {
3672     return JNI_EINVAL;
3673   }
3674 
3675   // Set compiler flags after GC is selected and GC-specific
3676   // flags (LoopStripMiningIter) are set.
3677   CompilerConfig::ergo_initialize();
3678 
3679   // Set bytecode rewriting flags
3680   set_bytecode_flags();
3681 
3682   // Set flags if aggressive optimization flags are enabled
3683   jint code = set_aggressive_opts_flags();
3684   if (code != JNI_OK) {
3685     return code;

1410 void Arguments::set_use_compressed_oops() {
1411 #ifdef _LP64
1412   // MaxHeapSize is not set up properly at this point, but
1413   // the only values that can override MaxHeapSize if we are
1414   // to use UseCompressedOops are InitialHeapSize and MinHeapSize.
1415   size_t max_heap_size = MAX3(MaxHeapSize, InitialHeapSize, MinHeapSize);
1416 
1417   if (max_heap_size <= max_heap_for_compressed_oops()) {
1418     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
1419       FLAG_SET_ERGO(UseCompressedOops, true);
1420     }
1421   } else {
1422     if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
1423       warning("Max heap size too large for Compressed Oops");
1424       FLAG_SET_DEFAULT(UseCompressedOops, false);
1425     }
1426   }
1427 #endif // _LP64
1428 }
1429 
1430 void Arguments::set_conservative_max_heap_alignment() {
1431   // The conservative maximum required alignment for the heap is the maximum of
1432   // the alignments imposed by several sources: any requirements from the heap
1433   // itself and the maximum page size we may run the VM with.
1434   size_t heap_alignment = GCConfig::arguments()->conservative_max_heap_alignment();
1435   _conservative_max_heap_alignment = MAX4(heap_alignment,
1436                                           os::vm_allocation_granularity(),
1437                                           os::max_page_size(),
1438                                           GCArguments::compute_heap_alignment());
1439 }
1440 
1441 jint Arguments::set_ergonomics_flags() {
1442   GCConfig::initialize();
1443 
1444   set_conservative_max_heap_alignment();
1445 
1446 #ifdef _LP64
1447   set_use_compressed_oops();

1448 
1449   // The vm_version initialization code also checks that certain
1450   // machines are slower with compressed oops.
1451 #endif // _LP64
1452 
1453   return JNI_OK;
1454 }
1455 
1456 size_t Arguments::limit_heap_by_allocatable_memory(size_t limit) {
1457   size_t max_allocatable;
1458   size_t result = limit;
1459   if (os::has_allocatable_memory_limit(&max_allocatable)) {
1460     // The AggressiveHeap check is a temporary workaround to avoid calling
1461     // GCArguments::heap_virtual_to_physical_ratio() before a GC has been
1462     // selected. This works because AggressiveHeap implies UseParallelGC
1463     // where we know the ratio will be 1. Once the AggressiveHeap option is
1464     // removed, this can be cleaned up.
1465     size_t heap_virtual_to_physical_ratio = (AggressiveHeap ? 1 : GCConfig::arguments()->heap_virtual_to_physical_ratio());
1466     size_t fraction = MaxVirtMemFraction * heap_virtual_to_physical_ratio;
1467     result = MIN2(result, max_allocatable / fraction);

1807       }
1808     }
1809   }
1810 #endif
1811 
1812 #if INCLUDE_JFR
1813   if (status && (FlightRecorderOptions || StartFlightRecording)) {
1814     if (!create_numbered_module_property("jdk.module.addmods", "jdk.jfr", addmods_count++)) {
1815       return false;
1816     }
1817   }
1818 #endif
1819 
1820 #ifndef SUPPORT_RESERVED_STACK_AREA
1821   if (StackReservedPages != 0) {
1822     FLAG_SET_CMDLINE(StackReservedPages, 0);
1823     warning("Reserved Stack Area not supported on this platform");
1824   }
1825 #endif
1826 
1827 #if !defined(X86) && !defined(AARCH64)
1828   if (LockingMode == LM_LIGHTWEIGHT) {
1829     FLAG_SET_CMDLINE(LockingMode, LM_LEGACY);
1830     warning("New lightweight locking not supported on this platform");
1831   }
1832 #endif
1833 
1834 #if !defined(X86) && !defined(AARCH64) && !defined(PPC64) && !defined(RISCV64) && !defined(S390)
1835   if (LockingMode == LM_MONITOR) {
1836     jio_fprintf(defaultStream::error_stream(),
1837                 "LockingMode == 0 (LM_MONITOR) is not fully implemented on this architecture\n");
1838     return false;
1839   }
1840 #endif
1841 #if defined(X86) && !defined(ZERO)
1842   if (LockingMode == LM_MONITOR && UseRTMForStackLocks) {
1843     jio_fprintf(defaultStream::error_stream(),
1844                 "LockingMode == 0 (LM_MONITOR) and -XX:+UseRTMForStackLocks are mutually exclusive\n");
1845 
1846     return false;
1847   }

2934   UNSUPPORTED_OPTION(ProfileInterpreter);
2935 #endif
2936 
2937   // Parse the CompilationMode flag
2938   if (!CompilationModeFlag::initialize()) {
2939     return JNI_ERR;
2940   }
2941 
2942   if (!check_vm_args_consistency()) {
2943     return JNI_ERR;
2944   }
2945 
2946   if (!CDSConfig::check_vm_args_consistency(patch_mod_javabase, mode_flag_cmd_line)) {
2947     return JNI_ERR;
2948   }
2949 
2950 #ifndef CAN_SHOW_REGISTERS_ON_ASSERT
2951   UNSUPPORTED_OPTION(ShowRegistersOnAssert);
2952 #endif // CAN_SHOW_REGISTERS_ON_ASSERT
2953 
2954 #ifdef _LP64
2955   if (UseCompactObjectHeaders && FLAG_IS_CMDLINE(UseCompressedClassPointers) && !UseCompressedClassPointers) {
2956     warning("Compact object headers require compressed class pointers. Disabling compact object headers.");
2957     FLAG_SET_DEFAULT(UseCompactObjectHeaders, false);
2958   }
2959   if (UseCompactObjectHeaders && LockingMode != LM_LIGHTWEIGHT) {
2960     FLAG_SET_DEFAULT(LockingMode, LM_LIGHTWEIGHT);
2961   }
2962   if (UseCompactObjectHeaders && !UseCompressedClassPointers) {
2963     FLAG_SET_DEFAULT(UseCompressedClassPointers, true);
2964   }
2965 #endif
2966 
2967   return JNI_OK;
2968 }
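
The new _LP64 block above encodes the dependencies of compact object headers: an explicit -XX:-UseCompressedClassPointers on the command line wins and disables compact headers; otherwise compact headers force lightweight locking and compressed class pointers on. A compact restatement of that precedence (sketch only; the flag plumbing is assumed):

    // Sketch of the precedence enforced above, not HotSpot code.
    struct HeaderFlags {
      bool compact_headers;                  // UseCompactObjectHeaders
      bool compressed_class_ptrs;            // UseCompressedClassPointers
      bool compressed_class_ptrs_on_cmdline; // FLAG_IS_CMDLINE(...)
      int  locking_mode;                     // 2 == LM_LIGHTWEIGHT (assumed)
    };

    void reconcile(HeaderFlags& f) {
      if (f.compact_headers && f.compressed_class_ptrs_on_cmdline &&
          !f.compressed_class_ptrs) {
        f.compact_headers = false;      // explicit user choice wins
      }
      if (f.compact_headers) {
        f.locking_mode = 2;             // imply lightweight locking
        f.compressed_class_ptrs = true; // imply compressed class pointers
      }
    }
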
2969 
2970 // Helper class for controlling the lifetime of JavaVMInitArgs
2971 // objects.  The contents of the JavaVMInitArgs are guaranteed to be
2972 // deleted on the destruction of the ScopedVMInitArgs object.
2973 class ScopedVMInitArgs : public StackObj {
2974  private:
2975   JavaVMInitArgs _args;
2976   char*          _container_name;
2977   bool           _is_set;
2978   char*          _vm_options_file_arg;
2979 
2980  public:
2981   ScopedVMInitArgs(const char *container_name) {
2982     _args.version = JNI_VERSION_1_2;
2983     _args.nOptions = 0;
2984     _args.options = nullptr;
2985     _args.ignoreUnrecognized = false;
2986     _container_name = (char *)container_name;

3651   apply_debugger_ergo();
3652 
3653   if (log_is_enabled(Info, arguments)) {
3654     LogStream st(Log(arguments)::info());
3655     Arguments::print_on(&st);
3656   }
3657 
3658   return JNI_OK;
3659 }
3660 
3661 jint Arguments::apply_ergo() {
3662   // Set flags based on ergonomics.
3663   jint result = set_ergonomics_flags();
3664   if (result != JNI_OK) return result;
3665 
3666   // Set heap size based on available physical memory
3667   set_heap_size();
3668 
3669   GCConfig::arguments()->initialize();
3670 
3671   if (UseCompressedClassPointers) {
3672     CompressedKlassPointers::pre_initialize();
3673   }
3674 
3675   CDSConfig::initialize();
3676 
3677   // Initialize Metaspace flags and alignments
3678   Metaspace::ergo_initialize();
3679 
3680   if (!StringDedup::ergo_initialize()) {
3681     return JNI_EINVAL;
3682   }
3683 
3684   // Set compiler flags after GC is selected and GC-specific
3685   // flags (LoopStripMiningIter) are set.
3686   CompilerConfig::ergo_initialize();
3687 
3688   // Set bytecode rewriting flags
3689   set_bytecode_flags();
3690 
3691   // Set flags if aggressive optimization flags are enabled
3692   jint code = set_aggressive_opts_flags();
3693   if (code != JNI_OK) {
3694     return code;