/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CardSet.hpp"
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionBounds.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"

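// The heap alignment must be compatible with the card table alignment
// constraint, the space (region) alignment and the OS page size; use the
// largest of the three.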
static size_t calculate_heap_alignment(size_t space_alignment) {
  size_t card_table_alignment = CardTable::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  return MAX3(card_table_alignment, space_alignment, page_size);
}

void G1Arguments::initialize_alignments() {
  // Initialize card size before initializing alignments
  CardTable::initialize_card_size();

  // Set up the region size and associated fields.
  //
  // There is a circular dependency here. We base the region size on the heap
  // size, but the heap size should be aligned with the region size. To get
  // around this we use the unaligned values for the heap.
  G1HeapRegion::setup_heap_region_size(MaxHeapSize);

  SpaceAlignment = G1HeapRegion::GrainBytes;
  HeapAlignment = calculate_heap_alignment(SpaceAlignment);
  // Initialize the card set configuration as soon as the heap region size is
  // known, since it depends on the region size and is used very early on.
  initialize_card_set_configuration();
  // Needs the remembered set (card set) configuration to be initialized first,
  // as this ergonomic default is based on it.
  if (FLAG_IS_DEFAULT(G1EagerReclaimRemSetThreshold)) {
    FLAG_SET_ERGO(G1EagerReclaimRemSetThreshold, G1RemSetArrayOfCardsEntries);
  }
}

size_t G1Arguments::conservative_max_heap_alignment() {
  return G1HeapRegion::max_region_size();
}

void G1Arguments::initialize_verification_types() {
  if (strlen(VerifyGCType) > 0) {
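    // VerifyGCType is a list of verification types separated by spaces,
    // commas or newlines.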
    const char delimiter[] = " ,\n";
    size_t length = strlen(VerifyGCType);
    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
    strncpy(type_list, VerifyGCType, length + 1);
    char* save_ptr;

    char* token = strtok_r(type_list, delimiter, &save_ptr);
    while (token != nullptr) {
      parse_verification_type(token);
      token = strtok_r(nullptr, delimiter, &save_ptr);
    }
    FREE_C_HEAP_ARRAY(char, type_list);
  }
}

void G1Arguments::parse_verification_type(const char* type) {
  if (strcmp(type, "young-normal") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungNormal);
  } else if (strcmp(type, "concurrent-start") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyConcurrentStart);
  } else if (strcmp(type, "mixed") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
  } else if (strcmp(type, "young-evac-fail") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungEvacFail);
  } else if (strcmp(type, "remark") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
  } else if (strcmp(type, "cleanup") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
  } else if (strcmp(type, "full") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
  } else {
    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
                            "young-normal, young-evac-fail, concurrent-start, mixed, remark, cleanup and full", type);
  }
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
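// For example, 8 STW GC workers map to (8 + 2) / 4 = 2 concurrent workers;
// at least one concurrent worker is always returned.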
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

void G1Arguments::initialize_mark_stack_size() {
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
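    // Scale the marking stack with the number of concurrent GC threads, but
    // never let it exceed MarkStackSizeMax.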
    size_t mark_stack_size = MIN2(MarkStackSizeMax,
                                  MAX2(MarkStackSize, (size_t)ConcGCThreads * TASKQUEUE_SIZE));
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  }
}

void G1Arguments::initialize_card_set_configuration() {
  assert(G1HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
  // Array of Cards card set container globals.
  const uint LOG_M = 20;
  assert(log2i_exact(G1HeapRegionBounds::min_size()) == LOG_M, "inv");
  assert(G1HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
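  // region_size_log_mb is the log2 of the region size in MiB, e.g. 0 for
  // 1 MiB regions and 5 for 32 MiB regions.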
  uint region_size_log_mb = G1HeapRegion::LogOfHRGrainBytes - LOG_M;

  if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) {
    uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion);
    FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2,
                                                    G1RemSetArrayOfCardsEntriesBase << region_size_log_mb));
  }

  // Howl card set container globals.
  if (FLAG_IS_DEFAULT(G1RemSetHowlNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlNumBuckets, G1CardSetHowl::num_buckets(G1HeapRegion::CardsPerRegion,
                                                                     G1RemSetArrayOfCardsEntries,
                                                                     G1RemSetHowlMaxNumBuckets));
  }

  if (FLAG_IS_DEFAULT(G1RemSetHowlMaxNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlMaxNumBuckets, MAX2(G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets));
  } else if (G1RemSetHowlMaxNumBuckets < G1RemSetHowlNumBuckets) {
    FormatBuffer<> buf("Maximum Howl card set container bucket size %u smaller than requested bucket size %u",
                       G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets);
    vm_exit_during_initialization(buf);
  }
}

void G1Arguments::initialize() {
  GCArguments::initialize();
  assert(UseG1GC, "Error");
  FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", nullptr);
  }

  // When dumping the CDS heap we want to reduce fragmentation by
  // triggering a full collection. To get as low fragmentation as
  // possible we only use one worker thread.
  if (CDSConfig::is_dumping_heap()) {
    FLAG_SET_ERGO(ParallelGCThreads, 1);
  }

  if (!G1UseConcRefinement) {
    if (!FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
      log_warning(gc, ergo)("Ignoring -XX:G1ConcRefinementThreads "
                            "because of -XX:-G1UseConcRefinement");
    }
    FLAG_SET_DEFAULT(G1ConcRefinementThreads, 0);
  } else if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
  }

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to ~8%.
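    // (A GCTimeRatio of n roughly corresponds to a GC time share of
    // 1 / (1 + n), so 12 is about 8%.)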
    FLAG_SET_DEFAULT(GCTimeRatio, 12);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
  }

#ifdef COMPILER2
  // Enable loop strip mining to offer better pause time guarantees
  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
    }
  }
#endif

  initialize_mark_stack_size();
  initialize_verification_types();

  // Verify that the maximum parallelism isn't too high to eventually overflow
  // the refcount in G1CardSetContainer.
  uint max_parallel_refinement_threads = G1ConcRefinementThreads + G1DirtyCardQueueSet::num_par_ids();
  uint const divisor = 3;  // Safe divisor; we increment by 2 for each claim, but there is a small initial value.
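  // I.e. reject configurations where max_parallel_refinement_threads * divisor
  // could overflow a uint.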
  if (max_parallel_refinement_threads > UINT_MAX / divisor) {
    vm_exit_during_initialization("Too large parallelism for remembered sets.");
  }
}

CollectedHeap* G1Arguments::create_heap() {
  return new G1CollectedHeap();
}

size_t G1Arguments::heap_reserved_size_bytes() {
  return MaxHeapSize;
}