/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CardSet.hpp"
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionBounds.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"

static size_t calculate_heap_alignment(size_t space_alignment) {
  size_t card_table_alignment = CardTable::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
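  // The heap alignment must satisfy the strictest of the card table,
  // space and page size constraints.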
  return MAX3(card_table_alignment, space_alignment, page_size);
}

void G1Arguments::initialize_alignments() {
  // Initialize card size before initializing alignments
  CardTable::initialize_card_size();

  // Set up the region size and associated fields.
  //
  // There is a circular dependency here. We base the region size on the heap
  // size, but the heap size should be aligned with the region size. To get
  // around this we use the unaligned values for the heap.
  G1HeapRegion::setup_heap_region_size(MaxHeapSize);

  SpaceAlignment = G1HeapRegion::GrainBytes;
  HeapAlignment = calculate_heap_alignment(SpaceAlignment);

  // The card set configuration must be initialized as soon as the heap region
  // size is known, since it depends on the region size and is used very early.
  initialize_card_set_configuration();
  // The ergonomics below depend on the remembered set configuration
  // initialized above.
  if (FLAG_IS_DEFAULT(G1EagerReclaimRemSetThreshold)) {
    FLAG_SET_ERGO(G1EagerReclaimRemSetThreshold, G1RemSetArrayOfCardsEntries);
  }
}

size_t G1Arguments::conservative_max_heap_alignment() {
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    return G1HeapRegion::max_ergonomics_size();
  }
  return G1HeapRegion::max_region_size();
}

void G1Arguments::initialize_verification_types() {
  if (strlen(VerifyGCType) > 0) {
    const char delimiter[] = " ,\n";
    size_t length = strlen(VerifyGCType);
    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
    strncpy(type_list, VerifyGCType, length + 1);
    char* save_ptr;

    char* token = strtok_r(type_list, delimiter, &save_ptr);
    while (token != nullptr) {
      parse_verification_type(token);
      token = strtok_r(nullptr, delimiter, &save_ptr);
    }
    FREE_C_HEAP_ARRAY(char, type_list);
  }
}

void G1Arguments::parse_verification_type(const char* type) {
  if (strcmp(type, "young-normal") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungNormal);
  } else if (strcmp(type, "concurrent-start") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyConcurrentStart);
  } else if (strcmp(type, "mixed") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
  } else if (strcmp(type, "young-evac-fail") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungEvacFail);
  } else if (strcmp(type, "remark") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
  } else if (strcmp(type, "cleanup") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
  } else if (strcmp(type, "full") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
  } else {
    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
                            "young-normal, young-evac-fail, concurrent-start, mixed, remark, cleanup and full", type);
  }
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
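  // Roughly one quarter of the STW worker count, rounded, but at least one thread.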
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

void G1Arguments::initialize_mark_stack_size() {
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
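    // Size the global mark stack proportionally to the number of concurrent
    // marking threads, but never above MarkStackSizeMax.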
    size_t mark_stack_size = MIN2(MarkStackSizeMax,
                                  MAX2(MarkStackSize, (size_t)ConcGCThreads * TASKQUEUE_SIZE));
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  }
}

void G1Arguments::initialize_card_set_configuration() {
  assert(G1HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
  // Array of Cards card set container globals.
  const uint LOG_M = 20;
  assert(log2i_exact(G1HeapRegionBounds::min_size()) == LOG_M, "inv");
  assert(G1HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
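  // Log2 of the region size in units of the 1M minimum region size.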
  uint region_size_log_mb = G1HeapRegion::LogOfHRGrainBytes - LOG_M;

  if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) {
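    // Use at least twice the inline pointer capacity, scaling the base number
    // of entries with the region size.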
    uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion);
    FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2,
                                                    G1RemSetArrayOfCardsEntriesBase << region_size_log_mb));
  }

  // Howl card set container globals.
  if (FLAG_IS_DEFAULT(G1RemSetHowlNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlNumBuckets, G1CardSetHowl::num_buckets(G1HeapRegion::CardsPerRegion,
                                                                     G1RemSetArrayOfCardsEntries,
                                                                     G1RemSetHowlMaxNumBuckets));
  }

  if (FLAG_IS_DEFAULT(G1RemSetHowlMaxNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlMaxNumBuckets, MAX2(G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets));
  } else if (G1RemSetHowlMaxNumBuckets < G1RemSetHowlNumBuckets) {
    FormatBuffer<> buf("Maximum Howl card set container bucket size %u smaller than requested bucket size %u",
                       G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets);
    vm_exit_during_initialization(buf);
  }
}

void G1Arguments::initialize() {
  GCArguments::initialize();
  assert(UseG1GC, "Error");
  FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", nullptr);
  }

  // When dumping the CDS heap we want to reduce fragmentation by
  // triggering a full collection. To keep fragmentation as low as
  // possible we use only one worker thread.
  if (CDSConfig::is_dumping_heap()) {
    FLAG_SET_ERGO(ParallelGCThreads, 1);
  }

  if (!G1UseConcRefinement) {
    if (!FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
      log_warning(gc, ergo)("Ignoring -XX:G1ConcRefinementThreads "
                            "because of -XX:-G1UseConcRefinement");
    }
    FLAG_SET_DEFAULT(G1ConcRefinementThreads, 0);
  } else if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
  }

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to ~8%.
    FLAG_SET_DEFAULT(GCTimeRatio, 12);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we want to give G1 maximum
  // flexibility and allow it to do pauses whenever it needs to. So we
  // arrange for the pause interval to be the pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

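  // Enable parallel reference processing by default when more than one GC
  // worker thread is available.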
  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
  }

#ifdef COMPILER2
  // Enable loop strip mining to offer better pause time guarantees
  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
    }
  }
#endif

  initialize_mark_stack_size();
  initialize_verification_types();

  // Verify that the maximum parallelism is not so high that it could eventually
  // overflow the refcount in G1CardSetContainer.
  uint max_parallel_refinement_threads = G1ConcRefinementThreads + G1DirtyCardQueueSet::num_par_ids();
  uint const divisor = 3;  // Safe divisor; we increment by 2 for each claim, but there is a small initial value.
  if (max_parallel_refinement_threads > UINT_MAX / divisor) {
    vm_exit_during_initialization("Too large parallelism for remembered sets.");
  }
}

CollectedHeap* G1Arguments::create_heap() {
  return new G1CollectedHeap();
}

size_t G1Arguments::heap_reserved_size_bytes() {
  return MaxHeapSize;
}