/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CardSet.hpp"
#include "gc/g1/g1CardSetContainers.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1HeapRegionBounds.inline.hpp"
#include "gc/g1/g1HeapRegionRemSet.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/fullGCForwarding.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"

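// The heap alignment must satisfy the card table, the space (region) and the
// OS page size constraints; use the largest of the three.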
static size_t calculate_heap_alignment(size_t space_alignment) {
  size_t card_table_alignment = CardTable::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  return MAX3(card_table_alignment, space_alignment, page_size);
}

void G1Arguments::initialize_alignments() {
  // Initialize card size before initializing alignments
  CardTable::initialize_card_size();

  // Set up the region size and associated fields.
  //
  // There is a circular dependency here. We base the region size on the heap
  // size, but the heap size should be aligned with the region size. To get
  // around this we use the unaligned values for the heap.
  G1HeapRegion::setup_heap_region_size(MaxHeapSize);

  SpaceAlignment = G1HeapRegion::GrainBytes;
  HeapAlignment = calculate_heap_alignment(SpaceAlignment);

  // We need to initialize the card set configuration as soon as the heap region
  // size is known, since it depends on it and is used very early.
  initialize_card_set_configuration();
  // Needs remembered set initialization as the ergonomics are based
  // on it.
  if (FLAG_IS_DEFAULT(G1EagerReclaimRemSetThreshold)) {
    FLAG_SET_ERGO(G1EagerReclaimRemSetThreshold, G1RemSetArrayOfCardsEntries);
  }
  // G1 prefers to use conditional card marking to avoid overwriting cards that
  // have already been found to contain a to-collection set reference. This reduces
  // refinement effort.
  if (FLAG_IS_DEFAULT(UseCondCardMark)) {
    FLAG_SET_ERGO(UseCondCardMark, true);
  }
}

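// Conservative upper bound on the alignment the heap may require: the largest
// region size that ergonomics may select when G1HeapRegionSize is default,
// otherwise the maximum supported region size.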
size_t G1Arguments::conservative_max_heap_alignment() {
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    return G1HeapRegion::max_ergonomics_size();
  }
  return G1HeapRegion::max_region_size();
}

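// Parse the VerifyGCType option, a list of verification type names separated
// by spaces, commas or newlines, and enable each requested type.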
void G1Arguments::initialize_verification_types() {
  if (strlen(VerifyGCType) > 0) {
    const char delimiter[] = " ,\n";
    size_t length = strlen(VerifyGCType);
    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
    strncpy(type_list, VerifyGCType, length + 1);
    char* save_ptr;

    char* token = strtok_r(type_list, delimiter, &save_ptr);
    while (token != nullptr) {
      parse_verification_type(token);
      token = strtok_r(nullptr, delimiter, &save_ptr);
    }
    FREE_C_HEAP_ARRAY(char, type_list);
  }
}

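// Enable the G1HeapVerifier verification type matching the given token; an
// unknown token only results in a warning.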
void G1Arguments::parse_verification_type(const char* type) {
  if (strcmp(type, "young-normal") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungNormal);
  } else if (strcmp(type, "concurrent-start") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyConcurrentStart);
  } else if (strcmp(type, "mixed") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
  } else if (strcmp(type, "young-evac-fail") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungEvacFail);
  } else if (strcmp(type, "remark") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
  } else if (strcmp(type, "cleanup") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
  } else if (strcmp(type, "full") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
  } else {
    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
                            "young-normal, young-evac-fail, concurrent-start, mixed, remark, cleanup and full", type);
  }
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

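// If MarkStackSize has not been set explicitly, size the global marking stack
// from the number of concurrent GC threads, capped at MarkStackSizeMax.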
void G1Arguments::initialize_mark_stack_size() {
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size = MIN2(MarkStackSizeMax,
                                  MAX2(MarkStackSize, (size_t)ConcGCThreads * TASKQUEUE_SIZE));
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  }
}

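// Set up ergonomic defaults for the card set (remembered set) container flags
// based on the selected heap region size.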
void G1Arguments::initialize_card_set_configuration() {
  assert(G1HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
  // Array of Cards card set container globals.
  const uint LOG_M = 20;
  assert(log2i_exact(G1HeapRegionBounds::min_size()) == LOG_M, "inv");
  assert(G1HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
  uint region_size_log_mb = G1HeapRegion::LogOfHRGrainBytes - LOG_M;

  if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) {
    uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion);
    FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2,
                                                    G1RemSetArrayOfCardsEntriesBase << region_size_log_mb));
  }

  // Howl card set container globals.
  if (FLAG_IS_DEFAULT(G1RemSetHowlNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlNumBuckets, G1CardSetHowl::num_buckets(G1HeapRegion::CardsPerRegion,
                                                                     G1RemSetArrayOfCardsEntries,
                                                                     G1RemSetHowlMaxNumBuckets));
  }

  if (FLAG_IS_DEFAULT(G1RemSetHowlMaxNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlMaxNumBuckets, MAX2(G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets));
  } else if (G1RemSetHowlMaxNumBuckets < G1RemSetHowlNumBuckets) {
    FormatBuffer<> buf("Maximum Howl card set container bucket size %u smaller than requested bucket size %u",
                       G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets);
    vm_exit_during_initialization(buf);
  }
}

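// Set up G1-specific ergonomic defaults and validate G1-related command line flags.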
void G1Arguments::initialize() {
  GCArguments::initialize();
  assert(UseG1GC, "Error");
  FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", nullptr);
  }

  // When dumping the CDS heap we want to reduce fragmentation by
  // triggering a full collection. To keep fragmentation as low as
  // possible we use only one worker thread.
  if (CDSConfig::is_dumping_heap()) {
    FLAG_SET_ERGO(ParallelGCThreads, 1);
  }

  if (!G1UseConcRefinement) {
    if (!FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
      log_warning(gc, ergo)("Ignoring -XX:G1ConcRefinementThreads "
                            "because of -XX:-G1UseConcRefinement");
    }
    FLAG_SET_DEFAULT(G1ConcRefinementThreads, 0);
  } else if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
  }

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to 4%, i.e. GCTimeRatio = 24, since the GC time
    // goal is 1 / (GCTimeRatio + 1) of total time.
    FLAG_SET_DEFAULT(GCTimeRatio, 24);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
  }

#ifdef COMPILER2
  // Enable loop strip mining to offer better pause time guarantees
  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
    }
  }
#endif

  initialize_mark_stack_size();
  initialize_verification_types();

  // Verify that the maximum parallelism isn't too high to eventually overflow
  // the refcount in G1CardSetContainer.
  uint const divisor = 3;  // Safe divisor; we increment by 2 for each claim, but there is a small initial value.
  if (G1ConcRefinementThreads > UINT_MAX / divisor) {
    vm_exit_during_initialization("Too large parallelism for remembered sets.");
  }

  FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}

CollectedHeap* G1Arguments::create_heap() {
  return new G1CollectedHeap();
}

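// G1 reserves the full maximum heap size up front, so the reserved size is MaxHeapSize.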
size_t G1Arguments::heap_reserved_size_bytes() {
  return MaxHeapSize;
}