1 /*
  2  * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #include "precompiled.hpp"
 27 #include "cds/cdsConfig.hpp"
 28 #include "gc/g1/g1Arguments.hpp"
 29 #include "gc/g1/g1CardSet.hpp"
 30 #include "gc/g1/g1CardSetContainers.inline.hpp"
 31 #include "gc/g1/g1CollectedHeap.inline.hpp"
 32 #include "gc/g1/g1HeapRegion.hpp"
 33 #include "gc/g1/g1HeapRegionBounds.inline.hpp"
 34 #include "gc/g1/g1HeapRegionRemSet.hpp"
 35 #include "gc/g1/g1HeapVerifier.hpp"
 36 #include "gc/shared/cardTable.hpp"
 37 #include "gc/shared/fullGCForwarding.hpp"
 38 #include "gc/shared/gcArguments.hpp"
 39 #include "gc/shared/workerPolicy.hpp"
 40 #include "runtime/globals.hpp"
 41 #include "runtime/globals_extension.hpp"
 42 #include "runtime/java.hpp"
 43 
 44 static size_t calculate_heap_alignment(size_t space_alignment) {
 45   size_t card_table_alignment = CardTable::ct_max_alignment_constraint();
 46   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
 47   return MAX3(card_table_alignment, space_alignment, page_size);
 48 }
 49 
// Establishes region size, space/heap alignment and the card set flag
// ergonomics that depend on the region size. The statement order below is
// significant: each step consumes values produced by the previous one.
void G1Arguments::initialize_alignments() {
  // Initialize card size before initializing alignments
  CardTable::initialize_card_size();

  // Set up the region size and associated fields.
  //
  // There is a circular dependency here. We base the region size on the heap
  // size, but the heap size should be aligned with the region size. To get
  // around this we use the unaligned values for the heap.
  G1HeapRegion::setup_heap_region_size(MaxHeapSize);

  // G1 spaces are whole regions, so the space alignment is the region size.
  SpaceAlignment = G1HeapRegion::GrainBytes;
  HeapAlignment = calculate_heap_alignment(SpaceAlignment);

  // We need to initialize card set configuration as soon as heap region size is
  // known as it depends on it and is used really early.
  initialize_card_set_configuration();
  // Needs remembered set initialization as the ergonomics are based
  // on it.
  if (FLAG_IS_DEFAULT(G1EagerReclaimRemSetThreshold)) {
    FLAG_SET_ERGO(G1EagerReclaimRemSetThreshold, G1RemSetArrayOfCardsEntries);
  }
}
 73 
 74 size_t G1Arguments::conservative_max_heap_alignment() {
 75   return G1HeapRegion::max_region_size();
 76 }
 77 
 78 void G1Arguments::initialize_verification_types() {
 79   if (strlen(VerifyGCType) > 0) {
 80     const char delimiter[] = " ,\n";
 81     size_t length = strlen(VerifyGCType);
 82     char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
 83     strncpy(type_list, VerifyGCType, length + 1);
 84     char* save_ptr;
 85 
 86     char* token = strtok_r(type_list, delimiter, &save_ptr);
 87     while (token != nullptr) {
 88       parse_verification_type(token);
 89       token = strtok_r(nullptr, delimiter, &save_ptr);
 90     }
 91     FREE_C_HEAP_ARRAY(char, type_list);
 92   }
 93 }
 94 
 95 void G1Arguments::parse_verification_type(const char* type) {
 96   if (strcmp(type, "young-normal") == 0) {
 97     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungNormal);
 98   } else if (strcmp(type, "concurrent-start") == 0) {
 99     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyConcurrentStart);
100   } else if (strcmp(type, "mixed") == 0) {
101     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
102   } else if (strcmp(type, "young-evac-fail") == 0) {
103     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungEvacFail);
104   } else if (strcmp(type, "remark") == 0) {
105     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
106   } else if (strcmp(type, "cleanup") == 0) {
107     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
108   } else if (strcmp(type, "full") == 0) {
109     G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
110   } else {
111     log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
112                             "young-normal, young-evac-fail, concurrent-start, mixed, remark, cleanup and full", type);
113   }
114 }
115 
// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase: roughly one concurrent worker per four STW workers
// (rounded to nearest), but always at least one.
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  const uint scaled = (num_gc_workers + 2) / 4;
  return (scaled < 1U) ? 1U : scaled;
}
122 
123 void G1Arguments::initialize_mark_stack_size() {
124   if (FLAG_IS_DEFAULT(MarkStackSize)) {
125     size_t mark_stack_size = MIN2(MarkStackSizeMax,
126                                   MAX2(MarkStackSize, (size_t)ConcGCThreads * TASKQUEUE_SIZE));
127     FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
128   }
129 }
130 
// Derives the remembered set card set container sizing flags from the region
// size, for every flag the user did not set explicitly. The region size must
// already have been set up (see initialize_alignments()).
void G1Arguments::initialize_card_set_configuration() {
  assert(G1HeapRegion::LogOfHRGrainBytes != 0, "not initialized");
  // Array of Cards card set container globals.
  const uint LOG_M = 20; // log2 of the minimum region size (1M), per the assert below.
  assert(log2i_exact(G1HeapRegionBounds::min_size()) == LOG_M, "inv");
  assert(G1HeapRegion::LogOfHRGrainBytes >= LOG_M, "from the above");
  // log2 of the region size expressed in units of the minimum region size;
  // used to scale the container sizes with the region size.
  uint region_size_log_mb = G1HeapRegion::LogOfHRGrainBytes - LOG_M;

  if (FLAG_IS_DEFAULT(G1RemSetArrayOfCardsEntries)) {
    uint max_cards_in_inline_ptr = G1CardSetConfiguration::max_cards_in_inline_ptr(G1HeapRegion::LogCardsPerRegion);
    // At least twice what fits in an inline pointer, scaled up with region size.
    FLAG_SET_ERGO(G1RemSetArrayOfCardsEntries, MAX2(max_cards_in_inline_ptr * 2,
                                                    G1RemSetArrayOfCardsEntriesBase << region_size_log_mb));
  }

  // Howl card set container globals. Note: reads G1RemSetArrayOfCardsEntries,
  // which may have been ergonomically set just above.
  if (FLAG_IS_DEFAULT(G1RemSetHowlNumBuckets)) {
    FLAG_SET_ERGO(G1RemSetHowlNumBuckets, G1CardSetHowl::num_buckets(G1HeapRegion::CardsPerRegion,
                                                                     G1RemSetArrayOfCardsEntries,
                                                                     G1RemSetHowlMaxNumBuckets));
  }

  if (FLAG_IS_DEFAULT(G1RemSetHowlMaxNumBuckets)) {
    // Keep the maximum consistent with the (possibly ergonomically raised)
    // number of buckets.
    FLAG_SET_ERGO(G1RemSetHowlMaxNumBuckets, MAX2(G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets));
  } else if (G1RemSetHowlMaxNumBuckets < G1RemSetHowlNumBuckets) {
    // The user requested a maximum smaller than the required number of
    // buckets; refuse to start.
    FormatBuffer<> buf("Maximum Howl card set container bucket size %u smaller than requested bucket size %u",
                       G1RemSetHowlMaxNumBuckets, G1RemSetHowlNumBuckets);
    vm_exit_during_initialization(buf);
  }
}
160 
// Main G1 argument/ergonomics setup: worker thread counts, refinement
// threads, pause time goals and related defaults. Statement order matters;
// several later calculations read flag values established earlier in this
// function (e.g. ConcGCThreads is derived from ParallelGCThreads).
void G1Arguments::initialize() {
  GCArguments::initialize();
  assert(UseG1GC, "Error");
  // Establish the default number of parallel GC threads from WorkerPolicy.
  FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    // Zero can only come from an explicit user setting; G1 cannot run without
    // parallel workers.
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", nullptr);
  }

  // When dumping the CDS heap we want to reduce fragmentation by
  // triggering a full collection. To get as low fragmentation as
  // possible we only use one worker thread.
  if (CDSConfig::is_dumping_heap()) {
    FLAG_SET_ERGO(ParallelGCThreads, 1);
  }

  if (!G1UseConcRefinement) {
    // Concurrent refinement is disabled: force the thread count to zero and
    // warn if the user also set a thread count that is now ignored.
    if (!FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
      log_warning(gc, ergo)("Ignoring -XX:G1ConcRefinementThreads "
                            "because of -XX:-G1UseConcRefinement");
    }
    FLAG_SET_DEFAULT(G1ConcRefinementThreads, 0);
  } else if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
  }

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to ~8%.
    FLAG_SET_DEFAULT(GCTimeRatio, 12);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange that the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

  // With more than one GC worker, parallel reference processing pays off.
  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
  }

#ifdef COMPILER2
  // Enable loop strip mining to offer better pause time guarantees
  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
    }
  }
#endif

  initialize_mark_stack_size();
  initialize_verification_types();

  // Verify that the maximum parallelism isn't too high to eventually overflow
  // the refcount in G1CardSetContainer.
  uint max_parallel_refinement_threads = G1ConcRefinementThreads + G1DirtyCardQueueSet::num_par_ids();
  uint const divisor = 3;  // Safe divisor; we increment by 2 for each claim, but there is a small initial value.
  if (max_parallel_refinement_threads > UINT_MAX / divisor) {
    vm_exit_during_initialization("Too large parallelism for remembered sets.");
  }

  // Initialize full GC forwarding based on the maximum heap reservation.
  FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}
250 
251 CollectedHeap* G1Arguments::create_heap() {
252   return new G1CollectedHeap();
253 }
254 
// G1 reserves exactly the maximum heap size up front.
size_t G1Arguments::heap_reserved_size_bytes() {
  return MaxHeapSize;
}