1 /*
2 * Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "classfile/classLoaderDataGraph.hpp"
26 #include "classfile/metadataOnStackMark.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "code/codeCache.hpp"
29 #include "compiler/oopMap.hpp"
30 #include "gc/g1/g1Allocator.inline.hpp"
31 #include "gc/g1/g1Arguments.hpp"
32 #include "gc/g1/g1BarrierSet.hpp"
33 #include "gc/g1/g1BatchedTask.hpp"
34 #include "gc/g1/g1CollectedHeap.inline.hpp"
35 #include "gc/g1/g1CollectionSet.hpp"
36 #include "gc/g1/g1CollectionSetCandidates.hpp"
37 #include "gc/g1/g1CollectorState.hpp"
38 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
39 #include "gc/g1/g1ConcurrentRefine.hpp"
40 #include "gc/g1/g1ConcurrentRefineThread.hpp"
41 #include "gc/g1/g1EvacStats.inline.hpp"
42 #include "gc/g1/g1FullCollector.hpp"
43 #include "gc/g1/g1GCCounters.hpp"
44 #include "gc/g1/g1GCParPhaseTimesTracker.hpp"
45 #include "gc/g1/g1GCPauseType.hpp"
46 #include "gc/g1/g1GCPhaseTimes.hpp"
47 #include "gc/g1/g1HeapRegion.inline.hpp"
48 #include "gc/g1/g1HeapRegionPrinter.hpp"
49 #include "gc/g1/g1HeapRegionRemSet.inline.hpp"
50 #include "gc/g1/g1HeapRegionSet.inline.hpp"
51 #include "gc/g1/g1HeapSizingPolicy.hpp"
52 #include "gc/g1/g1HeapTransition.hpp"
53 #include "gc/g1/g1HeapVerifier.hpp"
54 #include "gc/g1/g1InitLogger.hpp"
55 #include "gc/g1/g1MemoryPool.hpp"
56 #include "gc/g1/g1MonotonicArenaFreeMemoryTask.hpp"
57 #include "gc/g1/g1OopClosures.inline.hpp"
58 #include "gc/g1/g1ParallelCleaning.hpp"
59 #include "gc/g1/g1ParScanThreadState.inline.hpp"
60 #include "gc/g1/g1PeriodicGCTask.hpp"
61 #include "gc/g1/g1Policy.hpp"
62 #include "gc/g1/g1RegionPinCache.inline.hpp"
63 #include "gc/g1/g1RegionToSpaceMapper.hpp"
64 #include "gc/g1/g1RemSet.hpp"
65 #include "gc/g1/g1ReviseYoungLengthTask.hpp"
66 #include "gc/g1/g1RootClosures.hpp"
67 #include "gc/g1/g1SATBMarkQueueSet.hpp"
68 #include "gc/g1/g1ServiceThread.hpp"
69 #include "gc/g1/g1ThreadLocalData.hpp"
70 #include "gc/g1/g1Trace.hpp"
71 #include "gc/g1/g1UncommitRegionTask.hpp"
72 #include "gc/g1/g1VMOperations.hpp"
73 #include "gc/g1/g1YoungCollector.hpp"
74 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
75 #include "gc/shared/barrierSetNMethod.hpp"
76 #include "gc/shared/classUnloadingContext.hpp"
77 #include "gc/shared/concurrentGCBreakpoints.hpp"
78 #include "gc/shared/fullGCForwarding.inline.hpp"
79 #include "gc/shared/gcBehaviours.hpp"
80 #include "gc/shared/gcHeapSummary.hpp"
81 #include "gc/shared/gcId.hpp"
82 #include "gc/shared/gcTimer.hpp"
83 #include "gc/shared/gcTraceTime.inline.hpp"
84 #include "gc/shared/isGCActiveMark.hpp"
85 #include "gc/shared/locationPrinter.inline.hpp"
86 #include "gc/shared/oopStorageParState.hpp"
87 #include "gc/shared/partialArrayState.hpp"
88 #include "gc/shared/referenceProcessor.inline.hpp"
89 #include "gc/shared/suspendibleThreadSet.hpp"
90 #include "gc/shared/taskqueue.inline.hpp"
91 #include "gc/shared/taskTerminator.hpp"
92 #include "gc/shared/tlab_globals.hpp"
93 #include "gc/shared/weakProcessor.inline.hpp"
94 #include "gc/shared/workerPolicy.hpp"
95 #include "logging/log.hpp"
96 #include "memory/allocation.hpp"
97 #include "memory/heapInspection.hpp"
98 #include "memory/iterator.hpp"
99 #include "memory/memoryReserver.hpp"
100 #include "memory/metaspaceUtils.hpp"
101 #include "memory/resourceArea.hpp"
102 #include "memory/universe.hpp"
103 #include "oops/access.inline.hpp"
104 #include "oops/compressedOops.inline.hpp"
105 #include "oops/oop.inline.hpp"
106 #include "runtime/cpuTimeCounters.hpp"
107 #include "runtime/handles.inline.hpp"
108 #include "runtime/init.hpp"
109 #include "runtime/java.hpp"
110 #include "runtime/orderAccess.hpp"
111 #include "runtime/threads.hpp"
112 #include "runtime/threadSMR.hpp"
113 #include "runtime/vmThread.hpp"
114 #include "utilities/align.hpp"
115 #include "utilities/autoRestore.hpp"
116 #include "utilities/bitMap.inline.hpp"
117 #include "utilities/globalDefinitions.hpp"
118 #include "utilities/stack.inline.hpp"
119
// Threshold (in words) at or above which an allocation is treated as
// humongous. Initialized to 0 here; presumably set from the region size
// during heap initialization (not visible in this chunk) -- TODO confirm.
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;

// INVARIANTS/NOTES
//
// All allocation activity covered by the G1CollectedHeap interface is
// serialized by acquiring the HeapLock. This happens in mem_allocate
// and allocate_new_tlab, which are the "entry" points to the
// allocation code from the rest of the JVM. (Note that this does not
// apply to TLAB allocation, which is not part of this interface: it
// is done by clients of this interface.)
130
// Invalidate the "from card cache" entries covering the region range
// [start_idx, start_idx + num_regions) so stale cached values are not used
// after the region mapping has changed.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  G1HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
134
// Callback invoked when the regions [start_idx, start_idx + num_regions)
// are committed by the region-to-space mapper.
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}
140
// Collects commonly used scoped objects that are related to initial setup.
// NOTE: members are initialized in declaration order, so the order below is
// significant (e.g. the ResourceMark must be established first).
class G1GCMark : StackObj {
  ResourceMark _rm;                  // Scoped resource-area allocations for the pause.
  IsSTWGCActiveMark _active_gc_mark; // Marks a stop-the-world GC as in progress.
  GCIdMark _gc_id_mark;              // Establishes a GC id for logging/tracing.
  SvcGCMarker _sgcm;                 // Serviceability GC notification (FULL vs MINOR).
  GCTraceCPUTime _tcpu;              // CPU time tracing for the given tracer.

public:
  // tracer: receiver of the CPU time trace events.
  // is_full_gc: selects FULL vs MINOR for the serviceability marker.
  G1GCMark(GCTracer* tracer, bool is_full_gc) :
    _rm(),
    _active_gc_mark(),
    _gc_id_mark(),
    _sgcm(is_full_gc ? SvcGCMarker::FULL : SvcGCMarker::MINOR),
    _tcpu(tracer) {

    // Only ever constructed by the VM thread at a safepoint.
    assert_at_safepoint_on_vm_thread();
  }
};
160
161 void G1CollectedHeap::run_batch_task(G1BatchedTask* cl) {
162 uint num_workers = MAX2(1u, MIN2(cl->num_workers_estimate(), workers()->active_workers()));
163 cl->set_max_workers(num_workers);
164 workers()->run_task(cl, num_workers);
165 }
166
167 uint G1CollectedHeap::get_chunks_per_region_for_scan() {
168 uint log_region_size = G1HeapRegion::LogOfHRGrainBytes;
169 // Limit the expected input values to current known possible values of the
170 // (log) region size. Adjust as necessary after testing if changing the permissible
171 // values for region size.
172 assert(log_region_size >= 20 && log_region_size <= 29,
173 "expected value in [20,29], but got %u", log_region_size);
174 return 1u << (log_region_size / 2 - 4);
175 }
176
177 uint G1CollectedHeap::get_chunks_per_region_for_merge() {
178 uint log_region_size = G1HeapRegion::LogOfHRGrainBytes;
179 // Limit the expected input values to current known possible values of the
180 // (log) region size. Adjust as necessary after testing if changing the permissible
181 // values for region size.
182 assert(log_region_size >= 20 && log_region_size <= 29,
183 "expected value in [20,29], but got %u", log_region_size);
184
185 uint half_log_region_size = (log_region_size + 1) / 2;
186 return 1 << (half_log_region_size - 9);
187 }
188
// Factory method: create a new G1HeapRegion object for index hrs_index
// covering the memory range mr.
G1HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                               MemRegion mr) {
  return new G1HeapRegion(hrs_index, bot(), mr, &_card_set_config);
}
193
194 // Private methods.
195
// Allocate a single free region of the given type (and NUMA node), optionally
// expanding the heap by one region if the free list cannot satisfy the
// request. Returns null if no region could be obtained.
G1HeapRegion* G1CollectedHeap::new_region(size_t word_size,
                                          G1HeapRegionType type,
                                          bool do_expand,
                                          uint node_index) {
  assert(!is_humongous(word_size) || word_size <= G1HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  G1HeapRegion* res = _hrm.allocate_free_region(type, node_index);

  if (res == nullptr && do_expand) {
    // There are two situations where do_expand is set to true:
    // - for mutator regions during initialization
    // - for GC alloc regions during a safepoint
    // Make sure we only reach here before initialization is complete
    // or during a safepoint.
    assert(!is_init_completed() ||
           SafepointSynchronize::is_at_safepoint() , "invariant");

    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: %zuB",
                              word_size * HeapWordSize);

    assert(word_size * HeapWordSize < G1HeapRegion::GrainBytes,
           "This kind of expansion should never be more than one region. Size: %zu",
           word_size * HeapWordSize);
    if (expand_single_region(node_index)) {
      // Given that expand_single_region() succeeded in expanding the heap, and we
      // always expand the heap by an amount aligned to the heap
      // region size, the free list should in theory not be empty.
      // In either case allocate_free_region() will check for null.
      res = _hrm.allocate_free_region(type, node_index);
    }
  }
  return res;
}
231
// Set up the region metadata for a humongous object of word_size words
// spanning num_regions regions starting at first_hr: the first region becomes
// "starts humongous", the rest "continues humongous"; the unused tail of the
// last region is padded with filler objects where possible. The top fields
// are published only after a store-store barrier so concurrent readers never
// observe top ahead of the zeroed object header / BOT setup.
void G1CollectedHeap::set_humongous_metadata(G1HeapRegion* first_hr,
                                             uint num_regions,
                                             size_t word_size,
                                             bool update_remsets) {
  // Calculate the new top of the humongous object.
  HeapWord* obj_top = first_hr->bottom() + word_size;
  // The word size sum of all the regions used
  size_t word_size_sum = num_regions * G1HeapRegion::GrainWords;
  assert(word_size <= word_size_sum, "sanity");

  // How many words memory we "waste" which cannot hold a filler object.
  size_t words_not_fillable = 0;

  // Pad out the unused tail of the last region with filler
  // objects, for improved usage accounting.

  // How many words can we use for filler objects.
  size_t words_fillable = word_size_sum - word_size;

  if (words_fillable >= G1CollectedHeap::min_fill_size()) {
    G1CollectedHeap::fill_with_objects(obj_top, words_fillable);
  } else {
    // We have space to fill, but we cannot fit an object there.
    words_not_fillable = words_fillable;
    words_fillable = 0;
  }

  // We will set up the first region as "starts humongous". This
  // will also update the BOT covering all the regions to reflect
  // that there is a single object that starts at the bottom of the
  // first region.
  first_hr->hr_clear(false /* clear_space */);
  first_hr->set_starts_humongous(obj_top, words_fillable);

  if (update_remsets) {
    _policy->remset_tracker()->update_at_allocate(first_hr);
  }

  // Indices of first and last regions in the series.
  uint first = first_hr->hrm_index();
  uint last = first + num_regions - 1;

  G1HeapRegion* hr = nullptr;
  for (uint i = first + 1; i <= last; ++i) {
    hr = region_at(i);
    hr->hr_clear(false /* clear_space */);
    hr->set_continues_humongous(first_hr);
    if (update_remsets) {
      _policy->remset_tracker()->update_at_allocate(hr);
    }
  }

  // Up to this point no concurrent thread would have been able to
  // do any scanning on any region in this series. All the top
  // fields still point to bottom, so the intersection between
  // [bottom,top] and [card_start,card_end] will be empty. Before we
  // update the top fields, we'll do a storestore to make sure that
  // no thread sees the update to top before the zeroing of the
  // object header and the BOT initialization.
  OrderAccess::storestore();

  // Now, we will update the top fields of the "continues humongous"
  // regions except the last one.
  for (uint i = first; i < last; ++i) {
    hr = region_at(i);
    hr->set_top(hr->end());
  }

  hr = region_at(last);
  // If we cannot fit a filler object, we must set top to the end
  // of the humongous object, otherwise we cannot iterate the heap
  // and the BOT will not be complete.
  hr->set_top(hr->end() - words_not_fillable);

  assert(hr->bottom() < obj_top && obj_top <= hr->end(),
         "obj_top should be in last region");

  assert(words_not_fillable == 0 ||
         first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
         "Miscalculation in humongous allocation");
}
313
// Initialize the num_regions regions backing a newly allocated humongous
// object starting at first_hr, update the used-space accounting and the
// humongous region set, and return the address of the new object (the
// bottom of first_hr). The initialization order is deliberate; see the
// comments below.
HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(G1HeapRegion* first_hr,
                                                           uint num_regions,
                                                           size_t word_size) {
  assert(first_hr != nullptr, "pre-condition");
  assert(is_humongous(word_size), "word_size should be humongous");
  assert(num_regions * G1HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series.
  uint first = first_hr->hrm_index();
  uint last = first + num_regions - 1;

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // refinement threads refining cards on these regions and
  // potentially wanting to refine the BOT as they are scanning
  // those cards (this can happen shortly after a cleanup; see CR
  // 6991377). So we have to set up the region(s) carefully and in
  // a specific order.

  // The passed in hr will be the "starts humongous" region. The header
  // of the new object will be placed at the bottom of this region.
  HeapWord* new_obj = first_hr->bottom();

  // First, we need to zero the header of the space that we will be
  // allocating. When we update top further down, some refinement
  // threads might try to scan the region. By zeroing the header we
  // ensure that any thread that will try to scan the region will
  // come across the zero klass word and bail out.
  //
  // NOTE: It would not have been correct to have used
  // CollectedHeap::fill_with_object() and make the space look like
  // an int array. The thread that is doing the allocation will
  // later update the object header to a potentially different array
  // type and, for a very short period of time, the klass and length
  // fields will be inconsistent. This could cause a refinement
  // thread to calculate the object size incorrectly.
  Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);

  // Next, update the metadata for the regions.
  set_humongous_metadata(first_hr, num_regions, word_size, true);

  G1HeapRegion* last_hr = region_at(last);
  size_t used = byte_size(first_hr->bottom(), last_hr->top());

  increase_used(used);

  // Register every region of the series with the humongous set and the
  // region printer.
  for (uint i = first; i <= last; ++i) {
    G1HeapRegion *hr = region_at(i);
    _humongous_set.add(hr);
    G1HeapRegionPrinter::alloc(hr);
  }

  return new_obj;
}
369
370 size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
371 assert(is_humongous(word_size), "Object of size %zu must be humongous here", word_size);
372 return align_up(word_size, G1HeapRegion::GrainWords) / G1HeapRegion::GrainWords;
373 }
374
375 size_t G1CollectedHeap::allocation_used_bytes(size_t allocation_word_size) {
376 if (is_humongous(allocation_word_size)) {
377 return humongous_obj_size_in_regions(allocation_word_size) * G1HeapRegion::GrainBytes;
378 } else {
379 return allocation_word_size * HeapWordSize;
380 }
381 }
382
// Allocate a humongous object of word_size words. Policy:
// - If it could fit into free regions w/o expansion, try that first.
// - Otherwise, if we can expand, do so.
// Returns the object address, or null if neither works. Caller must hold
// the Heap_lock or be the VM thread at a safepoint.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  _verifier->verify_region_sets_optional();

  uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
  if (obj_regions > num_available_regions()) {
    // Can't satisfy this allocation; early-return.
    return nullptr;
  }

  // Policy: First try to allocate a humongous object in the free list.
  G1HeapRegion* humongous_start = _hrm.allocate_humongous(obj_regions);
  if (humongous_start == nullptr) {
    // Policy: We could not find enough regions for the humongous object in the
    // free list. Look through the heap to find a mix of free and uncommitted regions.
    // If so, expand the heap and allocate the humongous object.
    humongous_start = _hrm.expand_and_allocate_humongous(obj_regions);
    if (humongous_start != nullptr) {
      // We managed to find a region by expanding the heap.
      log_debug(gc, ergo, heap)("Heap expansion (humongous allocation request). Allocation request: %zuB",
                                word_size * HeapWordSize);
      policy()->record_new_heap_size(num_committed_regions());
    } else {
      // Policy: Potentially trigger a defragmentation GC.
    }
  }

  HeapWord* result = nullptr;
  if (humongous_start != nullptr) {
    result = humongous_obj_allocate_initialize_regions(humongous_start, obj_regions, word_size);
    assert(result != nullptr, "it should always return a valid result");

    // A successful humongous object allocation changes the used space
    // information of the old generation so we need to recalculate the
    // sizes and update the jstat counters here.
    monitoring_support()->update_sizes();
  }

  _verifier->verify_region_sets_optional();

  return result;
}
429
// Allocate a new TLAB of between min_size and requested_size words; the size
// actually obtained is stored through actual_size. Never triggers a GC (see
// the note below) and never allocates humongous TLABs.
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
                                             size_t requested_size,
                                             size_t* actual_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(requested_size), "we do not allow humongous TLABs");

  // Do not allow a GC because we are allocating a new TLAB to avoid an issue
  // with UseGCOverheadLimit: although this GC would return null if the overhead
  // limit would be exceeded, but it would likely free at least some space.
  // So the subsequent outside-TLAB allocation could be successful anyway and
  // the indication that the overhead limit had been exceeded swallowed.
  return attempt_allocation(min_size, requested_size, actual_size, false /* allow_gc */);
}
443
444 HeapWord* G1CollectedHeap::mem_allocate(size_t word_size) {
445 assert_heap_not_locked_and_not_at_safepoint();
446
447 if (is_humongous(word_size)) {
448 return attempt_allocation_humongous(word_size);
449 }
450 size_t dummy = 0;
451 return attempt_allocation(word_size, word_size, &dummy, true /* allow_gc */);
452 }
453
// Slow path for non-humongous allocation after the lock-free fast path
// failed: loop taking the Heap_lock, retrying the allocation, and scheduling
// collection pauses. Returns null only when a successfully scheduled
// collection still cannot satisfy the allocation, when !allow_gc, or when
// the GC overhead limit has been exceeded.
HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_size, bool allow_gc) {
  ResourceMark rm; // For retrieving the thread names in log messages.

  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the allocation or b)
  // successfully schedule a collection which fails to perform the allocation.
  // Case b) is the only case when we'll return null.
  HeapWord* result = nullptr;
  for (uint try_count = 1; /* we'll return */; try_count++) {
    uint gc_count_before;

    {
      MutexLocker x(Heap_lock);

      // Now that we have the lock, we first retry the allocation in case another
      // thread changed the region while we were waiting to acquire the lock.
      result = _allocator->attempt_allocation_locked(node_index, word_size);
      if (result != nullptr) {
        return result;
      } else if (!allow_gc) {
        return nullptr;
      }

      // Read the GC count while still holding the Heap_lock.
      gc_count_before = total_collections();
    }

    bool succeeded;
    result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_inc_collection_pause);
    if (succeeded) {
      // The collection was scheduled and run; result may still be null if the
      // pause could not satisfy the allocation (case b above).
      log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                           Thread::current()->name(), p2i(result));
      return result;
    }

    log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating %zu words",
                         Thread::current()->name(), word_size);

    // Has the gc overhead limit been reached in the meantime? If so, this mutator
    // should receive null even when unsuccessfully scheduling a collection as well
    // for global consistency.
    if (gc_overhead_limit_exceeded()) {
      return nullptr;
    }

    // We can reach here if we were unsuccessful in scheduling a collection (because
    // another thread beat us to it). In this case immediately retry the allocation
    // attempt because another thread successfully performed a collection and possibly
    // reclaimed enough space. The first attempt (without holding the Heap_lock) is
    // here and the follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    size_t dummy = 0;
    result = _allocator->attempt_allocation(node_index, word_size, word_size, &dummy);
    if (result != nullptr) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, alloc)("%s: Retried allocation %u times for %zu words",
                             Thread::current()->name(), try_count, word_size);
    }
  }

  ShouldNotReachHere();
  return nullptr;
}
530
531 template <typename Func>
532 void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func) {
533 // Mark each G1 region touched by the range as old, add it to
534 // the old set, and set top.
535 G1HeapRegion* curr_region = _hrm.addr_to_region(range.start());
536 G1HeapRegion* end_region = _hrm.addr_to_region(range.last());
537
538 while (curr_region != nullptr) {
539 bool is_last = curr_region == end_region;
540 G1HeapRegion* next_region = is_last ? nullptr : _hrm.next_region_in_heap(curr_region);
541
542 func(curr_region, is_last);
543
544 curr_region = next_region;
545 }
546 }
547
// Allocate committed regions at the top of the reserved heap for mapping in
// archived (CDS) heap data, marking them as old regions. Returns the start
// address of the allocated range, or null on failure. Only valid before JVM
// initialization completes; takes the Heap_lock.
HeapWord* G1CollectedHeap::alloc_archive_region(size_t word_size) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  MutexLocker x(Heap_lock);

  MemRegion reserved = _hrm.reserved();

  // The request must fit strictly inside the reserved heap.
  if (reserved.word_size() <= word_size) {
    log_info(gc, heap)("Unable to allocate regions as archive heap is too large; size requested = %zu"
                       " bytes, heap = %zu bytes", word_size * HeapWordSize, reserved.byte_size());
    return nullptr;
  }

  // Temporarily disable pretouching of heap pages. This interface is used
  // when mmap'ing archived heap data in, so pre-touching is wasted.
  FlagSetting fs(AlwaysPreTouch, false);

  size_t commits = 0;
  // Attempt to allocate towards the end of the heap.
  HeapWord* start_addr = reserved.end() - align_up(word_size, G1HeapRegion::GrainWords);
  MemRegion range = MemRegion(start_addr, word_size);
  HeapWord* last_address = range.last();
  if (!_hrm.allocate_containing_regions(range, &commits, workers())) {
    return nullptr;
  }
  increase_used(word_size * HeapWordSize);
  if (commits != 0) {
    log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: %zuB",
                              G1HeapRegion::GrainWords * HeapWordSize * commits);
  }

  // Mark each G1 region touched by the range as old, add it to
  // the old set, and set top.
  auto set_region_to_old = [&] (G1HeapRegion* r, bool is_last) {
    assert(r->is_empty(), "Region already in use (%u)", r->hrm_index());

    // The last region's top ends at the requested range; all earlier
    // regions are fully covered.
    HeapWord* top = is_last ? last_address + 1 : r->end();
    r->set_top(top);

    r->set_old();
    G1HeapRegionPrinter::alloc(r);
    _old_set.add(r);
  };

  iterate_regions_in_range(range, set_region_to_old);
  return start_addr;
}
594
595 void G1CollectedHeap::populate_archive_regions_bot(MemRegion range) {
596 assert(!is_init_completed(), "Expect to be called at JVM init time");
597
598 iterate_regions_in_range(range,
599 [&] (G1HeapRegion* r, bool is_last) {
600 r->update_bot();
601 });
602 }
603
// Free the regions that previously held CDS archive content within range.
// Regions are uncommitted (shrunk) as long as that does not take the heap
// below MinHeapSize; the remainder go back onto the free list. Only valid
// before JVM initialization completes; takes the Heap_lock.
void G1CollectedHeap::dealloc_archive_regions(MemRegion range) {
  assert(!is_init_completed(), "Expect to be called at JVM init time");
  MemRegion reserved = _hrm.reserved();
  size_t size_used = 0;

  // Free the G1 regions that are within the specified range.
  MutexLocker x(Heap_lock);
  HeapWord* start_address = range.start();
  HeapWord* last_address = range.last();

  assert(reserved.contains(start_address) && reserved.contains(last_address),
         "MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
         p2i(start_address), p2i(last_address));
  size_used += range.byte_size();

  // How many regions we may uncommit without shrinking below MinHeapSize.
  uint max_shrink_count = 0;
  if (capacity() > MinHeapSize) {
    size_t max_shrink_bytes = capacity() - MinHeapSize;
    max_shrink_count = (uint)(max_shrink_bytes / G1HeapRegion::GrainBytes);
  }

  uint shrink_count = 0;
  // Free, empty and uncommit regions with CDS archive content.
  auto dealloc_archive_region = [&] (G1HeapRegion* r, bool is_last) {
    guarantee(r->is_old(), "Expected old region at index %u", r->hrm_index());
    _old_set.remove(r);
    r->set_free();
    r->set_top(r->bottom());
    if (shrink_count < max_shrink_count) {
      // Uncommit the region (deferred; see uncommit_regions() below).
      _hrm.shrink_at(r->hrm_index(), 1);
      shrink_count++;
    } else {
      _hrm.insert_into_free_list(r);
    }
  };

  iterate_regions_in_range(range, dealloc_archive_region);

  if (shrink_count != 0) {
    log_debug(gc, ergo, heap)("Attempt heap shrinking (CDS archive regions). Total size: %zuB (%u Regions)",
                              G1HeapRegion::GrainWords * HeapWordSize * shrink_count, shrink_count);
    // Explicit uncommit.
    uncommit_regions(shrink_count);
  }
  decrease_used(size_used);
}
650
// First-level (fast path) non-humongous allocation of between min_word_size
// and desired_word_size words; the size actually allocated is stored through
// actual_word_size (0 on failure). Falls back to attempt_allocation_slow()
// on failure, which may schedule a GC when allow_gc is true.
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size,
                                                     bool allow_gc) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  // Fix NUMA node association for the duration of this allocation
  const uint node_index = _allocator->current_node_index();

  HeapWord* result = _allocator->attempt_allocation(node_index, min_word_size, desired_word_size, actual_word_size);

  if (result == nullptr) {
    // The slow path allocates exactly desired_word_size words, so pre-set
    // the actual size accordingly; it is reset to 0 below if that fails too.
    *actual_word_size = desired_word_size;
    result = attempt_allocation_slow(node_index, desired_word_size, allow_gc);
  }

  assert_heap_not_locked();
  if (result != nullptr) {
    assert(*actual_word_size != 0, "Actual size must have been set here");
  } else {
    *actual_word_size = 0;
  }

  return result;
}
678
// Helper for [try_]collect(): take a consistent snapshot of the heap's
// collection counters under the Heap_lock.
static G1GCCounters collection_counters(G1CollectedHeap* g1h) {
  MutexLocker ml(Heap_lock);
  return G1GCCounters(g1h);
}
684
// Allocation path for humongous objects: may first trigger a concurrent mark
// cycle, then loops taking the Heap_lock, trying the allocation and
// scheduling collection pauses. Returns null only when a successfully
// scheduled collection still cannot satisfy the allocation or when the GC
// overhead limit has been exceeded.
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
  ResourceMark rm; // For retrieving the thread names in log messages.

  // The structure of this method has a lot of similarities to
  // attempt_allocation_slow(). The reason these two were not merged
  // into a single one is that such a method would require several "if
  // allocation is not humongous do this, otherwise do that"
  // conditional paths which would obscure its flow. In fact, an early
  // version of this code did use a unified method which was harder to
  // follow and, as a result, it had subtle bugs that were hard to
  // track down. So keeping these two methods separate allows each to
  // be more readable. It will be good to keep these two in sync as
  // much as possible.

  assert_heap_not_locked_and_not_at_safepoint();
  assert(is_humongous(word_size), "attempt_allocation_humongous() "
         "should only be called for humongous allocations");

  // Humongous objects can exhaust the heap quickly, so we should check if we
  // need to start a marking cycle at each humongous object allocation. We do
  // the check before we do the actual allocation. The reason for doing it
  // before the allocation is that we avoid having to keep track of the newly
  // allocated memory while we do a GC.
  // Only try that if we can actually perform a GC.
  if (is_init_completed() &&
      policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
    try_collect(word_size, GCCause::_g1_humongous_allocation, collection_counters(this));
  }

  // We will loop until a) we manage to successfully perform the allocation or b)
  // successfully schedule a collection which fails to perform the allocation.
  // Case b) is the only case when we'll return null.
  HeapWord* result = nullptr;
  for (uint try_count = 1; /* we'll return */; try_count++) {
    uint gc_count_before;

    // The amount of bytes the humongous object will actually take.
    size_t humongous_byte_size = G1HeapRegion::align_up_to_region_byte_size(word_size * HeapWordSize);

    {
      MutexLocker x(Heap_lock);

      // Given that humongous objects are not allocated in young
      // regions, we'll first try to do the allocation without doing a
      // collection hoping that there's enough space in the heap.
      result = humongous_obj_allocate(word_size);
      if (result != nullptr) {
        policy()->old_gen_alloc_tracker()->
          add_allocated_humongous_bytes_since_last_gc(humongous_byte_size);
        return result;
      }

      // Read the GC count while still holding the Heap_lock.
      gc_count_before = total_collections();
    }

    bool succeeded;
    result = do_collection_pause(word_size, gc_count_before, &succeeded, GCCause::_g1_humongous_allocation);
    if (succeeded) {
      log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                           Thread::current()->name(), p2i(result));
      if (result != nullptr) {
        policy()->old_gen_alloc_tracker()->
          record_collection_pause_humongous_allocation(humongous_byte_size);
      }
      return result;
    }

    log_trace(gc, alloc)("%s: Unsuccessfully scheduled collection allocating %zu",
                         Thread::current()->name(), word_size);

    // Has the gc overhead limit been reached in the meantime? If so, this mutator
    // should receive null even when unsuccessfully scheduling a collection as well
    // for global consistency.
    if (gc_overhead_limit_exceeded()) {
      return nullptr;
    }

    // We can reach here if we were unsuccessful in scheduling a collection (because
    // another thread beat us to it).
    // Humongous object allocation always needs a lock, so we wait for the retry
    // in the next iteration of the loop, unlike for the regular iteration case.
    // Give a warning if we seem to be looping forever.

    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc, alloc)("%s: Retried allocation %u times for %zu words",
                             Thread::current()->name(), try_count, word_size);
    }
  }

  ShouldNotReachHere();
  return nullptr;
}
779
780 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
781 bool expect_null_mutator_alloc_region) {
782 assert_at_safepoint_on_vm_thread();
783 assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
784 "the current alloc region was unexpectedly found to be non-null");
785
786 // Fix NUMA node association for the duration of this allocation
787 const uint node_index = _allocator->current_node_index();
788
789 if (!is_humongous(word_size)) {
790 return _allocator->attempt_allocation_locked(node_index, word_size);
791 } else {
792 HeapWord* result = humongous_obj_allocate(word_size);
793 if (result != nullptr &&
794 // We just allocated the humongous object, so the given allocation size is 0.
795 policy()->need_to_start_conc_mark("STW humongous allocation", 0 /* allocation_word_size */)) {
796 collector_state()->set_initiate_conc_mark_if_possible(true);
797 }
798 return result;
799 }
800
801 ShouldNotReachHere();
802 }
803
804 class PostCompactionPrinterClosure: public G1HeapRegionClosure {
805 public:
806 bool do_heap_region(G1HeapRegion* hr) {
807 assert(!hr->is_young(), "not expecting to find young regions");
808 G1HeapRegionPrinter::post_compaction(hr);
809 return false;
810 }
811 };
812
813 void G1CollectedHeap::print_heap_after_full_collection() {
814 // Post collection region logging.
815 // We should do this after we potentially resize the heap so
816 // that all the COMMIT / UNCOMMIT events are generated before
817 // the compaction events.
818 if (G1HeapRegionPrinter::is_active()) {
819 PostCompactionPrinterClosure cl;
820 heap_region_iterate(&cl);
821 }
822 }
823
// Abort any in-progress concurrent marking cycle, dropping all reference
// discovery state accumulated by the CM reference processor. Returns the
// value reported by G1ConcurrentMark::concurrent_cycle_abort() (presumably
// whether a cycle was actually aborted -- confirm in G1ConcurrentMark).
bool G1CollectedHeap::abort_concurrent_cycle() {
  // Disable discovery and empty the discovered lists
  // for the CM ref processor.
  _ref_processor_cm->disable_discovery();
  _ref_processor_cm->abandon_partial_discovery();
  _ref_processor_cm->verify_no_references_recorded();

  // Abandon current iterations of concurrent marking and concurrent
  // refinement, if any are in progress.
  return concurrent_mark()->concurrent_cycle_abort();
}
835
// Put the heap into a state the full collector can operate on: release all
// allocation regions and tear down the incremental collection set and the
// free region lists (both are rebuilt after the collection).
void G1CollectedHeap::prepare_heap_for_full_collection() {
  // Make sure we'll choose a new allocation region afterwards.
  _allocator->release_mutator_alloc_regions();
  _allocator->abandon_gc_alloc_regions();

  // We may have added regions to the current incremental collection
  // set between the last GC or pause and now. We need to clear the
  // incremental collection set and then start rebuilding it afresh
  // after this full GC.
  abandon_collection_set();

  // Free lists are rebuilt from scratch after the collection; see
  // prepare_for_mutator_after_full_collection().
  _hrm.remove_all_free_regions();
}
849
850 void G1CollectedHeap::verify_before_full_collection() {
851 assert_used_and_recalculate_used_equal(this);
852 if (!VerifyBeforeGC) {
853 return;
854 }
855 if (!G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyFull)) {
856 return;
857 }
858 _verifier->verify_region_sets_optional();
859 _verifier->verify_before_gc();
860 _verifier->verify_bitmap_clear(true /* above_tams_only */);
861 }
862
// Restore the heap to a mutator-usable state after a full collection:
// rebuild the region sets and code root lists, resize the heap, and set up a
// fresh collection set and mutator allocation regions.
// allocation_word_size is the size of the allocation (if any) that triggered
// the collection; it feeds into the resize heuristics.
void G1CollectedHeap::prepare_for_mutator_after_full_collection(size_t allocation_word_size) {
  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();
  resize_heap_after_full_collection(allocation_word_size);

  // Rebuild the code root lists for each region
  rebuild_code_roots();
  finish_codecache_marking_cycle();

  start_new_collection_set();
  _allocator->init_mutator_alloc_regions();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}
880
// Terminate an in-progress concurrent refinement sweep (if any), recording
// its statistics, and reset the sweep statistics. If the sweep had swapped
// the global card table but not yet synchronized all Java threads with it,
// finish that synchronization here first.
void G1CollectedHeap::abort_refinement() {
  G1ConcurrentRefineSweepState& sweep_state = concurrent_refine()->sweep_state();
  if (sweep_state.is_in_progress()) {

    if (!sweep_state.are_java_threads_synched()) {
      // Synchronize Java threads with global card table that has already been swapped.
      class SwapThreadCardTableClosure : public ThreadClosure {
      public:

        virtual void do_thread(Thread* t) {
          G1BarrierSet* bs = G1BarrierSet::g1_barrier_set();
          bs->update_card_table_base(t);
        }
      } cl;
      Threads::java_threads_do(&cl);
    }

    // Record any available refinement statistics.
    policy()->record_refinement_stats(sweep_state.stats());
    sweep_state.complete_work(false /* concurrent */, false /* print_log */);
  }
  // Statistics are cleared unconditionally, even when no sweep was running.
  sweep_state.reset_stats();
}
904
905 void G1CollectedHeap::verify_after_full_collection() {
906 if (!VerifyAfterGC) {
907 return;
908 }
909 if (!G1HeapVerifier::should_verify(G1HeapVerifier::G1VerifyFull)) {
910 return;
911 }
912 _hrm.verify_optional();
913 _verifier->verify_region_sets_optional();
914 _verifier->verify_card_tables_clean(true /* both_card_tables */);
915 _verifier->verify_after_gc();
916 _verifier->verify_bitmap_clear(false /* above_tams_only */);
917
918 // At this point there should be no regions in the
919 // entire heap tagged as young.
920 assert(check_young_list_empty(), "young list should be empty at this point");
921
922 // Note: since we've just done a full GC, concurrent
923 // marking is no longer active. Therefore we need not
924 // re-enable reference discovery for the CM ref processor.
925 // That will be done at the start of the next marking cycle.
926 // We also know that the STW processor should no longer
927 // discover any new references.
928 assert(!_ref_processor_stw->discovery_enabled(), "Postcondition");
929 assert(!_ref_processor_cm->discovery_enabled(), "Postcondition");
930 _ref_processor_stw->verify_no_references_recorded();
931 _ref_processor_cm->verify_no_references_recorded();
932 }
933
// Run a full, stop-the-world collection.
// - allocation_word_size: size in words of the allocation (if any) that
//   triggered the collection; forwarded to complete_collection() for the
//   post-GC resize heuristics. Zero when no allocation is pending.
// - clear_all_soft_refs: whether soft references are unconditionally cleared.
// - do_maximal_compaction: whether to compact as densely as possible.
void G1CollectedHeap::do_full_collection(size_t allocation_word_size,
                                         bool clear_all_soft_refs,
                                         bool do_maximal_compaction) {
  G1FullGCTracer tracer;
  G1GCMark gc_mark(&tracer, true /* is_full_gc */);
  GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause(), true);

  G1FullCollector collector(this, clear_all_soft_refs, do_maximal_compaction, &tracer);
  collector.prepare_collection();
  collector.collect();
  collector.complete_collection(allocation_word_size);
}
946
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Currently there is no facility in the do_full_collection(bool) API to
  // notify the caller whether the collection succeeded (e.g. whether it was
  // locked out by the GC locker), so any such failure is silently ignored.

  // No allocation triggered this request, hence a zero allocation word size
  // for the post-GC resize heuristics.
  do_full_collection(size_t(0) /* allocation_word_size */,
                     clear_all_soft_refs,
                     false /* do_maximal_compaction */);
}
956
// Escalate to a full collection that also clears soft references, setting
// the GC cause to a G1 compaction pause for the duration.
void G1CollectedHeap::upgrade_to_full_collection() {
  GCCauseSetter compaction(this, GCCause::_g1_compaction_pause);
  log_info(gc, ergo)("Attempting full compaction clearing soft references");
  do_full_collection(size_t(0) /* allocation_word_size */,
                     true /* clear_all_soft_refs */,
                     false /* do_maximal_compaction */);
}
964
965
966 void G1CollectedHeap::resize_heap(size_t resize_bytes, bool should_expand) {
967 if (should_expand) {
968 expand(resize_bytes, _workers);
969 } else {
970 shrink(resize_bytes);
971 uncommit_regions_if_necessary();
972 }
973 }
974
975 void G1CollectedHeap::resize_heap_after_full_collection(size_t allocation_word_size) {
976 assert_at_safepoint_on_vm_thread();
977
978 bool should_expand;
979 size_t resize_bytes = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size);
980
981 if (resize_bytes != 0) {
982 resize_heap(resize_bytes, should_expand);
983 }
984 }
985
986 void G1CollectedHeap::resize_heap_after_young_collection(size_t allocation_word_size) {
987 Ticks start = Ticks::now();
988
989 bool should_expand;
990
991 size_t resize_bytes = _heap_sizing_policy->young_collection_resize_amount(should_expand, allocation_word_size);
992
993 if (resize_bytes != 0) {
994 resize_heap(resize_bytes, should_expand);
995 }
996
997 phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
998 }
999
// Track how many consecutive updates have seen the GC overhead limits
// exceeded: the counter is incremented when both the long-term GC time ratio
// is at or above GCTimeLimit and the free-space percentage is below
// GCHeapFreeLimit; otherwise it is reset to zero. Consumed by
// gc_overhead_limit_exceeded().
void G1CollectedHeap::update_gc_overhead_counter() {
  assert(SafepointSynchronize::is_at_safepoint(), "precondition");

  // Overhead tracking is optional.
  if (!UseGCOverheadLimit) {
    return;
  }

  bool gc_time_over_limit = (_policy->analytics()->long_term_gc_time_ratio() * 100) >= GCTimeLimit;
  // Free space is measured as available regions relative to the maximum
  // heap capacity.
  double free_space_percent = percent_of(num_available_regions() * G1HeapRegion::GrainBytes, max_capacity());
  bool free_space_below_limit = free_space_percent < GCHeapFreeLimit;

  log_debug(gc)("GC Overhead Limit: GC Time %f Free Space %f Counter %zu",
                (_policy->analytics()->long_term_gc_time_ratio() * 100),
                free_space_percent,
                _gc_overhead_counter);

  if (gc_time_over_limit && free_space_below_limit) {
    _gc_overhead_counter++;
  } else {
    // Both limits must be exceeded at consecutive updates; start over
    // as soon as either recovers.
    _gc_overhead_counter = 0;
  }
}
1022
// Returns whether the GC overhead limits have been exceeded at least
// GCOverheadLimitThreshold consecutive times; see update_gc_overhead_counter().
bool G1CollectedHeap::gc_overhead_limit_exceeded() {
  return _gc_overhead_counter >= GCOverheadLimitThreshold;
}
1026
// One step of the failed-allocation slow path: unless the GC overhead limit
// has been exceeded, try the allocation and then heap expansion; if both
// fail and do_gc is set, run a full collection and return null so the caller
// can retry afterwards.
// - word_size: size in words of the failed allocation.
// - do_gc: whether to perform a full collection when allocation + expansion fail.
// - maximal_compaction: if true, the full GC clears all soft references and
//   compacts maximally.
// - expect_null_mutator_alloc_region: forwarded to the safepoint allocation
//   attempt.
HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
                                                            bool do_gc,
                                                            bool maximal_compaction,
                                                            bool expect_null_mutator_alloc_region) {
  // Skip allocation if GC overhead limit has been exceeded to let the mutator run
  // into an OOME. It can either exit "gracefully" or try to free up memory asap.
  // For the latter situation, keep running GCs. If the mutator frees up enough
  // memory quickly enough, the overhead(s) will go below the threshold(s) again
  // and the VM may continue running.
  // If we did not continue garbage collections, the (gc overhead) limit may decrease
  // enough by itself to not count as exceeding the limit any more, in the worst
  // case bouncing back-and-forth all the time.
  if (!gc_overhead_limit_exceeded()) {
    // Let's attempt the allocation first.
    HeapWord* result =
      attempt_allocation_at_safepoint(word_size,
                                      expect_null_mutator_alloc_region);
    if (result != nullptr) {
      return result;
    }

    // In a G1 heap, we're supposed to keep allocation from failing by
    // incremental pauses. Therefore, at least for now, we'll favor
    // expansion over collection. (This might change in the future if we can
    // do something smarter than full collection to satisfy a failed alloc.)
    result = expand_and_allocate(word_size);
    if (result != nullptr) {
      return result;
    }
  }

  if (do_gc) {
    GCCauseSetter compaction(this, GCCause::_g1_compaction_pause);
    // Expansion didn't work, we'll try to do a Full GC.
    // If maximal_compaction is set we clear all soft references and don't
    // allow any dead wood to be left on the heap.
    if (maximal_compaction) {
      log_info(gc, ergo)("Attempting maximal full compaction clearing soft references");
    } else {
      log_info(gc, ergo)("Attempting full compaction");
    }
    do_full_collection(word_size /* allocation_word_size */,
                       maximal_compaction /* clear_all_soft_refs */,
                       maximal_compaction /* do_maximal_compaction */);
  }

  // Null here means "allocation did not succeed (yet)"; the caller decides
  // whether to retry after the full GC above.
  return nullptr;
}
1075
// Last-resort allocation path, run at a safepoint on the VM thread after an
// allocation has failed: alternate allocation attempts with increasingly
// aggressive full collections, finally giving up and returning null (OOME
// for the mutator).
HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
  assert_at_safepoint_on_vm_thread();

  // Update GC overhead limits after the initial garbage collection leading to this
  // allocation attempt.
  update_gc_overhead_counter();

  // Attempts to allocate followed by Full GC.
  HeapWord* result =
    satisfy_failed_allocation_helper(word_size,
                                     true,  /* do_gc */
                                     false, /* maximal_compaction */
                                     false /* expect_null_mutator_alloc_region */);

  if (result != nullptr) {
    return result;
  }

  // Attempts to allocate followed by Full GC that will collect all soft references.
  result = satisfy_failed_allocation_helper(word_size,
                                            true, /* do_gc */
                                            true, /* maximal_compaction */
                                            true /* expect_null_mutator_alloc_region */);

  if (result != nullptr) {
    return result;
  }

  // Attempts to allocate, no GC
  result = satisfy_failed_allocation_helper(word_size,
                                            false, /* do_gc */
                                            false, /* maximal_compaction */
                                            true /* expect_null_mutator_alloc_region */);

  if (result != nullptr) {
    return result;
  }

  if (gc_overhead_limit_exceeded()) {
    log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}
1124
// Attempts to expand the heap sufficiently to support an allocation of the
// given "word_size". If successful, performs the allocation and returns the
// address of the allocated block; otherwise returns null.

HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
  assert_at_safepoint_on_vm_thread();

  _verifier->verify_region_sets_optional();

  // Expand by at least MinHeapDeltaBytes, even for a smaller request.
  size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
  log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: %zuB",
                            word_size * HeapWordSize);


  if (expand(expand_bytes, _workers)) {
    _hrm.verify_optional();
    _verifier->verify_region_sets_optional();
    // Retry the allocation now that more space has been committed.
    return attempt_allocation_at_safepoint(word_size,
                                           false /* expect_null_mutator_alloc_region */);
  }
  return nullptr;
}
1148
// Commit enough regions to cover expand_bytes additional bytes, rounded up
// to page and region granularity. Returns false only when the heap is
// already fully expanded (no inactive regions left).
bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_workers) {
  assert(expand_bytes > 0, "precondition");

  size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
  aligned_expand_bytes = align_up(aligned_expand_bytes, G1HeapRegion::GrainBytes);

  uint num_regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes);

  log_debug(gc, ergo, heap)("Heap resize. Requested expansion amount: %zuB aligned expansion amount: %zuB (%u regions)",
                            expand_bytes, aligned_expand_bytes, num_regions_to_expand);

  if (num_inactive_regions() == 0) {
    log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)");
    return false;
  }

  uint expanded_by = _hrm.expand_by(num_regions_to_expand, pretouch_workers);
  // NOTE(review): expand_by() may commit fewer regions than requested (the
  // assert below only bounds the amount from above), yet this function still
  // returns true in that case -- confirm callers only rely on "expansion was
  // attempted and the heap was not already full".

  size_t actual_expand_bytes = expanded_by * G1HeapRegion::GrainBytes;
  assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
  policy()->record_new_heap_size(num_committed_regions());

  return true;
}
1173
// Try to commit one additional region, preferably on the given NUMA node.
// Returns false when the heap is already fully expanded.
bool G1CollectedHeap::expand_single_region(uint node_index) {
  uint expanded_by = _hrm.expand_on_preferred_node(node_index);

  if (expanded_by == 0) {
    assert(num_inactive_regions() == 0, "Should be no regions left, available: %u", num_inactive_regions());
    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
    return false;
  }

  policy()->record_new_heap_size(num_committed_regions());
  return true;
}
1186
// Uncommit up to shrink_bytes worth of regions. The request must already be
// region-aligned (the caller aligns it); the actual amount removed may be
// smaller than requested.
void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  assert(shrink_bytes > 0, "must be");
  assert(is_aligned(shrink_bytes, G1HeapRegion::GrainBytes),
         "Shrink request for %zuB not aligned to heap region size %zuB",
         shrink_bytes, G1HeapRegion::GrainBytes);

  uint num_regions_to_remove = (uint)(shrink_bytes / G1HeapRegion::GrainBytes);

  uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * G1HeapRegion::GrainBytes;

  log_debug(gc, ergo, heap)("Heap resize. Requested shrinking amount: %zuB actual shrinking amount: %zuB (%u regions)",
                            shrink_bytes, shrunk_bytes, num_regions_removed);
  if (num_regions_removed > 0) {
    policy()->record_new_heap_size(num_committed_regions());
  } else {
    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap shrinking operation failed)");
  }
}
1206
// Shrink the committed heap by at most shrink_bytes, aligned down to page
// and region granularity and clamped so the heap never drops below
// min_capacity(). May end up a no-op.
void G1CollectedHeap::shrink(size_t shrink_bytes) {
  if (capacity() == min_capacity()) {
    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap already at minimum)");
    return;
  }

  size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes);
  aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);

  // Clamp so that capacity() - aligned_shrink_bytes >= min_capacity().
  aligned_shrink_bytes = capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity());
  assert(is_aligned(aligned_shrink_bytes, G1HeapRegion::GrainBytes), "Bytes to shrink %zuB not aligned", aligned_shrink_bytes);

  log_debug(gc, ergo, heap)("Heap resize. Requested shrink amount: %zuB aligned shrink amount: %zuB",
                            shrink_bytes, aligned_shrink_bytes);

  if (aligned_shrink_bytes == 0) {
    log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (shrink request too small)");
    return;
  }

  _verifier->verify_region_sets_optional();

  // We should only reach here at the end of a Full GC or during Remark which
  // means we should not be holding to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
  _allocator->abandon_gc_alloc_regions();

  // Instead of tearing down / rebuilding the free lists here, we
  // could instead use the remove_all_pending() method on free_list to
  // remove only the ones that we need to remove.
  _hrm.remove_all_free_regions();
  shrink_helper(aligned_shrink_bytes);
  rebuild_region_sets(true /* free_list_only */);

  _hrm.verify_optional();
  _verifier->verify_region_sets_optional();
}
1244
// MT-safety checker installed on the master old region set; enforces the
// locking protocol documented in check_mt_safety() below.
class OldRegionSetChecker : public G1HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Old Set MT safety protocol:
    // (a) If we're at a safepoint, operations on the master old set
    // should be invoked:
    // - by the VM thread (which will serialize them), or
    // - by the GC workers while holding the FreeList_lock, if we're
    //   at a safepoint for an evacuation pause (this lock is taken
    //   anyway when an GC alloc region is retired so that a new one
    //   is allocated from the free list), or
    // - by the GC workers while holding the OldSets_lock, if we're at a
    //   safepoint for a cleanup pause.
    // (b) If we're not at a safepoint, operations on the master old set
    // should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                G1FreeList_lock->owned_by_self() || G1OldSets_lock->owned_by_self(),
                "master old set MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
    }
  }
  // Only old regions belong in this set.
  bool is_correct_type(G1HeapRegion* hr) { return hr->is_old(); }
  const char* get_description() { return "Old Regions"; }
};
1272
// MT-safety checker installed on the master humongous region set; enforces
// the locking protocol documented in check_mt_safety() below.
class HumongousRegionSetChecker : public G1HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Humongous Set MT safety protocol:
    // (a) If we're at a safepoint, operations on the master humongous
    // set should be invoked by either the VM thread (which will
    // serialize them) or by the GC workers while holding the
    // OldSets_lock.
    // (b) If we're not at a safepoint, operations on the master
    // humongous set should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                G1OldSets_lock->owned_by_self(),
                "master humongous set MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(),
                "master humongous set MT safety protocol outside a safepoint");
    }
  }
  // Only humongous regions belong in this set.
  bool is_correct_type(G1HeapRegion* hr) { return hr->is_humongous(); }
  const char* get_description() { return "Humongous Regions"; }
};
1296
// Construct the G1 heap object. Only sub-objects and default field values
// are set up here; the bulk of the work (heap reservation, worker threads,
// concurrent machinery) happens later in initialize(). The member
// initializers must stay in class declaration order -- do not reorder.
G1CollectedHeap::G1CollectedHeap() :
  CollectedHeap(),
  _gc_overhead_counter(0),
  _service_thread(nullptr),
  _periodic_gc_task(nullptr),
  _free_arena_memory_task(nullptr),
  _revise_young_length_task(nullptr),
  _workers(nullptr),
  _refinement_epoch(0),
  _last_synchronized_start(0),
  _last_refinement_epoch_start(0),
  _yield_duration_in_refinement_epoch(0),
  _last_safepoint_refinement_epoch(0),
  _collection_pause_end(Ticks::now()),
  _old_set("Old Region Set", new OldRegionSetChecker()),
  _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
  _bot(nullptr),
  _listener(),
  _numa(G1NUMA::create()),
  _hrm(),
  _allocator(nullptr),
  _allocation_failure_injector(),
  _verifier(nullptr),
  _summary_bytes_used(0),
  _bytes_used_during_gc(0),
  _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
  _old_evac_stats("Old", OldPLABSize, PLABWeight),
  _monitoring_support(nullptr),
  _num_humongous_objects(0),
  _num_humongous_reclaim_candidates(0),
  _collector_state(),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _eden(),
  _survivor(),
  _gc_timer_stw(new STWGCTimer()),
  _gc_tracer_stw(new G1NewTracer()),
  // _policy is created here so later initializers can reference it.
  _policy(new G1Policy(_gc_timer_stw)),
  _heap_sizing_policy(nullptr),
  _collection_set(this, _policy),
  _rem_set(nullptr),
  _card_set_config(),
  _card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()),
  _young_regions_cset_group(card_set_config(), &_card_set_freelist_pool, G1CSetCandidateGroup::YoungRegionId),
  _cm(nullptr),
  _cr(nullptr),
  _task_queues(nullptr),
  _partial_array_state_manager(nullptr),
  _ref_processor_stw(nullptr),
  _is_alive_closure_stw(this),
  _is_subject_to_discovery_stw(this),
  _ref_processor_cm(nullptr),
  _is_alive_closure_cm(),
  _is_subject_to_discovery_cm(this),
  _region_attr() {

  _verifier = new G1HeapVerifier(this);

  _allocator = new G1Allocator(this);

  _heap_sizing_policy = G1HeapSizingPolicy::create(this, _policy->analytics());

  _humongous_object_threshold_in_words = humongous_threshold_for(G1HeapRegion::GrainWords);

  // Since filler arrays are never referenced, we can make them region sized.
  // This simplifies filling up the region in case we have some potentially
  // unreferenced (by Java code, but still in use by native code) pinned objects
  // in there.
  _filler_array_max_size = G1HeapRegion::GrainWords;

  // Override the default _stack_chunk_max_size so that no humongous stack chunks are created
  _stack_chunk_max_size = _humongous_object_threshold_in_words;

  // One scanner task queue per GC worker thread.
  uint n_queues = ParallelGCThreads;
  _task_queues = new G1ScannerTasksQueueSet(n_queues);

  for (uint i = 0; i < n_queues; i++) {
    G1ScannerTasksQueue* q = new G1ScannerTasksQueue();
    _task_queues->register_queue(i, q);
  }

  _partial_array_state_manager = new PartialArrayStateManager(n_queues);

  _gc_tracer_stw->initialize();
}
1382
// Accessor for the manager of partial-array scanning state shared by the
// per-worker task queues.
PartialArrayStateManager* G1CollectedHeap::partial_array_state_manager() const {
  return _partial_array_state_manager;
}
1386
// Reserve memory for an auxiliary GC data structure (BOT, card table, mark
// bitmap) and wrap it in a region-to-space mapper.
// - description: name used for page-size tracing.
// - size: size of the data structure in bytes (may be aligned up below).
// - translation_factor: forwarded to G1RegionToSpaceMapper::create_mapper;
//   presumably the ratio of heap bytes to mapper bytes -- see that class.
G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
                                                                 size_t size,
                                                                 size_t translation_factor) {
  size_t preferred_page_size = os::page_size_for_region_unaligned(size, 1);

  // When a page size is given we don't want to mix large
  // and normal pages. If the size is not a multiple of the
  // page size it will be aligned up to achieve this.
  size_t alignment = os::vm_allocation_granularity();
  if (preferred_page_size != os::vm_page_size()) {
    alignment = MAX2(preferred_page_size, alignment);
    size = align_up(size, alignment);
  }

  // Allocate a new reserved space, preferring to use large pages.
  ReservedSpace rs = MemoryReserver::reserve(size,
                                             alignment,
                                             preferred_page_size,
                                             mtGC);

  size_t page_size = rs.page_size();
  G1RegionToSpaceMapper* result =
    G1RegionToSpaceMapper::create_mapper(rs,
                                         size,
                                         page_size,
                                         G1HeapRegion::GrainBytes,
                                         translation_factor,
                                         mtGC);

  os::trace_page_sizes_for_requested_size(description,
                                          size,
                                          preferred_page_size,
                                          rs.base(),
                                          rs.size(),
                                          page_size);

  return result;
}
1425
1426 jint G1CollectedHeap::initialize_concurrent_refinement() {
1427 jint ecode = JNI_OK;
1428 _cr = G1ConcurrentRefine::create(this, &ecode);
1429 return ecode;
1430 }
1431
1432 jint G1CollectedHeap::initialize_service_thread() {
1433 _service_thread = new G1ServiceThread();
1434 if (_service_thread->osthread() == nullptr) {
1435 vm_shutdown_during_initialization("Could not create G1ServiceThread");
1436 return JNI_ENOMEM;
1437 }
1438 return JNI_OK;
1439 }
1440
// One-time heap initialization: reserves the Java heap and all auxiliary
// data structures (BOT, card tables, mark bitmap), creates the barrier set,
// worker threads, remembered set, concurrent mark/refinement/service
// machinery, and commits the initial heap size.
// Returns JNI_OK on success or a JNI error code on failure.
jint G1CollectedHeap::initialize() {

  if (!os::is_thread_cpu_time_supported()) {
    vm_exit_during_initialization("G1 requires cpu time gathering support");
  }
  // Necessary to satisfy locking discipline assertions.

  MutexLocker x(Heap_lock);

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  size_t init_byte_size = InitialHeapSize;
  size_t reserved_byte_size = G1Arguments::heap_reserved_size_bytes();

  // Ensure that the sizes are properly aligned.
  Universe::check_alignment(init_byte_size, G1HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(reserved_byte_size, G1HeapRegion::GrainBytes, "g1 heap");
  Universe::check_alignment(reserved_byte_size, HeapAlignment, "g1 heap");

  // Reserve the maximum.

  // When compressed oops are enabled, the preferred heap base
  // is calculated by subtracting the requested size from the
  // 32Gb boundary and using the result as the base address for
  // heap reservation. If the requested size is not aligned to
  // G1HeapRegion::GrainBytes (i.e. the alignment that is passed
  // into the ReservedHeapSpace constructor) then the actual
  // base of the reserved heap may end up differing from the
  // address that was requested (i.e. the preferred heap base).
  // If this happens then we could end up using a non-optimal
  // compressed oops mode.

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_byte_size,
                                                     HeapAlignment);

  initialize_reserved_region(heap_rs);

  // Create the barrier set for the entire reserved region.
  G1CardTable* card_table = new G1CardTable(_reserved);
  G1CardTable* refinement_table = new G1CardTable(_reserved);

  G1BarrierSet* bs = new G1BarrierSet(card_table, refinement_table);
  assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");

  // Create space mappers.
  size_t page_size = heap_rs.page_size();
  G1RegionToSpaceMapper* heap_storage =
    G1RegionToSpaceMapper::create_mapper(heap_rs,
                                         heap_rs.size(),
                                         page_size,
                                         G1HeapRegion::GrainBytes,
                                         1,
                                         mtJavaHeap);
  if(heap_storage == nullptr) {
    vm_shutdown_during_initialization("Could not initialize G1 heap");
    return JNI_ERR;
  }

  os::trace_page_sizes("Heap",
                       min_capacity(),
                       reserved_byte_size,
                       heap_rs.base(),
                       heap_rs.size(),
                       page_size);
  heap_storage->set_mapping_changed_listener(&_listener);

  // Create storage for the BOT, card table and the bitmap.
  G1RegionToSpaceMapper* bot_storage =
    create_aux_memory_mapper("Block Offset Table",
                             G1BlockOffsetTable::compute_size(heap_rs.size() / HeapWordSize),
                             G1BlockOffsetTable::heap_map_factor());

  G1RegionToSpaceMapper* cardtable_storage =
    create_aux_memory_mapper("Card Table",
                             G1CardTable::compute_size(heap_rs.size() / HeapWordSize),
                             G1CardTable::heap_map_factor());

  G1RegionToSpaceMapper* refinement_cards_storage =
    create_aux_memory_mapper("Refinement Card Table",
                             G1CardTable::compute_size(heap_rs.size() / HeapWordSize),
                             G1CardTable::heap_map_factor());

  size_t bitmap_size = G1CMBitMap::compute_size(heap_rs.size());
  G1RegionToSpaceMapper* bitmap_storage =
    create_aux_memory_mapper("Mark Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());

  _hrm.initialize(heap_storage, bitmap_storage, bot_storage, cardtable_storage, refinement_cards_storage);
  card_table->initialize(cardtable_storage);
  refinement_table->initialize(refinement_cards_storage);

  BarrierSet::set_barrier_set(bs);

  {
    // Configure the SATB queue set thresholds.
    G1SATBMarkQueueSet& satbqs = bs->satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(G1SATBProcessCompletedThreshold);
    satbqs.set_buffer_enqueue_threshold_percentage(G1SATBBufferEnqueueingThresholdPercent);
  }

  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  guarantee((max_num_regions() - 1) <= max_region_idx, "too many regions");

  // The G1FromCardCache reserves card with value 0 as "invalid", so the heap must not
  // start within the first card.
  guarantee((uintptr_t)(heap_rs.base()) >= G1CardTable::card_size(), "Java heap must not start within the first card.");
  G1FromCardCache::initialize(max_num_regions());
  // Also create a G1 rem set.
  _rem_set = new G1RemSet(this);
  _rem_set->initialize(max_num_regions());

  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  guarantee(G1HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  guarantee(G1HeapRegion::CardsPerRegion < max_cards_per_region,
            "too many cards per region");

  G1HeapRegionRemSet::initialize(_reserved);

  G1FreeRegionList::set_unrealistically_long_length(max_num_regions() + 1);

  _bot = new G1BlockOffsetTable(reserved(), bot_storage);

  {
    size_t granularity = G1HeapRegion::GrainBytes;

    _region_attr.initialize(reserved(), granularity);
  }

  _workers = new WorkerThreads("GC Thread", ParallelGCThreads);
  if (_workers == nullptr) {
    return JNI_ENOMEM;
  }
  _workers->initialize_workers();

  _numa->set_region_info(G1HeapRegion::GrainBytes, page_size);

  // Create the G1ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_[reserved_]regions" is defined.)
  _cm = new G1ConcurrentMark(this, bitmap_storage);

  // Now expand into the initial heap size.
  if (!expand(init_byte_size, _workers)) {
    vm_shutdown_during_initialization("Failed to allocate initial heap.");
    return JNI_ENOMEM;
  }

  // Perform any initialization actions delegated to the policy.
  policy()->init(this, &_collection_set);

  jint ecode = initialize_concurrent_refinement();
  if (ecode != JNI_OK) {
    return ecode;
  }

  ecode = initialize_service_thread();
  if (ecode != JNI_OK) {
    return ecode;
  }

  // Create and schedule the periodic gc task on the service thread.
  _periodic_gc_task = new G1PeriodicGCTask("Periodic GC Task");
  _service_thread->register_task(_periodic_gc_task);

  _free_arena_memory_task = new G1MonotonicArenaFreeMemoryTask("Card Set Free Memory Task");
  _service_thread->register_task(_free_arena_memory_task);

  if (policy()->use_adaptive_young_list_length()) {
    _revise_young_length_task = new G1ReviseYoungLengthTask("Revise Young Length List Task");
    _service_thread->register_task(_revise_young_length_task);
  }

  // Here we allocate the dummy G1HeapRegion that is required by the
  // G1AllocRegion class.
  G1HeapRegion* dummy_region = _hrm.get_dummy_region();

  // We'll re-use the same region whether the alloc region will
  // require BOT updates or not and, if it doesn't, then a non-young
  // region will complain that it cannot support allocations without
  // BOT updates. So we'll tag the dummy region as eden to avoid that.
  dummy_region->set_eden();
  // Make sure it's full.
  dummy_region->set_top(dummy_region->end());
  G1AllocRegion::setup(this, dummy_region);

  _allocator->init_mutator_alloc_regions();

  // Do create of the monitoring and management support so that
  // values in the heap have been properly initialized.
  _monitoring_support = new G1MonitoringSupport(this);

  _collection_set.initialize(max_num_regions());

  start_new_collection_set();

  allocation_failure_injector()->reset();

  // Register CPU time counters for the various GC thread groups.
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_conc_mark);
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_conc_refine);
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_conc_refine_control);
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_service);

  G1InitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}
1654
void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. logging)
  // that are destroyed during shutdown.
  _cr->stop();              // Concurrent refinement.
  _service_thread->stop();  // G1 service thread.
  _cm->stop();              // Concurrent marking.
}

void G1CollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();

  // Remember when the suspendible threads were synchronized so that
  // safepoint_synchronize_end() can attribute the paused time to the
  // current refinement epoch.
  _last_synchronized_start = os::elapsed_counter();
}
1669
1670 void G1CollectedHeap::safepoint_synchronize_end() {
1671 jlong now = os::elapsed_counter();
1672 jlong synchronize_duration = now - _last_synchronized_start;
1673
1674 if (_last_safepoint_refinement_epoch == _refinement_epoch) {
1675 _yield_duration_in_refinement_epoch += synchronize_duration;
1676 } else {
1677 _last_refinement_epoch_start = now;
1678 _last_safepoint_refinement_epoch = _refinement_epoch;
1679 _yield_duration_in_refinement_epoch = 0;
1680 }
1681
1682 SuspendibleThreadSet::desynchronize();
1683 }
1684
// Re-baselines the refinement epoch start time and removes the portion of
// the accumulated yield duration that has already been accounted for.
void G1CollectedHeap::set_last_refinement_epoch_start(jlong epoch_start, jlong last_yield_duration) {
  _last_refinement_epoch_start = epoch_start;
  // We can only subtract yield time that has actually been accumulated.
  guarantee(_yield_duration_in_refinement_epoch >= last_yield_duration, "should be");
  _yield_duration_in_refinement_epoch -= last_yield_duration;
}

// Total time spent synchronized at safepoints during the current
// refinement epoch.
jlong G1CollectedHeap::yield_duration_in_refinement_epoch() {
  return _yield_duration_in_refinement_epoch;
}

void G1CollectedHeap::post_initialize() {
  // Shared post-initialization first, then G1-specific reference
  // processing setup.
  CollectedHeap::post_initialize();
  ref_processing_init();
}
1699
// Creates the two reference processors (concurrent-mark and STW) used by G1.
void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There are two reference processor instances. One is
  //   used to record and process discovered references
  //   during concurrent marking; the other is used to
  //   record and process references during STW pauses
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
  //   * Reference discovery is enabled at concurrent start.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc during remarking.
  //   * Reference discovery is MT (see below).
  //   * Reference discovery requires a barrier (see below).
  //   * Reference processing may or may not be MT
  //     (depending on the value of ParallelGCThreads).
  //   * A full GC disables reference discovery by the CM
  //     ref processor and abandons any entries on its
  //     discovered lists.
  //
  // * For the STW processor:
  //   * Non MT discovery is enabled at the start of a full GC.
  //   * Processing and enqueueing during a full GC is non-MT.
  //   * During a full GC, references are processed after marking.
  //
  //   * Discovery (may or may not be MT) is enabled at the start
  //     of an incremental evacuation pause.
  //   * References are processed near the end of a STW evacuation pause.
  //   * For both types of GC:
  //     * Discovery is atomic - i.e. not concurrent.
  //     * Reference discovery will not need a barrier.

  _is_alive_closure_cm.initialize(concurrent_mark());
  // Concurrent Mark ref processor
  _ref_processor_cm =
    new ReferenceProcessor(&_is_subject_to_discovery_cm,
                           ParallelGCThreads,                              // degree of mt processing
                           // We discover with the gc worker threads during Remark, so both
                           // thread counts must be considered for discovery.
                           MAX2(ParallelGCThreads, ConcGCThreads),         // degree of mt discovery
                           true,                                           // Reference discovery is concurrent
                           &_is_alive_closure_cm);                         // is alive closure

  // STW ref processor
  _ref_processor_stw =
    new ReferenceProcessor(&_is_subject_to_discovery_stw,
                           ParallelGCThreads,                    // degree of mt processing
                           ParallelGCThreads,                    // degree of mt discovery
                           false,                                // Reference discovery is not concurrent
                           &_is_alive_closure_stw);              // is alive closure
}
1754
// Currently committed heap size in bytes: the number of committed regions
// times the region size.
size_t G1CollectedHeap::capacity() const {
  return _hrm.num_committed_regions() * G1HeapRegion::GrainBytes;
}

// Bytes of committed but unused (free) space, as tracked by the region manager.
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm.total_free_bytes();
}
1762
1763 // Computes the sum of the storage used by the various regions.
1764 size_t G1CollectedHeap::used() const {
1765 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1766 return result;
1767 }
1768
// Used bytes excluding the currently active allocation regions (compare
// used() above); callers use this variant when the more precise value
// cannot be obtained safely.
size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}
1772
1773 class SumUsedClosure: public G1HeapRegionClosure {
1774 size_t _used;
1775 public:
1776 SumUsedClosure() : _used(0) {}
1777 bool do_heap_region(G1HeapRegion* r) {
1778 _used += r->used();
1779 return false;
1780 }
1781 size_t result() { return _used; }
1782 };
1783
1784 size_t G1CollectedHeap::recalculate_used() const {
1785 SumUsedClosure blk;
1786 heap_region_iterate(&blk);
1787 return blk.result();
1788 }
1789
1790 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
1791 return GCCause::is_user_requested_gc(cause) && ExplicitGCInvokesConcurrent;
1792 }
1793
1794 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
1795 switch (cause) {
1796 case GCCause::_g1_humongous_allocation: return true;
1797 case GCCause::_g1_periodic_collection: return G1PeriodicGCInvokesConcurrent;
1798 case GCCause::_wb_breakpoint: return true;
1799 case GCCause::_codecache_GC_aggressive: return true;
1800 case GCCause::_codecache_GC_threshold: return true;
1801 default: return is_user_requested_concurrent_full_gc(cause);
1802 }
1803 }
1804
void G1CollectedHeap::increment_old_marking_cycles_started() {
  // A new cycle may only start when the previous one has completed, i.e.
  // the started and completed counts are either equal or differ by one.
  assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
         _old_marking_cycles_started == _old_marking_cycles_completed + 1,
         "Wrong marking cycle count (started: %d, completed: %d)",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_started++;
}
1813
void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent,
                                                             bool whole_heap_examined) {
  MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);

  // We assume that if concurrent == true, then the caller is a
  // concurrent thread that has joined the Suspendible Thread
  // Set. If there's ever a cheap way to check this, we should add an
  // assert here.

  // Given that this method is called at the end of a Full GC or of a
  // concurrent cycle, and those can be nested (i.e., a Full GC can
  // interrupt a concurrent cycle), the number of full collections
  // completed should be either one (in the case where there was no
  // nesting) or two (when a Full GC interrupted a concurrent cycle)
  // behind the number of full collections started.

  // This is the case for the inner caller, i.e. a Full GC.
  assert(concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1) ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 2),
         "for inner caller (Full GC): _old_marking_cycles_started = %u "
         "is inconsistent with _old_marking_cycles_completed = %u",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  // This is the case for the outer caller, i.e. the concurrent cycle.
  assert(!concurrent ||
         (_old_marking_cycles_started == _old_marking_cycles_completed + 1),
         "for outer caller (concurrent cycle): "
         "_old_marking_cycles_started = %u "
         "is inconsistent with _old_marking_cycles_completed = %u",
         _old_marking_cycles_started, _old_marking_cycles_completed);

  _old_marking_cycles_completed += 1;
  if (whole_heap_examined) {
    // Signal that we have completed a visit to all live objects.
    record_whole_heap_examined_timestamp();
  }

  // We need to tell G1ConcurrentMark to update the state before
  // we wake up any waiters (especially when ExplicitGCInvokesConcurrent
  // is set) so that if a waiter requests another System.gc() it doesn't
  // incorrectly see that a marking cycle is still in progress.
  if (concurrent) {
    _cm->notify_concurrent_cycle_completed();
  }

  // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
  // for a full GC to finish that their wait is over.
  ml.notify_all();
}
1864
// External entry point for requesting a collection; delegates to
// try_collect() with no post-GC allocation request.
void G1CollectedHeap::collect(GCCause::Cause cause) {
  try_collect(0 /* allocation_word_size */, cause, collection_counters(this));
}
1868
// Return true if (x < y) with allowance for wraparound.
static bool gc_counter_less_than(uint x, uint y) {
  // In modular arithmetic, x precedes y iff the wrapping distance from
  // y back to x lands in the upper half of the value range.
  uint distance = x - y;
  return distance > (UINT_MAX / 2);
}
1873
// LOG_COLLECT_CONCURRENTLY(cause, msg, args...)
// Macro so msg printing is format-checked.
// Emits a trace-level gc log line prefixed with the current thread name
// and the GC cause, but only when the log target is enabled.
#define LOG_COLLECT_CONCURRENTLY(cause, ...)                            \
  do {                                                                  \
    LogTarget(Trace, gc) LOG_COLLECT_CONCURRENTLY_lt;                   \
    if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) {                     \
      ResourceMark rm; /* For thread name. */                           \
      LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
      LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \
                                       Thread::current()->name(),       \
                                       GCCause::to_string(cause));      \
      LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__);                    \
    }                                                                   \
  } while (0)

// Convenience wrapper logging the final outcome of a concurrent
// collection attempt.
#define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
  LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
1891
// Waits, if necessary, for an in-progress marking cycle relevant to this
// request to complete. Returns true if the request can be considered
// finished (a collection started after the request has completed), false
// if the caller needs to retry.
bool G1CollectedHeap::wait_full_mark_finished(GCCause::Cause cause,
                                              uint old_marking_started_before,
                                              uint old_marking_started_after,
                                              uint old_marking_completed_after) {
  // Request is finished if a full collection (concurrent or stw)
  // was started after this request and has completed, e.g.
  // started_before < completed_after.
  if (gc_counter_less_than(old_marking_started_before,
                           old_marking_completed_after)) {
    LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
    return true;
  }

  if (old_marking_started_after != old_marking_completed_after) {
    // If there is an in-progress cycle (possibly started by us), then
    // wait for that cycle to complete, e.g.
    // while completed_now < started_after.
    LOG_COLLECT_CONCURRENTLY(cause, "wait");
    MonitorLocker ml(G1OldGCCount_lock);
    while (gc_counter_less_than(_old_marking_cycles_completed,
                                old_marking_started_after)) {
      ml.wait();
    }
    // Request is finished if the collection we just waited for was
    // started after this request.
    if (old_marking_started_before != old_marking_started_after) {
      LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
      return true;
    }
  }
  return false;
}
1924
// After calling wait_full_mark_finished(), this method determines whether we
// previously failed for ordinary reasons (concurrent cycle in progress,
// whitebox has control). Returns true if the failure was for such an
// ordinary reason, in which case the caller should retry the VM operation.
static bool should_retry_vm_op(GCCause::Cause cause,
                               VM_G1TryInitiateConcMark* op) {
  if (op->cycle_already_in_progress()) {
    // If VMOp failed because a cycle was already in progress, it
    // is now complete. But it didn't finish this user-requested
    // GC, so try again.
    LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
    return true;
  } else if (op->whitebox_attached()) {
    // If WhiteBox wants control, wait for notification of a state
    // change in the controller, then try again. Don't wait for
    // release of control, since collections may complete while in
    // control. Note: This won't recognize a STW full collection
    // while waiting; we can't wait on multiple monitors.
    LOG_COLLECT_CONCURRENTLY(cause, "whitebox control stall");
    MonitorLocker ml(ConcurrentGCBreakpoints::monitor());
    if (ConcurrentGCBreakpoints::is_controlled()) {
      ml.wait();
    }
    return true;
  }
  return false;
}
1951
// Repeatedly attempts to schedule a concurrent-start pause for the given
// cause until the request is satisfied, becomes unnecessary, or cannot be
// fulfilled (shutdown). The termination condition is cause-specific; see
// the per-cause branches below. Returns whether the request succeeded.
bool G1CollectedHeap::try_collect_concurrently(size_t allocation_word_size,
                                               GCCause::Cause cause,
                                               uint gc_counter,
                                               uint old_marking_started_before) {
  assert_heap_not_locked();
  assert(should_do_concurrent_full_gc(cause),
         "Non-concurrent cause %s", GCCause::to_string(cause));

  for (uint i = 1; true; ++i) {
    // Try to schedule concurrent start evacuation pause that will
    // start a concurrent cycle.
    LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
    VM_G1TryInitiateConcMark op(allocation_word_size, gc_counter, cause);
    VMThread::execute(&op);

    // Request is trivially finished.
    if (cause == GCCause::_g1_periodic_collection) {
      LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, op.gc_succeeded());
      return op.gc_succeeded();
    }

    // If VMOp skipped initiating concurrent marking cycle because
    // we're shutting down, then we're done.
    if (op.is_shutting_down()) {
      LOG_COLLECT_CONCURRENTLY(cause, "skipped: terminating");
      return false;
    }

    // Lock to get consistent set of values.
    uint old_marking_started_after;
    uint old_marking_completed_after;
    {
      MutexLocker ml(Heap_lock);
      // Update gc_counter for retrying VMOp if needed. Captured here to be
      // consistent with the values we use below for termination tests.  If
      // a retry is needed after a possible wait, and another collection
      // occurs in the meantime, it will cause our retry to be skipped and
      // we'll recheck for termination with updated conditions from that
      // more recent collection.  That's what we want, rather than having
      // our retry possibly perform an unnecessary collection.
      gc_counter = total_collections();
      old_marking_started_after = _old_marking_cycles_started;
      old_marking_completed_after = _old_marking_cycles_completed;
    }

    if (cause == GCCause::_wb_breakpoint) {
      if (op.gc_succeeded()) {
        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
        return true;
      }
      // When _wb_breakpoint there can't be another cycle or deferred.
      assert(!op.cycle_already_in_progress(), "invariant");
      assert(!op.whitebox_attached(), "invariant");
      // Concurrent cycle attempt might have been cancelled by some other
      // collection, so retry.  Unlike other cases below, we want to retry
      // even if cancelled by a STW full collection, because we really want
      // to start a concurrent cycle.
      if (old_marking_started_before != old_marking_started_after) {
        LOG_COLLECT_CONCURRENTLY(cause, "ignoring STW full GC");
        old_marking_started_before = old_marking_started_after;
      }
    } else if (GCCause::is_codecache_requested_gc(cause)) {
      assert(allocation_word_size == 0, "must be");
      // For a CodeCache requested GC, before marking, progress is ensured as the
      // following Remark pause unloads code (and signals the requester such).
      // Otherwise we must ensure that it is restarted.
      //
      // For a CodeCache requested GC, a successful GC operation means that
      // (1) marking is in progress. I.e. the VMOp started the marking or a
      // Remark pause is pending from a different VM op; we will potentially
      // abort a mixed phase if needed.
      // (2) a new cycle was started (by this thread or some other), or
      // (3) a Full GC was performed.
      //
      // Cases (2) and (3) are detected together by a change to
      // _old_marking_cycles_started.
      //
      // Compared to other "automatic" GCs (see below), we do not consider being
      // in whitebox as sufficient too because we might be anywhere within that
      // cycle and we need to make progress.
      if (op.mark_in_progress() ||
          (old_marking_started_before != old_marking_started_after)) {
        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
        return true;
      }

      if (wait_full_mark_finished(cause,
                                  old_marking_started_before,
                                  old_marking_started_after,
                                  old_marking_completed_after)) {
        return true;
      }

      if (should_retry_vm_op(cause, &op)) {
        continue;
      }
    } else if (!GCCause::is_user_requested_gc(cause)) {
      assert(cause == GCCause::_g1_humongous_allocation ||
             cause == GCCause::_g1_periodic_collection,
             "Unsupported cause %s", GCCause::to_string(cause));

      // For an "automatic" (not user-requested) collection, we just need to
      // ensure that progress is made.
      //
      // Request is finished if any of
      // (1) the VMOp successfully performed a GC,
      // (2) a concurrent cycle was already in progress,
      // (3) whitebox is controlling concurrent cycles,
      // (4) a new cycle was started (by this thread or some other), or
      // (5) a Full GC was performed.
      // Cases (4) and (5) are detected together by a change to
      // _old_marking_cycles_started.
      if (op.gc_succeeded() ||
          op.cycle_already_in_progress() ||
          op.whitebox_attached() ||
          (old_marking_started_before != old_marking_started_after)) {
        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
        return true;
      }
    } else { // User-requested GC.
      // For a user-requested collection, we want to ensure that a complete
      // full collection has been performed before returning, but without
      // waiting for more than needed.

      // For user-requested GCs (unlike non-UR), a successful VMOp implies a
      // new cycle was started. That's good, because it's not clear what we
      // should do otherwise. Trying again just does back to back GCs.
      // Can't wait for someone else to start a cycle. And returning fails
      // to meet the goal of ensuring a full collection was performed.
      assert(!op.gc_succeeded() ||
             (old_marking_started_before != old_marking_started_after),
             "invariant: succeeded %s, started before %u, started after %u",
             BOOL_TO_STR(op.gc_succeeded()),
             old_marking_started_before, old_marking_started_after);

      if (wait_full_mark_finished(cause,
                                  old_marking_started_before,
                                  old_marking_started_after,
                                  old_marking_completed_after)) {
        return true;
      }

      // If VMOp was successful then it started a new cycle that the above
      // wait &etc should have recognized as finishing this request. This
      // differs from a non-user-request, where gc_succeeded does not imply
      // a new cycle was started.
      assert(!op.gc_succeeded(), "invariant");

      if (should_retry_vm_op(cause, &op)) {
        continue;
      }
    }

    // Collection failed and should be retried.
    assert(op.transient_failure(), "invariant");

    LOG_COLLECT_CONCURRENTLY(cause, "retry");
  }
}
2111
2112 bool G1CollectedHeap::try_collect(size_t allocation_word_size,
2113 GCCause::Cause cause,
2114 const G1GCCounters& counters_before) {
2115 if (should_do_concurrent_full_gc(cause)) {
2116 return try_collect_concurrently(allocation_word_size,
2117 cause,
2118 counters_before.total_collections(),
2119 counters_before.old_marking_cycles_started());
2120 } else if (cause == GCCause::_wb_young_gc
2121 DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
2122
2123 assert(allocation_word_size == 0, "must be");
2124 // Schedule a standard evacuation pause. We're setting word_size
2125 // to 0 which means that we are not requesting a post-GC allocation.
2126 VM_G1CollectForAllocation op(0, /* word_size */
2127 counters_before.total_collections(),
2128 cause);
2129 VMThread::execute(&op);
2130 return op.gc_succeeded();
2131 } else {
2132 // The only path to get here is because of a periodic collection using a Full GC
2133 // or WhiteBox full gc.
2134 assert(allocation_word_size == 0, "must be");
2135 // Schedule a Full GC.
2136 VM_G1CollectFull op(counters_before.total_collections(),
2137 counters_before.total_full_collections(),
2138 cause);
2139 VMThread::execute(&op);
2140 return op.gc_succeeded();
2141 }
2142 }
2143
2144 void G1CollectedHeap::start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause) {
2145 GCCauseSetter x(this, gc_cause);
2146
2147 // At this point we are supposed to start a concurrent cycle. We
2148 // will do so if one is not already in progress.
2149 bool should_start = policy()->force_concurrent_start_if_outside_cycle(gc_cause);
2150 if (should_start) {
2151 do_collection_pause_at_safepoint(0 /* allocation_word_size */);
2152 }
2153 }
2154
// p is in the heap iff it is within the reserved space and its region is
// available according to the region manager.
bool G1CollectedHeap::is_in(const void* p) const {
  return is_in_reserved(p) && _hrm.is_available(addr_to_region(p));
}
2158
2159 // Iteration functions.
2160
2161 // Iterates an ObjectClosure over all objects within a G1HeapRegion.
2162
2163 class IterateObjectClosureRegionClosure: public G1HeapRegionClosure {
2164 ObjectClosure* _cl;
2165 public:
2166 IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
2167 bool do_heap_region(G1HeapRegion* r) {
2168 if (!r->is_continues_humongous()) {
2169 r->object_iterate(_cl);
2170 }
2171 return false;
2172 }
2173 };
2174
2175 void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
2176 IterateObjectClosureRegionClosure blk(cl);
2177 heap_region_iterate(&blk);
2178 }
2179
// Parallel object iterator: each worker claims regions via the shared
// claimer and iterates the objects in the regions it claims.
class G1ParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  G1CollectedHeap*  _heap;
  G1HeapRegionClaimer _claimer;

public:
  // thread_num == 0 means "use the currently active worker count".
  G1ParallelObjectIterator(uint thread_num) :
      _heap(G1CollectedHeap::heap()),
      _claimer(thread_num == 0 ? G1CollectedHeap::heap()->workers()->active_workers() : thread_num) {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, worker_id, &_claimer);
  }
};

ParallelObjectIteratorImpl* G1CollectedHeap::parallel_object_iterator(uint thread_num) {
  return new G1ParallelObjectIterator(thread_num);
}

// Worker part of the parallel object iteration: visit objects of the
// regions claimed by this worker.
void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, G1HeapRegionClaimer* claimer) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
}

// Keep obj alive for the concurrent collector by enqueueing it via the
// G1 barrier set.
void G1CollectedHeap::keep_alive(oop obj) {
  G1BarrierSet::enqueue_preloaded(obj);
}
2207
// Applies cl to every region, in order, via the region manager.
void G1CollectedHeap::heap_region_iterate(G1HeapRegionClosure* cl) const {
  _hrm.iterate(cl);
}

// Same as above, but for closures that only need the region index.
void G1CollectedHeap::heap_region_iterate(G1HeapRegionIndexClosure* cl) const {
  _hrm.iterate(cl);
}

// Parallel region iteration; each worker starts at its own offset so
// workers begin in different parts of the heap.
void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(G1HeapRegionClosure* cl,
                                                                 G1HeapRegionClaimer *hrclaimer,
                                                                 uint worker_id) const {
  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
}

// Parallel region iteration with all workers starting from region 0.
void G1CollectedHeap::heap_region_par_iterate_from_start(G1HeapRegionClosure* cl,
                                                         G1HeapRegionClaimer *hrclaimer) const {
  _hrm.par_iterate(cl, hrclaimer, 0);
}

// Applies cl to every region in the collection set.
void G1CollectedHeap::collection_set_iterate_all(G1HeapRegionClosure* cl) {
  _collection_set.iterate(cl);
}

// Parallel variant of the above.
void G1CollectedHeap::collection_set_par_iterate_all(G1HeapRegionClosure* cl,
                                                     G1HeapRegionClaimer* hr_claimer,
                                                     uint worker_id) {
  _collection_set.par_iterate(cl, hr_claimer, worker_id);
}

// Applies cl to the regions of the current incremental part of the
// collection set, in parallel.
void G1CollectedHeap::collection_set_iterate_increment_from(G1HeapRegionClosure *cl,
                                                            G1HeapRegionClaimer* hr_claimer,
                                                            uint worker_id) {
  _collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id);
}
2242
// Applies cl to the regions given by their indices in the regions array,
// distributing work across workers. Each worker starts at a different
// position (proportional to its id) and wraps around, claiming regions
// via hr_claimer (if non-null) so each region is processed exactly once.
void G1CollectedHeap::par_iterate_regions_array(G1HeapRegionClosure* cl,
                                                G1HeapRegionClaimer* hr_claimer,
                                                const uint regions[],
                                                size_t length,
                                                uint worker_id) const {
  assert_at_safepoint();
  if (length == 0) {
    return;
  }
  uint total_workers = workers()->active_workers();

  // Spread the starting positions evenly over the array.
  size_t start_pos = (worker_id * length) / total_workers;
  size_t cur_pos = start_pos;

  do {
    uint region_idx = regions[cur_pos];
    if (hr_claimer == nullptr || hr_claimer->claim_region(region_idx)) {
      G1HeapRegion* r = region_at(region_idx);
      bool result = cl->do_heap_region(r);
      guarantee(!result, "Must not cancel iteration");
    }

    // Advance with wrap-around until we are back at our start position.
    cur_pos++;
    if (cur_pos == length) {
      cur_pos = 0;
    }
  } while (cur_pos != start_pos);
}
2271
2272 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2273 G1HeapRegion* hr = heap_region_containing(addr);
2274 // The CollectedHeap API requires us to not fail for any given address within
2275 // the heap. G1HeapRegion::block_start() has been optimized to not accept addresses
2276 // outside of the allocated area.
2277 if (addr >= hr->top()) {
2278 return nullptr;
2279 }
2280 return hr->block_start(addr);
2281 }
2282
// Returns whether the block at addr is a (live) object, delegating the
// decision to its region.
bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
  G1HeapRegion* hr = heap_region_containing(addr);
  return hr->block_is_obj(addr, hr->parsable_bottom_acquire());
}

// TLAB capacity derived from the target eden length.
size_t G1CollectedHeap::tlab_capacity() const {
  return eden_target_length() * G1HeapRegion::GrainBytes;
}

// Bytes currently counted as used for TLAB purposes: the whole eden.
size_t G1CollectedHeap::tlab_used() const {
  return _eden.length() * G1HeapRegion::GrainBytes;
}

// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be equal to the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
  return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
}

size_t G1CollectedHeap::unsafe_max_tlab_alloc() const {
  return _allocator->unsafe_max_tlab_alloc();
}

// Maximum heap size: the maximum number of regions times the region size.
size_t G1CollectedHeap::max_capacity() const {
  return max_num_regions() * G1HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::min_capacity() const {
  return MinHeapSize;
}
2313
// Delegates verification preparation to the heap verifier.
void G1CollectedHeap::prepare_for_verify() {
  _verifier->prepare_for_verify();
}

// Runs heap verification with the given verification option.
void G1CollectedHeap::verify(VerifyOption vo) {
  _verifier->verify(vo);
}

// G1 supports WhiteBox concurrent GC breakpoints.
bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
  return true;
}
2325
2326 class G1PrintRegionClosure: public G1HeapRegionClosure {
2327 outputStream* _st;
2328 public:
2329 G1PrintRegionClosure(outputStream* st) : _st(st) {}
2330 bool do_heap_region(G1HeapRegion* r) {
2331 r->print_on(_st);
2332 return false;
2333 }
2334 };
2335
// Liveness check used during verification; picks the marking information
// (concurrent vs full) according to the verification option.
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const G1HeapRegion* hr,
                                       const VerifyOption vo) const {
  switch (vo) {
    case VerifyOption::G1UseConcMarking: return is_obj_dead(obj, hr);
    case VerifyOption::G1UseFullMarking: return is_obj_dead_full(obj, hr);
    default:                             ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}

// As above, without a pre-computed region.
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const VerifyOption vo) const {
  switch (vo) {
    case VerifyOption::G1UseConcMarking: return is_obj_dead(obj);
    case VerifyOption::G1UseFullMarking: return is_obj_dead_full(obj);
    default:                             ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}
2356
2357 void G1CollectedHeap::print_heap_regions() const {
2358 LogTarget(Trace, gc, heap, region) lt;
2359 if (lt.is_enabled()) {
2360 LogStream ls(lt);
2361 print_regions_on(&ls);
2362 }
2363 }
2364
// Prints "<count> <type> (<total megabytes>M)" followed by ", ", or by a
// newline for the last entry of the list.
static void print_region_type(outputStream* st, const char* type, uint count, bool last = false) {
  st->print("%u %s (%zuM)%s", count, type, count * G1HeapRegion::GrainBytes / M, last ? "\n" : ", ");
}
2368
// Prints a one-line heap summary followed by per-region-type counts and,
// when NUMA is enabled, the free region counts per NUMA node.
void G1CollectedHeap::print_heap_on(outputStream* st) const {
  // Use the more precise used() only when we already own the Heap_lock;
  // otherwise fall back to the lock-free approximation.
  size_t heap_used = (Thread::current_or_null_safe() != nullptr &&
                      Heap_lock->owned_by_self()) ? used() : used_unlocked();
  st->print("%-20s", "garbage-first heap");
  st->print(" total reserved %zuK, committed %zuK, used %zuK",
            _hrm.reserved().byte_size()/K, capacity()/K, heap_used/K);
  st->print(" [" PTR_FORMAT ", " PTR_FORMAT ")",
            p2i(_hrm.reserved().start()),
            p2i(_hrm.reserved().end()));
  st->cr();

  StreamIndentor si(st, 1);
  st->print("region size %zuM, ", G1HeapRegion::GrainBytes / M);
  print_region_type(st, "eden", eden_regions_count());
  print_region_type(st, "survivor", survivor_regions_count());
  print_region_type(st, "old", old_regions_count());
  print_region_type(st, "humongous", humongous_regions_count());
  print_region_type(st, "free", num_free_regions(), true /* last */);

  if (_numa->is_enabled()) {
    uint num_nodes = _numa->num_active_nodes();
    st->print("remaining free region(s) on each NUMA node: ");
    const uint* node_ids = _numa->node_ids();
    for (uint node_index = 0; node_index < num_nodes; node_index++) {
      uint num_free_regions = _hrm.num_free_regions(node_index);
      st->print("%u=%u ", node_ids[node_index], num_free_regions);
    }
    st->cr();
  }
}
2399
// Prints a legend followed by one line per heap region.
void G1CollectedHeap::print_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
               "HS=humongous(starts), HC=humongous(continues), "
               "CS=collection set, F=free, "
               "TAMS=top-at-mark-start, "
               "PB=parsable bottom");
  G1PrintRegionClosure blk(st);
  heap_region_iterate(&blk);
}

// Heap summary plus the full per-region listing.
void G1CollectedHeap::print_extended_on(outputStream* st) const {
  print_heap_on(st);

  // Print the per-region information.
  st->cr();
  print_regions_on(st);
}

// GC-time printing: per-region information, the barrier set, and the
// concurrent mark state (if created).
void G1CollectedHeap::print_gc_on(outputStream* st) const {
  // Print the per-region information.
  print_regions_on(st);
  st->cr();

  BarrierSet* bs = BarrierSet::barrier_set();
  if (bs != nullptr) {
    bs->print_on(st);
  }

  if (_cm != nullptr) {
    st->cr();
    _cm->print_on(st);
  }
}
2433
// Applies tc to all GC-related threads: parallel workers, concurrent mark
// threads, concurrent refinement threads and the service thread.
void G1CollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  workers()->threads_do(tc);
  _cm->threads_do(tc);
  _cr->threads_do(tc);
  tc->do_thread(_service_thread);
}

// Prints summary statistics of the remembered set and concurrent mark.
void G1CollectedHeap::print_tracing_info() const {
  rem_set()->print_summary_info();
  concurrent_mark()->print_summary_info();
}

// Best-effort description of what heap block addr points into (for
// debugging/error reporting).
bool G1CollectedHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<G1CollectedHeap>::print_location(st, addr);
}
2449
// Build a snapshot of current heap usage (eden/survivor/old/total) for
// heap tracing and monitoring.
G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {

  size_t eden_used_bytes = _monitoring_support->eden_space_used();
  size_t survivor_used_bytes = _monitoring_support->survivor_space_used();
  size_t old_gen_used_bytes = _monitoring_support->old_gen_used();
  // Use the unlocked variant when we do not already hold the Heap_lock.
  size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();

  // Eden capacity is the young-gen target size minus the space that
  // survivors currently occupy.
  size_t eden_capacity_bytes =
    (policy()->young_list_target_length() * G1HeapRegion::GrainBytes) - survivor_used_bytes;

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return G1HeapSummary(heap_summary, heap_used, eden_used_bytes, eden_capacity_bytes,
                       survivor_used_bytes, old_gen_used_bytes, num_committed_regions());
}
2464
// Wrap the given evacuation (PLAB) statistics into a summary object that
// can be reported via the GC tracer.
G1EvacSummary G1CollectedHeap::create_g1_evac_summary(G1EvacStats* stats) {
  return G1EvacSummary(stats->allocated(), stats->wasted(), stats->undo_wasted(),
                       stats->unused(), stats->used(), stats->region_end_waste(),
                       stats->regions_filled(), stats->num_plab_filled(),
                       stats->direct_allocated(), stats->num_direct_allocated(),
                       stats->failure_used(), stats->failure_waste());
}
2472
// Report heap and metaspace summaries to the tracer; "when" indicates
// whether this is the before- or after-GC report.
void G1CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const G1HeapSummary& heap_summary = create_g1_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}
2480
// Common bookkeeping at the start of any collection (young or full).
void G1CollectedHeap::gc_prologue(bool full) {
  // Update common counters.
  increment_total_collections(full /* full gc */);
  // Both full GCs and concurrent-start young GCs begin an old marking cycle.
  if (full || collector_state()->in_concurrent_start_gc()) {
    increment_old_marking_cycles_started();
  }
}
2488
// Common bookkeeping at the end of any collection (young or full).
void G1CollectedHeap::gc_epilogue(bool full) {
  // Update common counters.
  if (full) {
    // Update the number of full collections that have been completed.
    increment_old_marking_cycles_completed(false /* concurrent */, true /* liveness_completed */);
  }

#if COMPILER2_OR_JVMCI
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
#endif

  // We have just completed a GC. Update the soft reference
  // policy with the new heap occupancy
  Universe::heap()->update_capacity_and_used_at_gc();

  _collection_pause_end = Ticks::now();

  // Hand the freshly gathered card set memory statistics to the task that
  // frees unused arena memory.
  _free_arena_memory_task->notify_new_stats(&_young_gen_card_set_stats,
                                            &_collection_set_candidates_card_set_stats);

  update_perf_counter_cpu_time();
  // Advance the refinement epoch at the end of every GC.
  _refinement_epoch++;
}
2512
// Uncommit up to region_limit inactive regions; returns how many were
// actually uncommitted.
uint G1CollectedHeap::uncommit_regions(uint region_limit) {
  return _hrm.uncommit_inactive_regions(region_limit);
}

// Whether any inactive regions are waiting to be uncommitted.
bool G1CollectedHeap::has_uncommittable_regions() {
  return _hrm.has_inactive_regions();
}

// Enqueue the background uncommit task if there is anything to uncommit.
void G1CollectedHeap::uncommit_regions_if_necessary() {
  if (has_uncommittable_regions()) {
    G1UncommitRegionTask::enqueue();
  }
}
2526
// Trace-log, per region, whether the preferred NUMA node matches the actual
// one. Only does work when the gc+heap+verify Trace log target is enabled.
void G1CollectedHeap::verify_numa_regions(const char* desc) {
  LogTarget(Trace, gc, heap, verify) lt;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    // Iterate all heap regions to print matching between preferred numa id and actual numa id.
    G1NodeIndexCheckClosure cl(desc, _numa, &ls);
    heap_region_iterate(&cl);
  }
}
2537
// Request a collection pause (as a VM operation) to satisfy an allocation of
// word_size words. Returns the allocation result (possibly null); sets
// *succeeded to whether the GC was actually executed.
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                               uint gc_count_before,
                                               bool* succeeded,
                                               GCCause::Cause gc_cause) {
  assert_heap_not_locked_and_not_at_safepoint();
  VM_G1CollectForAllocation op(word_size, gc_count_before, gc_cause);
  VMThread::execute(&op);

  HeapWord* result = op.result();
  *succeeded = op.gc_succeeded();
  assert(result == nullptr || *succeeded,
         "the result should be null if the VM did not succeed");

  assert_heap_not_locked();
  return result;
}
2554
// Start a concurrent cycle: either a full concurrent mark, or an "undo"
// cycle when the full mark is not needed. Wakes up the concurrent mark
// thread waiting on G1CGC_lock.
void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_mark) {
  assert(_cm->is_fully_initialized(), "sanity");
  assert(!_cm->in_progress(), "Can not start concurrent operation while in progress");
  MutexLocker x(G1CGC_lock, Mutex::_no_safepoint_check_flag);
  if (concurrent_operation_is_full_mark) {
    _cm->start_full_concurrent_cycle();
  } else {
    _cm->start_undo_concurrent_cycle();
  }
  G1CGC_lock->notify();
}
2566
// Whether the (humongous) region may be eagerly reclaimed, based on its
// remembered set occupancy.
bool G1CollectedHeap::is_potential_eager_reclaim_candidate(G1HeapRegion* r) const {
  // We don't nominate objects with many remembered set entries, on
  // the assumption that such objects are likely still live.
  G1HeapRegionRemSet* rem_set = r->rem_set();

  return rem_set->occupancy_less_or_equal_than(G1EagerReclaimRemSetThreshold);
}
2574
#ifndef PRODUCT
// Debug-only check that each region's remembered-set tracking flag agrees
// with the region attribute table (new survivors are allowed to differ).
void G1CollectedHeap::verify_region_attr_is_remset_tracked() {
  class VerifyRegionAttrRemSet : public G1HeapRegionClosure {
  public:
    virtual bool do_heap_region(G1HeapRegion* r) {
      G1CollectedHeap* g1h = G1CollectedHeap::heap();
      G1HeapRegionAttr attr = g1h->region_attr(r->bottom());
      bool const is_remset_tracked = attr.is_remset_tracked();
      assert((r->rem_set()->is_tracked() == is_remset_tracked) ||
             (attr.is_new_survivor() && is_remset_tracked),
             "Region %u (%s) remset tracking status (%s) different to region attribute (%s)",
             r->hrm_index(), r->get_type_str(), BOOL_TO_STR(r->rem_set()->is_tracked()), BOOL_TO_STR(is_remset_tracked));
      return false;
    }
  } cl;
  heap_region_iterate(&cl);
}
#endif
2593
// Accumulate CPU time of all GC worker threads into the perf counters and
// publish the GC total. VM-thread only; no-op unless UsePerfData is set.
void G1CollectedHeap::update_perf_counter_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData) {
    return;
  }

  // Ensure ThreadTotalCPUTimeClosure destructor is called before publishing gc
  // time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads never terminate (JDK-8081682), so it is
    // safe for VMThread to read their CPU times. However, if JDK-8087340 is
    // resolved so they terminate, we should rethink if it is still safe.
    workers()->threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}
2613
// Begin a new incremental collection set for the next mutator phase:
// reset the collection set, clear region attributes, and seed it with the
// current survivor regions.
void G1CollectedHeap::start_new_collection_set() {
  collection_set()->start();

  clear_region_attr();

  guarantee(_eden.length() == 0, "eden should have been cleared");
  policy()->transfer_survivors_to_cset(survivor());

  // We redo the verification but now wrt to the new CSet which
  // has just got initialized after the previous CSet was freed.
  _cm->verify_no_collection_set_oops();
}
2626
// Pre-GC heap verification for young collections; gated on VerifyBeforeGC
// and the verification type. Records the time spent in the phase times.
void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyType type) {
  if (!VerifyBeforeGC) {
    return;
  }
  if (!G1HeapVerifier::should_verify(type)) {
    return;
  }
  Ticks start = Ticks::now();
  _verifier->prepare_for_verify();
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc();
  verify_numa_regions("GC Start");
  phase_times()->record_verify_before_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
}
2641
// Post-GC heap verification for young collections; gated on VerifyAfterGC
// and the verification type. Records the time spent in the phase times.
void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
  if (!VerifyAfterGC) {
    return;
  }
  if (!G1HeapVerifier::should_verify(type)) {
    return;
  }
  Ticks start = Ticks::now();
  _verifier->verify_after_gc();
  verify_numa_regions("GC End");
  _verifier->verify_region_sets_optional();

  // After a concurrent-start pause additionally check the marking state.
  if (collector_state()->in_concurrent_start_gc()) {
    log_debug(gc, verify)("Marking state");
    _verifier->verify_marking_state();
  }
  _verifier->verify_free_regions_card_tables_clean();

  phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
}
2662
// Scoped printer: emits before-GC heap/remset information on construction
// and the corresponding after-GC information on destruction.
G1HeapPrinterMark::G1HeapPrinterMark(G1CollectedHeap* g1h) : _g1h(g1h), _heap_transition(g1h) {
  // This summary needs to be printed before incrementing total collections.
  _g1h->rem_set()->print_periodic_summary_info("Before GC RS summary",
                                               _g1h->total_collections(),
                                               true /* show_thread_times */);
  _g1h->print_before_gc();
  _g1h->print_heap_regions();
}

G1HeapPrinterMark::~G1HeapPrinterMark() {
  _g1h->policy()->print_age_table();
  _g1h->rem_set()->print_coarsen_stats();
  // We are at the end of the GC. Total collections has already been increased.
  _g1h->rem_set()->print_periodic_summary_info("After GC RS summary",
                                               _g1h->total_collections() - 1,
                                               false /* show_thread_times */);

  _heap_transition.print();
  _g1h->print_heap_regions();
  _g1h->print_after_gc();
  // Print NUMA statistics.
  _g1h->numa()->print_statistics();
}
2686
// Scoped JFR tracer: reports GC start and the before-GC heap state on
// construction, and GC end plus the after-GC heap state on destruction.
G1JFRTracerMark::G1JFRTracerMark(STWGCTimer* timer, GCTracer* tracer) :
  _timer(timer), _tracer(tracer) {

  _timer->register_gc_start();
  _tracer->report_gc_start(G1CollectedHeap::heap()->gc_cause(), _timer->gc_start());
  G1CollectedHeap::heap()->trace_heap_before_gc(_tracer);
}

G1JFRTracerMark::~G1JFRTracerMark() {
  G1CollectedHeap::heap()->trace_heap_after_gc(_tracer);
  _timer->register_gc_end();
  _tracer->report_gc_end(_timer->gc_end(), _timer->time_partitions());
}
2700
// Prepare for resuming mutators after a young collection: resize PLABs,
// set up a fresh collection set and re-initialize mutator alloc regions.
// The time taken is recorded in the phase times.
void G1CollectedHeap::prepare_for_mutator_after_young_collection() {
  Ticks start = Ticks::now();

  _survivor_evac_stats.adjust_desired_plab_size();
  _old_evac_stats.adjust_desired_plab_size();

  // Start a new incremental collection set for the mutator phase.
  start_new_collection_set();
  _allocator->init_mutator_alloc_regions();

  phase_times()->record_prepare_for_mutator_time_ms((Ticks::now() - start).seconds() * 1000.0);
}

// Retire all thread-local allocation buffers (retire_tlabs == ensure the
// heap is parsable).
void G1CollectedHeap::retire_tlabs() {
  ensure_parsability(true);
}
2717
// Flush every Java thread's cached region pin counts back to the regions.
void G1CollectedHeap::flush_region_pin_cache() {
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    G1ThreadLocalData::pin_count_cache(thread).flush();
  }
}
2723
// Perform a young collection pause at a safepoint, and, if this was a
// concurrent-start pause, hand off to the concurrent mark thread afterwards.
void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_size) {
  G1GCMark gcm(_gc_tracer_stw, false /* is_full_gc */);

  _bytes_used_during_gc = 0;

  _cm->fully_initialize();

  policy()->decide_on_concurrent_start_pause();
  // Record whether this pause may need to trigger a concurrent operation. Later,
  // when we signal the G1ConcurrentMarkThread, the collector state has already
  // been reset for the next pause.
  bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();

  // Perform the collection.
  G1YoungCollector collector(gc_cause(), allocation_word_size);
  collector.collect();

  // It should now be safe to tell the concurrent mark thread to start
  // without its logging output interfering with the logging output
  // that came from the pause.
  if (should_start_concurrent_mark_operation) {
    verifier()->verify_bitmap_clear(true /* above_tams_only */);
    // CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
    // thread(s) could be running concurrently with us. Make sure that anything
    // after this point does not assume that we are the only GC thread running.
    // Note: of course, the actual marking work will not start until the safepoint
    // itself is released in SuspendibleThreadSet::desynchronize().
    start_concurrent_cycle(collector.concurrent_operation_is_full_mark());
    ConcurrentGCBreakpoints::notify_idle_to_active();
  }
}
2755
// Run the parallel cleaning task on all worker threads.
void G1CollectedHeap::complete_cleaning(bool class_unloading_occurred) {
  G1ParallelCleaningTask unlink_task(class_unloading_occurred);
  workers()->run_task(&unlink_task);
}
2760
// Unload dead classes and nmethods: unlink via the system dictionary and
// parallel cleaning, then purge/unregister/free nmethods and finally purge
// class loader data. Each step is separately timed for logging.
void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjectClosure* is_alive, GCTimer* timer) {
  GCTraceTime(Debug, gc, phases) debug(description, timer);

  ClassUnloadingContext ctx(workers()->active_workers(),
                            false /* unregister_nmethods_during_purge */,
                            false /* lock_nmethod_free_separately */);
  {
    // Unlinking scope: find and unlink dead entries while is_alive is valid.
    CodeCache::UnlinkingScope scope(is_alive);
    bool unloading_occurred = SystemDictionary::do_unloading(timer);
    GCTraceTime(Debug, gc, phases) t("G1 Complete Cleaning", timer);
    complete_cleaning(unloading_occurred);
  }
  {
    GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", timer);
    ctx.purge_nmethods();
  }
  {
    GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", timer);
    G1CollectedHeap::heap()->bulk_unregister_nmethods();
  }
  {
    GCTraceTime(Debug, gc, phases) t("Free Code Blobs", timer);
    ctx.free_nmethods();
  }
  {
    GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", timer);
    ClassLoaderDataGraph::purge(true /* at_safepoint */);
    DEBUG_ONLY(MetaspaceUtils::verify();)
  }
}
2791
// Worker task that removes unlinked nmethods from every region's code root
// set. Regions are distributed across workers via a region claimer.
class G1BulkUnregisterNMethodTask : public WorkerTask {
  G1HeapRegionClaimer _hrclaimer;

  // Per-region work: drop code roots for unlinked nmethods.
  class UnregisterNMethodsHeapRegionClosure : public G1HeapRegionClosure {
  public:

    bool do_heap_region(G1HeapRegion* hr) {
      hr->rem_set()->bulk_remove_code_roots();
      return false;
    }
  } _cl;

public:
  G1BulkUnregisterNMethodTask(uint num_workers)
  : WorkerTask("G1 Remove Unlinked NMethods From Code Root Set Task"),
    _hrclaimer(num_workers) { }

  void work(uint worker_id) {
    G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hrclaimer, worker_id);
  }
};
2813
2814 void G1CollectedHeap::bulk_unregister_nmethods() {
2815 uint num_workers = workers()->active_workers();
2816 G1BulkUnregisterNMethodTask t(num_workers);
2817 workers()->run_task(&t);
2818 }
2819
// Whether obj is subject to STW reference discovery: objects in the
// collection set or in survivor regions.
bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
  assert(obj != nullptr, "must not be null");
  assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
  // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
  // may falsely indicate that this is not the case here: however the collection set only
  // contains old regions when concurrent mark is not running.
  return _g1h->is_in_cset(obj) || _g1h->heap_region_containing(obj)->is_survivor();
}
2828
// During a concurrent-start pause, mark the head of the reference pending
// list in the bitmap so the concurrent cycle keeps the list alive.
void G1CollectedHeap::make_pending_list_reachable() {
  if (collector_state()->in_concurrent_start_gc()) {
    oop pll_head = Universe::reference_pending_list();
    if (pll_head != nullptr) {
      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
      _cm->mark_in_bitmap(0 /* worker_id */, pll_head);
    }
  }
}
2838
// Record the humongous object counts gathered during the pause.
void G1CollectedHeap::set_humongous_stats(uint num_humongous_total, uint num_humongous_candidates) {
  _num_humongous_objects = num_humongous_total;
  _num_humongous_reclaim_candidates = num_humongous_candidates;
}

// Collection set candidates should only be sampled when there are any.
bool G1CollectedHeap::should_sample_collection_set_candidates() const {
  const G1CollectionSetCandidates* candidates = collection_set()->candidates();
  return !candidates->is_empty();
}

// Record the card set memory statistics of the collection set candidates.
void G1CollectedHeap::set_collection_set_candidates_stats(G1MonotonicArenaMemoryStats& stats) {
  _collection_set_candidates_card_set_stats = stats;
}

// Record the card set memory statistics of the young generation.
void G1CollectedHeap::set_young_gen_card_set_stats(const G1MonotonicArenaMemoryStats& stats) {
  _young_gen_card_set_stats = stats;
}
2856
// Log and report evacuation allocation statistics: how many survivor/old
// regions were filled, feed old-gen allocation into the policy tracker and
// report per-destination evacuation summaries to the tracer.
void G1CollectedHeap::record_obj_copy_mem_stats() {
  size_t total_old_allocated = _old_evac_stats.allocated() + _old_evac_stats.direct_allocated();
  uint total_allocated = _survivor_evac_stats.regions_filled() + _old_evac_stats.regions_filled();

  log_debug(gc)("Allocated %u survivor %u old percent total %1.2f%% (%u%%)",
                _survivor_evac_stats.regions_filled(), _old_evac_stats.regions_filled(),
                percent_of(total_allocated, num_committed_regions() - total_allocated),
                G1ReservePercent);

  policy()->old_gen_alloc_tracker()->
    add_allocated_bytes_since_last_gc(total_old_allocated * HeapWordSize);

  _gc_tracer_stw->report_evacuation_statistics(create_g1_evac_summary(&_survivor_evac_stats),
                                               create_g1_evac_summary(&_old_evac_stats));
}
2872
// Clear the concurrent mark bitmap for the given region.
void G1CollectedHeap::clear_bitmap_for_region(G1HeapRegion* hr) {
  concurrent_mark()->clear_bitmap_for_region(hr);
}
2876
// Free a (non-free, non-empty, committed, unpinned) region: clear its
// metadata, update remset tracking, and optionally append it to free_list.
void G1CollectedHeap::free_region(G1HeapRegion* hr, G1FreeRegionList* free_list) {
  assert(!hr->is_free(), "the region should not be free");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
  assert(!hr->has_pinned_objects(),
         "must not free a region which contains pinned objects");

  // Reset region metadata to allow reuse.
  hr->hr_clear(true /* clear_space */);
  _policy->remset_tracker()->update_at_free(hr);

  if (free_list != nullptr) {
    free_list->add_ordered(hr);
  }
  if (VerifyDuringGC) {
    // Card and refinement table must be clear for freed regions.
    card_table()->verify_region(MemRegion(hr->bottom(), hr->end()), G1CardTable::clean_card_val(), true);
    refinement_table()->verify_region(MemRegion(hr->bottom(), hr->end()), G1CardTable::clean_card_val(), true);
  }
}
2897
// Add the region to the collection set candidates as a retained region.
// Takes the G1RareEvent_lock since candidates may be mutated concurrently.
void G1CollectedHeap::retain_region(G1HeapRegion* hr) {
  MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
  collection_set()->candidates()->add_retained_region_unsorted(hr);
}

// Free a humongous region: strip its humongous designation, then free it
// like any other region.
void G1CollectedHeap::free_humongous_region(G1HeapRegion* hr,
                                            G1FreeRegionList* free_list) {
  assert(hr->is_humongous(), "this is only for humongous regions");
  hr->clear_humongous();
  free_region(hr, free_list);
}
2909
2910 void G1CollectedHeap::remove_from_old_gen_sets(const uint old_regions_removed,
2911 const uint humongous_regions_removed) {
2912 if (old_regions_removed > 0 || humongous_regions_removed > 0) {
2913 MutexLocker x(G1OldSets_lock, Mutex::_no_safepoint_check_flag);
2914 _old_set.bulk_remove(old_regions_removed);
2915 _humongous_set.bulk_remove(humongous_regions_removed);
2916 }
2917
2918 }
2919
// Splice a non-empty list of free regions into the heap's free list,
// under the G1FreeList_lock.
void G1CollectedHeap::prepend_to_freelist(G1FreeRegionList* list) {
  assert(list != nullptr, "list can't be null");
  if (!list->is_empty()) {
    MutexLocker x(G1FreeList_lock, Mutex::_no_safepoint_check_flag);
    _hrm.insert_list_into_free_list(list);
  }
}
2927
// Decrease the heap's summary used-bytes accounting.
void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  decrease_used(bytes);
}

// Empty the eden region list.
void G1CollectedHeap::clear_eden() {
  _eden.clear();
}

// Reset the (incremental) collection set.
void G1CollectedHeap::clear_collection_set() {
  collection_set()->clear();
}

// Rebuild the free region list using the worker threads; the time taken is
// recorded in the phase times.
void G1CollectedHeap::rebuild_free_region_list() {
  Ticks start = Ticks::now();
  _hrm.rebuild_free_list(workers());
  phase_times()->record_total_rebuild_freelist_time_ms((Ticks::now() - start).seconds() * 1000.0);
}
2945
// Closure that detaches a region from the collection set: clears its region
// attribute entry and its young index.
class G1AbandonCollectionSetClosure : public G1HeapRegionClosure {
public:
  virtual bool do_heap_region(G1HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
    G1CollectedHeap::heap()->clear_region_attr(r);
    r->clear_young_index_in_cset();
    return false;
  }
};
2955
// Abandon the current collection set: detach every region from it, then
// discard the set itself.
void G1CollectedHeap::abandon_collection_set() {
  G1AbandonCollectionSetClosure cl;
  collection_set_iterate_all(&cl);

  collection_set()->abandon();
}
2962
// Estimate non-young (old + humongous) occupancy after a hypothetical
// allocation of allocation_word_size words.
size_t G1CollectedHeap::non_young_occupancy_after_allocation(size_t allocation_word_size) {
  // Current non-young occupancy, net of free space in the retained old region.
  const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes -
                               _allocator->free_bytes_in_retained_old_region();
  // Humongous allocations will always be assigned to non-young heap, so consider
  // that allocation in the result as well. Otherwise the allocation will always
  // be in young gen, so there is no need to account it here.
  return cur_occupancy + (is_humongous(allocation_word_size) ? allocation_used_bytes(allocation_word_size) : 0);
}
2971
// Whether hr is the retained old GC allocation region.
bool G1CollectedHeap::is_old_gc_alloc_region(G1HeapRegion* hr) {
  return _allocator->is_retained_old_region(hr);
}
2975
2976 #ifdef ASSERT
2977
// Debug closure that fails (and logs) if any region is still tagged young.
class NoYoungRegionsClosure: public G1HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool do_heap_region(G1HeapRegion* r) {
    if (r->is_young()) {
      log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                            p2i(r->bottom()), p2i(r->end()));
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};
2993
2994 bool G1CollectedHeap::check_young_list_empty() {
2995 bool ret = (young_regions_count() == 0);
2996
2997 NoYoungRegionsClosure closure;
2998 heap_region_iterate(&closure);
2999 ret = ret && closure.success();
3000
3001 return ret;
3002 }
3003
3004 #endif // ASSERT
3005
// Remove the given G1HeapRegion from the appropriate region set in
// preparation for a full compaction.
void G1CollectedHeap::prepare_region_for_full_compaction(G1HeapRegion* hr) {
  if (hr->is_humongous()) {
    _humongous_set.remove(hr);
  } else if (hr->is_old()) {
    _old_set.remove(hr);
  } else if (hr->is_young()) {
    // Note that emptying the eden and survivor lists is postponed and instead
    // done as the first step when rebuilding the regions sets again. The reason
    // for this is that during a full GC string deduplication needs to know if
    // a collected region was young or old when the full GC was initiated.
    hr->uninstall_surv_rate_group();
  } else {
    // We ignore free regions, we'll empty the free list afterwards.
    assert(hr->is_free(), "it cannot be another type");
  }
}
3023
// Increase the summary used-bytes accounting.
void G1CollectedHeap::increase_used(size_t bytes) {
  _summary_bytes_used += bytes;
}

// Decrease the summary used-bytes accounting; must not underflow.
void G1CollectedHeap::decrease_used(size_t bytes) {
  assert(_summary_bytes_used >= bytes,
         "invariant: _summary_bytes_used: %zu should be >= bytes: %zu",
         _summary_bytes_used, bytes);
  _summary_bytes_used -= bytes;
}

// Set the summary used-bytes accounting to an absolute value.
void G1CollectedHeap::set_used(size_t bytes) {
  _summary_bytes_used = bytes;
}
3038
// Closure used after a full GC to rebuild the region sets: empty regions go
// to the free list; when !_free_list_only, non-humongous occupied regions
// are moved to old and occupied bytes are totalled up.
class RebuildRegionSetsClosure : public G1HeapRegionClosure {
private:
  bool _free_list_only;          // If true, only rebuild the free list.

  G1HeapRegionSet* _old_set;
  G1HeapRegionSet* _humongous_set;

  G1HeapRegionManager* _hrm;

  size_t _total_used;            // Accumulated used bytes of occupied regions.

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           G1HeapRegionSet* old_set,
                           G1HeapRegionSet* humongous_set,
                           G1HeapRegionManager* hrm) :
    _free_list_only(free_list_only), _old_set(old_set),
    _humongous_set(humongous_set), _hrm(hrm), _total_used(0) {
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
      assert(_humongous_set->is_empty(), "pre-condition");
    }
  }

  bool do_heap_region(G1HeapRegion* r) {
    if (r->is_empty()) {
      assert(r->rem_set()->is_empty(), "Empty regions should have empty remembered sets.");
      // Add free regions to the free list
      r->set_free();
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      assert(r->rem_set()->is_empty(), "At this point remembered sets must have been cleared.");

      if (r->is_humongous()) {
        _humongous_set->add(r);
      } else {
        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
        // We now move all (non-humongous, non-old) regions to old gen,
        // and register them as such.
        r->move_to_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }

    return false;
  }

  size_t total_used() {
    return _total_used;
  }
};
3092
// Rebuild the region sets (and used-bytes accounting) after a full GC.
// When free_list_only, only the free list is rebuilt.
void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  assert_at_safepoint_on_vm_thread();

  if (!free_list_only) {
    // Emptying eden/survivor was deliberately postponed until now; see
    // prepare_region_for_full_compaction().
    _eden.clear();
    _survivor.clear();
  }

  RebuildRegionSetsClosure cl(free_list_only,
                              &_old_set, &_humongous_set,
                              &_hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
    set_used(cl.total_used());
  }
  assert_used_and_recalculate_used_equal(this);
}
3111
3112 // Methods for the mutator alloc region
3113
// Allocate a new eden region for mutator allocation, if policy allows.
// Returns null when no region should or could be allocated.
G1HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                        uint node_index) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  bool should_allocate = policy()->should_allocate_mutator_region();
  if (should_allocate) {
    G1HeapRegion* new_alloc_region = new_region(word_size,
                                                G1HeapRegionType::Eden,
                                                policy()->should_expand_on_mutator_allocation() /* do_expand */,
                                                node_index);
    if (new_alloc_region != nullptr) {
      // Register the region as eden with all interested parties.
      new_alloc_region->set_eden();
      _eden.add(new_alloc_region);
      _policy->set_region_eden(new_alloc_region);

      collection_set()->add_eden_region(new_alloc_region);
      G1HeapRegionPrinter::alloc(new_alloc_region);
      return new_alloc_region;
    }
  }
  return nullptr;
}
3135
// Retire a full (eden) mutator allocation region: account its used bytes
// and update the monitoring eden size.
void G1CollectedHeap::retire_mutator_alloc_region(G1HeapRegion* alloc_region,
                                                  size_t allocated_bytes) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

  increase_used(allocated_bytes);
  _eden.add_used_bytes(allocated_bytes);
  G1HeapRegionPrinter::retire(alloc_region);

  // We update the eden sizes here, when the region is retired,
  // instead of when it's allocated, since this is the point that its
  // used space has been recorded in _summary_bytes_used.
  monitoring_support()->update_eden_size();
}
3150
3151 // Methods for the GC alloc regions
3152
3153 bool G1CollectedHeap::has_more_regions(G1HeapRegionAttr dest) {
3154 if (dest.is_old()) {
3155 return true;
3156 } else {
3157 return survivor_regions_count() < policy()->max_survivor_regions();
3158 }
3159 }
3160
// Allocate a new GC allocation region (survivor or old, depending on dest).
// Returns null when the limit for the destination has been reached or no
// region could be obtained. Caller must hold the G1FreeList_lock.
G1HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index) {
  assert(G1FreeList_lock->owned_by_self(), "pre-condition");

  if (!has_more_regions(dest)) {
    return nullptr;
  }

  G1HeapRegionType type;
  if (dest.is_young()) {
    type = G1HeapRegionType::Survivor;
  } else {
    type = G1HeapRegionType::Old;
  }

  G1HeapRegion* new_alloc_region = new_region(word_size,
                                              type,
                                              true /* do_expand */,
                                              node_index);

  if (new_alloc_region != nullptr) {
    if (type.is_survivor()) {
      new_alloc_region->set_survivor();
      _survivor.add(new_alloc_region);
      // The remembered set/group cardset for this region will be installed at the
      // end of GC. Cannot do that right now because we still need the current young
      // gen cardset group.
      // However, register with the attribute table to collect remembered set entries
      // immediately as it is the only source for determining the need for remembered
      // set tracking during GC.
      register_new_survivor_region_with_region_attr(new_alloc_region);
    } else {
      new_alloc_region->set_old();
      // Update remembered set/cardset.
      _policy->remset_tracker()->update_at_allocate(new_alloc_region);
      // Synchronize with region attribute table.
      update_region_attr(new_alloc_region);
    }
    G1HeapRegionPrinter::alloc(new_alloc_region);
    return new_alloc_region;
  }
  return nullptr;
}
3203
// Retire a full GC allocation region: account used bytes to the proper
// generation and, during concurrent start, add it as a root region so that
// objects allocated in it are scanned by concurrent mark.
void G1CollectedHeap::retire_gc_alloc_region(G1HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             G1HeapRegionAttr dest) {
  _bytes_used_during_gc += allocated_bytes;
  if (dest.is_old()) {
    old_set_add(alloc_region);
  } else {
    assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
    _survivor.add_used_bytes(allocated_bytes);
  }

  bool const during_im = collector_state()->in_concurrent_start_gc();
  if (during_im && allocated_bytes > 0) {
    _cm->add_root_region(alloc_region);
  }
  G1HeapRegionPrinter::retire(alloc_region);
}
3221
// Mark an object that failed evacuation in the mark bitmap so it is treated
// as live. worker_id and obj_size are currently unused here.
void G1CollectedHeap::mark_evac_failure_object(uint worker_id, const oop obj, size_t obj_size) const {
  assert(!_cm->is_marked_in_bitmap(obj), "must be");

  _cm->raw_mark_in_bitmap(obj);
}
3227
// Optimized nmethod scanning
// Oop closure that records the nmethod as a code root in every region one
// of its (non-null) oop constants points into.
class RegisterNMethodOopClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  nmethod* _nm;

public:
  RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
    _g1h(g1h), _nm(nm) {}

  void do_oop(oop* p) {
    oop heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      G1HeapRegion* hr = _g1h->heap_region_containing(obj);
      assert(!hr->is_continues_humongous(),
             "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
             " starting at " HR_FORMAT,
             p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));

      hr->add_code_root(_nm);
    }
  }

  // Compressed oops are not expected in nmethod oop slots here.
  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
3253
3254 void G1CollectedHeap::register_nmethod(nmethod* nm) {
3255 guarantee(nm != nullptr, "sanity");
3256 RegisterNMethodOopClosure reg_cl(this, nm);
3257 nm->oops_do(®_cl);
3258 BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
3259 bs_nm->disarm(nm);
3260 }
3261
// Individual unregistration is intentionally unsupported in G1.
void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
  // We always unregister nmethods in bulk during code unloading only.
  ShouldNotReachHere();
}
3266
// Update used-bytes accounting after a collection. On evacuation failure
// the incremental accounting is unreliable, so recompute from scratch.
void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) {
  if (evacuation_failed) {
    set_used(recalculate_used());
  } else {
    // The "used" of the collection set have already been subtracted
    // when they were freed. Add in the bytes used.
    increase_used(_bytes_used_during_gc);
  }
}
3276
// NMethod closure that re-registers each nmethod with the heap, used to
// rebuild the per-region code root sets.
class RebuildCodeRootClosure: public NMethodClosure {
  G1CollectedHeap* _g1h;

public:
  RebuildCodeRootClosure(G1CollectedHeap* g1h) :
    _g1h(g1h) {}

  void do_nmethod(nmethod* nm) {
    assert(nm != nullptr, "Sanity");
    _g1h->register_nmethod(nm);
  }
};
3289
// Rebuild the per-region code root sets from all nmethods in the code cache.
void G1CollectedHeap::rebuild_code_roots() {
  RebuildCodeRootClosure nmethod_cl(this);
  CodeCache::nmethods_do(&nmethod_cl);
}
3294
// Serviceability hooks, all delegated to the monitoring support object.

void G1CollectedHeap::initialize_serviceability() {
  _monitoring_support->initialize_serviceability();
}

MemoryUsage G1CollectedHeap::memory_usage() {
  return _monitoring_support->memory_usage();
}

GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
  return _monitoring_support->memory_managers();
}

GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
  return _monitoring_support->memory_pools();
}
3310
// Fill [start, end) with a dummy (filler) object via the containing region.
void G1CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  G1HeapRegion* region = heap_region_containing(start);
  region->fill_with_dummy_object(start, pointer_delta(end, start), zap);
}
3315
// Start a code cache marking cycle unless one is already active; arm
// nmethod entry barriers when a concurrent mark is about to start.
void G1CollectedHeap::start_codecache_marking_cycle_if_inactive(bool concurrent_mark_start) {
  // We can reach here with an active code cache marking cycle either because the
  // previous G1 concurrent marking cycle was undone (if heap occupancy after the
  // concurrent start young collection was below the threshold) or aborted. See
  // CodeCache::on_gc_marking_cycle_finish() why this is. We must not start a new code
  // cache cycle then. If we are about to start a new g1 concurrent marking cycle we
  // still have to arm all nmethod entry barriers. They are needed for adding oop
  // constants to the SATB snapshot. Full GC does not need nmethods to be armed.
  if (!CodeCache::is_gc_marking_cycle_active()) {
    CodeCache::on_gc_marking_cycle_start();
  }
  if (concurrent_mark_start) {
    CodeCache::arm_all_nmethods();
  }
}
3331
// Finish the code cache marking cycle and re-arm all nmethod entry barriers.
void G1CollectedHeap::finish_codecache_marking_cycle() {
  CodeCache::on_gc_marking_cycle_finish();
  CodeCache::arm_all_nmethods();
}