/*
 * Copyright (c) 2013, 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

class ShenandoahPretouchHeapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahPretouchBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    AbstractGangTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size = MinHeapSize;
  size_t max_byte_size = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

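  // Pick the page sizes used for committing and pretouching the heap, the mark
  // bitmaps, and the region storage: large pages when enabled, regular VM pages otherwise.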
  size_t heap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  size_t region_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                          p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

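  // The mark bitmap spans the whole reserved heap, at one bitmap byte per
  // heap_map_factor() heap bytes; round it up to page granularity, so that
  // slices of it can be committed and uncommitted independently.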
  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

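  // A bitmap slice is the smallest unit of bitmap commit/uncommit. If one page
  // covers the bitmap data of several regions, those regions have to share a
  // slice; otherwise, each region's bitmap chunk is its own slice.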
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

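  // Commit enough of the bitmap to cover the initially committed regions,
  // rounded up to whole slices.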
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
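  // Align each region object to the cache line size, so that region headers
  // updated by different threads do not false-share cache lines.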
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If that is unsuccessful, bite the bullet and allocate at whatever address the OS gives us.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

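    // Probe progressively higher power-of-two addresses until one can be reserved.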
    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == NULL) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into a huge one. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //
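  // Per-worker liveness caches: each worker accumulates per-region live data
  // locally, so it can be flushed into the regions without contention.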
  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

void ShenandoahHeap::initialize_mode() {
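  // GC mode is selected with -XX:ShenandoahGCMode=<satb|iu|passive>.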
  if (ShenandoahGCMode != NULL) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != NULL, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(NULL),
  _safepoint_workers(NULL),
  _heap_region_special(false),
  _num_regions(0),
  _regions(NULL),
  _update_refs_iterator(this),
  _control_thread(NULL),
  _shenandoah_policy(policy),
  _gc_mode(NULL),
  _heuristics(NULL),
  _free_set(NULL),
  _pacer(NULL),
  _verifier(NULL),
  _phase_timings(NULL),
  _monitoring_support(NULL),
  _memory_pool(NULL),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(NULL),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(NULL),
  _collection_set(NULL)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                                    /* are_GC_task_threads */ true,
                                    /* are_ConcurrentGC_threads */ true);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelGCThreads,
                                                /* are_GC_task_threads */ false,
                                                /* are_ConcurrentGC_threads */ false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    AbstractGangTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()), proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()), proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects()) st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress()) st->print("marking, ");
  if (is_evacuation_in_progress()) st->print("evacuating, ");
  if (is_update_refs_in_progress()) st->print("updating refs, ");
  if (is_degenerated_gc_in_progress()) st->print("degenerated gc, ");
  if (is_full_gc_in_progress()) st->print("full gc, ");
  if (is_full_gc_move_in_progress()) st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress()) st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress()) st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != NULL) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (NULL)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != NULL, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because their max_size
  // cannot be determined yet. Instead, let the WorkGang initialize a GCLAB whenever
  // a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

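// Note: _used is a monitoring-style counter, updated with relaxed atomics
// below; readers are expected to tolerate slightly stale values.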
void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

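// Account a mutator allocation. "Waste" allocations (e.g. retired LAB remainders)
// count toward the allocation rate and the pacer budget, but not toward used().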
void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert (ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates at
  // the end of it. It is more efficient to uncommit from the end, so that the
  // application can enjoy the nearby committed regions. GC allocations are much
  // less frequent, and can therefore absorb the re-commit costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return NULL;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  assert (size <= actual_size, "allocation should fit");

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(gclab_buf, actual_size);
  } else {
    // ...and zap the just-allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != NULL) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = NULL;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // Allocation failed, block until control thread reacted, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // Then, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

    while (result == NULL && _progress_last_gc.is_set()) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }

  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert (req.is_lab_alloc() || (requested == actual),
            "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
            ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to the pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(NULL, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    AbstractGangTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing regions to
    // the trace log, which expects every humongous continuation region to follow a
    // humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

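  // Make the TLABs of all Java threads parsable and retire their GCLABs,
  // then retire the GCLABs of the GC worker threads.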
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != NULL) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  if (ShenandoahElasticTLAB) {
    // With Elastic TLABs, return the max allowed size, and let the allocation path
    // figure out the safe size for the current allocation.
    return ShenandoahHeapRegion::max_tlab_size_bytes();
  } else {
    return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
  }
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != NULL) {
    return r->block_start(addr);
  }
  return NULL;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  tcl->do_thread(_control_thread);
  workers()->threads_do(tcl);
  if (_safepoint_workers != NULL) {
    _safepoint_workers->threads_do(tcl);
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans
 * that we can control, we call SH::tlabs_retire, SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration())
    return;

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Work through the oop stack to traverse heap
  while (! oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}

bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}

void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
  ShenandoahHeapIterationRootScanner rp;
  rp.roots_do(oops);
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Closure for parallel object iteration.
class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots during the concurrent root phase; do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "Must be a valid oop");
      if (_bitmap->par_mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
public:
  ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
    _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p) { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as a preparation for the
// parallel marking queues.
// Every worker processes its own marking queue; work stealing is used
// to balance the workload.
class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  uint _num_workers;
  bool _init_ready;
  MarkBitMap* _aux_bit_map;
  ShenandoahHeap* _heap;
  ShenandoahScanObjectStack _roots_stack; // global roots stack
  ShenandoahObjToScanQueueSet* _task_queues;
public:
  ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
        _num_workers(num_workers),
        _init_ready(false),
        _aux_bit_map(bitmap),
        _heap(ShenandoahHeap::heap()) {
    // Initialize bitmap
    _init_ready = _heap->prepare_aux_bitmap_for_iteration();
    if (!_init_ready) {
      return;
    }

    ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
    _heap->scan_roots_for_iteration(&_roots_stack, &oops);

    _init_ready = prepare_worker_queues();
  }

  ~ShenandoahParallelObjectIterator() {
    // Reclaim bitmap
    _heap->reclaim_aux_bitmap_for_iteration();
    // Reclaim queue for workers
    if (_task_queues != NULL) {
      for (uint i = 0; i < _num_workers; ++i) {
        ShenandoahObjToScanQueue* q = _task_queues->queue(i);
        if (q != NULL) {
          delete q;
          _task_queues->register_queue(i, NULL);
        }
      }
      delete _task_queues;
      _task_queues = NULL;
    }
  }

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    if (_init_ready) {
      object_iterate_parallel(cl, worker_id, _task_queues);
    }
  }

private:
  // Divide global root_stack into worker queues
  bool prepare_worker_queues() {
    _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
    // Initialize a queue for every worker
    for (uint i = 0; i < _num_workers; ++i) {
      ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
      task_queue->initialize();
      _task_queues->register_queue(i, task_queue);
    }
    // Divide roots among the workers. Assume that the distribution of object references
    // correlates with root kind; use round-robin so that every worker gets the same chance
    // to process every kind of root.
    size_t roots_num = _roots_stack.size();
    if (roots_num == 0) {
      // No work to do
      return false;
    }

    for (uint j = 0; j < roots_num; j++) {
      uint stack_id = j % _num_workers;
      oop obj = _roots_stack.pop();
      _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
    }
    return true;
  }

  void object_iterate_parallel(ObjectClosure* cl,
                               uint worker_id,
                               ShenandoahObjToScanQueueSet* queue_set) {
    assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
    assert(queue_set != NULL, "task queue must not be NULL");

    ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
    assert(q != NULL, "object iterate queue must not be NULL");

    ShenandoahMarkTask t;
    ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);

    // Work through the queue to traverse heap.
    // Steal when there is no task in queue.
    while (q->pop(t) || queue_set->steal(worker_id, t)) {
      oop obj = t.obj();
      assert(oopDesc::is_oop(obj), "must be a valid oop");
      cl->do_object(obj);
      obj->oop_iterate(&oops);
    }
    assert(q->is_empty(), "should be empty");
  }
};

ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
  return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
}

// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
void ShenandoahHeap::keep_alive(oop obj) {
  if (is_concurrent_mark_in_progress() && (obj != NULL)) {
    ShenandoahBarrierSet::barrier_set()->enqueue(obj);
  }
}

void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
    blk->heap_region_do(current);
  }
}

class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionClosure* const _blk;

  shenandoah_padding(0);
  volatile size_t _index;
  shenandoah_padding(1);

public:
  ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
          AbstractGangTask("Shenandoah Parallel Region Operation"),
          _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    size_t stride = ShenandoahParallelRegionStride;

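    // Claim regions in strides: each fetch_and_add hands this worker a
    // contiguous batch of up to 'stride' regions to process.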
    size_t max = _heap->num_regions();
    while (Atomic::load(&_index) < max) {
      size_t cur = Atomic::fetch_and_add(&_index, stride, memory_order_relaxed);
      size_t start = cur;
      size_t end = MIN2(cur + stride, max);
      if (start >= max) break;

      for (size_t i = cur; i < end; i++) {
        ShenandoahHeapRegion* current = _heap->get_region(i);
        _blk->heap_region_do(current);
      }
    }
  }
};

void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
  assert(blk->is_thread_safe(), "Only thread-safe closures here");
  if (num_regions() > ShenandoahParallelRegionStride) {
    ShenandoahParallelHeapRegionTask task(blk);
    workers()->run_task(&task);
  } else {
    heap_region_iterate(blk);
  }
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;
public:
  ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
    if (r->is_active()) {
      // Check if the region needs its TAMS updated. We have already updated it during
      // concurrent reset, so it is very likely we do not need to do another write here.
1549 if (_ctx->top_at_mark_start(r) != r->top()) {
1550 _ctx->capture_top_at_mark_start(r);
1551 }
1552 } else {
1553 assert(_ctx->top_at_mark_start(r) == r->top(),
1554 "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1555 }
1556 }
1557
1558 bool is_thread_safe() { return true; }
1559 };
1560
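// A no-op handshake closure: executing it forces every Java thread through a
// handshake, which serves as a global rendezvous point with the mutators.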
1561 class ShenandoahRendezvousClosure : public HandshakeClosure {
1562 public:
1563 inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1564 inline void do_thread(Thread* thread) {}
1565 };
1566
1567 void ShenandoahHeap::rendezvous_threads() {
1568 ShenandoahRendezvousClosure cl;
1569 Handshake::execute(&cl);
1570 }
1571
1572 void ShenandoahHeap::recycle_trash() {
1573 free_set()->recycle_trash();
1574 }
1575
1576 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1577 private:
1578 ShenandoahMarkingContext* const _ctx;
1579 public:
1580 ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1581
1582 void heap_region_do(ShenandoahHeapRegion* r) {
1583 if (r->is_active()) {
      // Reset live data and set TAMS optimistically. We recheck these under the pause
      // anyway, to capture any updates that happen between now and then.
1586 r->clear_live_data();
1587 _ctx->capture_top_at_mark_start(r);
1588 }
1589 }
1590
1591 bool is_thread_safe() { return true; }
1592 };
1593
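// Prepare for the next marking cycle: reset mark bitmaps, then reset per-region
// live data and TAMS for all active regions.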
1594 void ShenandoahHeap::prepare_gc() {
1595 reset_mark_bitmap();
1596
1597 ShenandoahResetUpdateRegionStateClosure cl;
1598 parallel_heap_region_iterate(&cl);
1599 }
1600
1601 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1602 private:
1603 ShenandoahMarkingContext* const _ctx;
1604 ShenandoahHeapLock* const _lock;
1605
1606 public:
1607 ShenandoahFinalMarkUpdateRegionStateClosure() :
1608 _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1609
1610 void heap_region_do(ShenandoahHeapRegion* r) {
1611 if (r->is_active()) {
      // All allocations past TAMS are implicitly live; adjust the region data accordingly.
      // Bitmaps/TAMS are swapped at this point, so we need to consult the complete bitmap.
1614 HeapWord *tams = _ctx->top_at_mark_start(r);
1615 HeapWord *top = r->top();
1616 if (top > tams) {
1617 r->increase_live_data_alloc_words(pointer_delta(top, tams));
1618 }
1619
      // We are about to select the collection set; make sure it knows about the
      // current pinning status. This also allows trashing more regions whose
      // pinning status has since been dropped.
1623 if (r->is_pinned()) {
1624 if (r->pin_count() == 0) {
1625 ShenandoahHeapLocker locker(_lock);
1626 r->make_unpinned();
1627 }
1628 } else {
1629 if (r->pin_count() > 0) {
1630 ShenandoahHeapLocker locker(_lock);
1631 r->make_pinned();
1632 }
1633 }
1634
      // Remember the limit for updating refs. It is guaranteed that no
      // from-space refs are written from here on.
1637 r->set_update_watermark_at_safepoint(r->top());
1638 } else {
1639 assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1640 assert(_ctx->top_at_mark_start(r) == r->top(),
1641 "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1642 }
1643 }
1644
1645 bool is_thread_safe() { return true; }
1646 };
1647
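// After final mark: finalize region states, then choose the collection set and
// rebuild the free set, the latter two under the heap lock.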
1648 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1649 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1650 {
1651 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1652 ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1653 ShenandoahFinalMarkUpdateRegionStateClosure cl;
1654 parallel_heap_region_iterate(&cl);
1655
1656 assert_pinned_region_status();
1657 }
1658
1659 {
1660 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1661 ShenandoahPhaseTimings::degen_gc_choose_cset);
1662 ShenandoahHeapLocker locker(lock());
1663 _collection_set->clear();
1664 heuristics()->choose_collection_set(_collection_set);
1665 }
1666
1667 {
1668 ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1669 ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1670 ShenandoahHeapLocker locker(lock());
1671 _free_set->rebuild();
1672 }
1673 }
1674
1675 void ShenandoahHeap::do_class_unloading() {
1676 _unloader.unload();
1677 }
1678
1679 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1680 // Weak refs processing
1681 ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1682 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1683 ShenandoahTimingsTracker t(phase);
1684 ShenandoahGCWorkerPhase worker_phase(phase);
1685 ref_processor()->process_references(phase, workers(), false /* concurrent */);
1686 }
1687
1688 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1689 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1690
  // Evacuation is over, and no GCLABs are needed anymore. GCLABs are under the update
  // watermark (URWM), so we need to make them parsable for the update code to work
  // correctly. Plus, we can compute new sizes for future GCLABs here.
1694 if (UseTLAB) {
1695 ShenandoahGCPhase phase(concurrent ?
1696 ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1697 ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1698 gclabs_retire(ResizeTLAB);
1699 }
1700
1701 _update_refs_iterator.reset();
1702 }
1703
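// Publish the gc-state bitmap to every Java thread's thread-local copy, which is
// what the barrier fast paths consult.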
1704 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1705 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1706 ShenandoahThreadLocalData::set_gc_state(t, state);
1707 }
1708 }
1709
1710 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1711 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1712 _gc_state.set_cond(mask, value);
1713 set_gc_state_all_threads(_gc_state.raw_value());
1714 }
1715
1716 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1717 assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1718 set_gc_state_mask(MARKING, in_progress);
1719 ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1720 }
1721
1722 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1723 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1724 set_gc_state_mask(EVACUATION, in_progress);
1725 }
1726
1727 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1728 if (in_progress) {
1729 _concurrent_strong_root_in_progress.set();
1730 } else {
1731 _concurrent_strong_root_in_progress.unset();
1732 }
1733 }
1734
1735 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1736 set_gc_state_mask(WEAK_ROOTS, cond);
1737 }
1738
1739 GCTracer* ShenandoahHeap::tracer() {
1740 return shenandoah_policy()->tracer();
1741 }
1742
1743 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1744 return _free_set->used();
1745 }
1746
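// Atomically flip the cancellation flag from CANCELLABLE to CANCELLED. Only the
// single caller that wins the race observes CANCELLABLE and gets "true" back.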
1747 bool ShenandoahHeap::try_cancel_gc() {
1748 jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1749 return prev == CANCELLABLE;
1750 }
1751
1752 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1753 if (try_cancel_gc()) {
1754 FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1755 log_info(gc)("%s", msg.buffer());
1756 Events::log(Thread::current(), "%s", msg.buffer());
1757 }
1758 }
1759
1760 uint ShenandoahHeap::max_workers() {
1761 return _max_workers;
1762 }
1763
1764 void ShenandoahHeap::stop() {
1765 // The shutdown sequence should be able to terminate when GC is running.
1766
1767 // Step 0. Notify policy to disable event recording.
1768 _shenandoah_policy->record_shutdown();
1769
1770 // Step 1. Notify control thread that we are in shutdown.
1771 // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1772 // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1773 control_thread()->prepare_for_graceful_shutdown();
1774
1775 // Step 2. Notify GC workers that we are cancelling GC.
1776 cancel_gc(GCCause::_shenandoah_stop_vm);
1777
1778 // Step 3. Wait until GC worker exits normally.
1779 control_thread()->stop();
1780 }
1781
1782 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1783 if (!unload_classes()) return;
1784 // Unload classes and purge SystemDictionary.
1785 {
1786 ShenandoahPhaseTimings::Phase phase = full_gc ?
1787 ShenandoahPhaseTimings::full_gc_purge_class_unload :
1788 ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1789 ShenandoahGCPhase gc_phase(phase);
1790 ShenandoahGCWorkerPhase worker_phase(phase);
1791 bool purged_class = SystemDictionary::do_unloading(gc_timer());
1792
1793 ShenandoahIsAliveSelector is_alive;
1794 uint num_workers = _workers->active_workers();
1795 ShenandoahClassUnloadingTask unlink_task(phase, is_alive.is_alive_closure(), num_workers, purged_class);
1796 _workers->run_task(&unlink_task);
1797 }
1798
1799 {
1800 ShenandoahGCPhase phase(full_gc ?
1801 ShenandoahPhaseTimings::full_gc_purge_cldg :
1802 ShenandoahPhaseTimings::degen_gc_purge_cldg);
1803 ClassLoaderDataGraph::purge(/*at_safepoint*/true);
1804 }
1805 // Resize and verify metaspace
1806 MetaspaceGC::compute_new_size();
1807 DEBUG_ONLY(MetaspaceUtils::verify();)
1808 }
1809
// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if that cannot be done
// in concurrent cycles.
1814 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
1815 uint num_workers = _workers->active_workers();
1816 ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
1817 ShenandoahPhaseTimings::full_gc_purge_weak_par :
1818 ShenandoahPhaseTimings::degen_gc_purge_weak_par;
1819 ShenandoahGCPhase phase(timing_phase);
1820 ShenandoahGCWorkerPhase worker_phase(timing_phase);
1821 // Cleanup weak roots
1822 if (has_forwarded_objects()) {
1823 ShenandoahForwardedIsAliveClosure is_alive;
1824 ShenandoahUpdateRefsClosure keep_alive;
1825 ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
1826 cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
1827 _workers->run_task(&cleaning_task);
1828 } else {
1829 ShenandoahIsAliveClosure is_alive;
1830 #ifdef ASSERT
1831 ShenandoahAssertNotForwardedClosure verify_cl;
1832 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
1833 cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
1834 #else
1835 ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
1836 cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
1837 #endif
1838 _workers->run_task(&cleaning_task);
1839 }
1840 }
1841
1842 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
1843 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1844 assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
1845 ShenandoahGCPhase phase(full_gc ?
1846 ShenandoahPhaseTimings::full_gc_purge :
1847 ShenandoahPhaseTimings::degen_gc_purge);
1848 stw_weak_refs(full_gc);
1849 stw_process_weak_roots(full_gc);
1850 stw_unload_classes(full_gc);
1851 }
1852
1853 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1854 set_gc_state_mask(HAS_FORWARDED, cond);
1855 }
1856
1857 void ShenandoahHeap::set_unload_classes(bool uc) {
1858 _unload_classes.set_cond(uc);
1859 }
1860
1861 bool ShenandoahHeap::unload_classes() const {
1862 return _unload_classes.is_set();
1863 }
1864
1865 address ShenandoahHeap::in_cset_fast_test_addr() {
1866 ShenandoahHeap* heap = ShenandoahHeap::heap();
1867 assert(heap->collection_set() != NULL, "Sanity");
1868 return (address) heap->collection_set()->biased_map_address();
1869 }
1870
1871 address ShenandoahHeap::cancelled_gc_addr() {
1872 return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1873 }
1874
1875 address ShenandoahHeap::gc_state_addr() {
1876 return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1877 }
1878
1879 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1880 return Atomic::load(&_bytes_allocated_since_gc_start);
1881 }
1882
1883 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1884 Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1885 }
1886
1887 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1888 _degenerated_gc_in_progress.set_cond(in_progress);
1889 }
1890
1891 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1892 _full_gc_in_progress.set_cond(in_progress);
1893 }
1894
1895 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1896 assert (is_full_gc_in_progress(), "should be");
1897 _full_gc_move_in_progress.set_cond(in_progress);
1898 }
1899
1900 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1901 set_gc_state_mask(UPDATEREFS, in_progress);
1902 }
1903
1904 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1905 ShenandoahCodeRoots::register_nmethod(nm);
1906 }
1907
1908 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1909 ShenandoahCodeRoots::unregister_nmethod(nm);
1910 }
1911
1912 void ShenandoahHeap::flush_nmethod(nmethod* nm) {
1913 ShenandoahCodeRoots::flush_nmethod(nm);
1914 }
1915
1916 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
1917 heap_region_containing(o)->record_pin();
1918 return o;
1919 }
1920
1921 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
1922 ShenandoahHeapRegion* r = heap_region_containing(o);
1923 assert(r != NULL, "Sanity");
1924 assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
1925 r->record_unpin();
1926 }
1927
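// Reconcile each active region's pinned state with its current pin count, under
// the heap lock.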
1928 void ShenandoahHeap::sync_pinned_region_status() {
1929 ShenandoahHeapLocker locker(lock());
1930
1931 for (size_t i = 0; i < num_regions(); i++) {
1932 ShenandoahHeapRegion *r = get_region(i);
1933 if (r->is_active()) {
1934 if (r->is_pinned()) {
1935 if (r->pin_count() == 0) {
1936 r->make_unpinned();
1937 }
1938 } else {
1939 if (r->pin_count() > 0) {
1940 r->make_pinned();
1941 }
1942 }
1943 }
1944 }
1945
1946 assert_pinned_region_status();
1947 }
1948
1949 #ifdef ASSERT
1950 void ShenandoahHeap::assert_pinned_region_status() {
1951 for (size_t i = 0; i < num_regions(); i++) {
1952 ShenandoahHeapRegion* r = get_region(i);
1953 assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1954 "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1955 }
1956 }
1957 #endif
1958
1959 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1960 return _gc_timer;
1961 }
1962
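// Arm concurrent root processing: strong roots only need processing when there is
// a non-empty collection set, while weak roots are processed on every cycle.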
1963 void ShenandoahHeap::prepare_concurrent_roots() {
1964 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1965 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1966 set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1967 set_concurrent_weak_root_in_progress(true);
1968 if (unload_classes()) {
1969 _unloader.prepare();
1970 }
1971 }
1972
1973 void ShenandoahHeap::finish_concurrent_roots() {
1974 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1975 assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1976 if (unload_classes()) {
1977 _unloader.finish();
1978 }
1979 }
1980
1981 #ifdef ASSERT
1982 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1983 assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1984
1985 if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1986 if (UseDynamicNumberOfGCThreads) {
1987 assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1988 } else {
1989 // Use ParallelGCThreads inside safepoints
1990 assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1991 }
1992 } else {
1993 if (UseDynamicNumberOfGCThreads) {
1994 assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1995 } else {
1996 // Use ConcGCThreads outside safepoints
1997 assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1998 }
1999 }
2000 }
2001 #endif
2002
2003 ShenandoahVerifier* ShenandoahHeap::verifier() {
2004 guarantee(ShenandoahVerify, "Should be enabled");
2005 assert (_verifier != NULL, "sanity");
2006 return _verifier;
2007 }
2008
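// Update-refs task: for every active non-collection-set region, walk the marked
// objects up to the region's update watermark and update the from-space references
// they hold. CONCURRENT selects the concurrent or stop-the-world closure and
// worker session.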
2009 template<bool CONCURRENT>
2010 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2011 private:
2012 ShenandoahHeap* _heap;
2013 ShenandoahRegionIterator* _regions;
2014 public:
2015 ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2016 AbstractGangTask("Shenandoah Update References"),
2017 _heap(ShenandoahHeap::heap()),
2018 _regions(regions) {
2019 }
2020
2021 void work(uint worker_id) {
2022 if (CONCURRENT) {
2023 ShenandoahConcurrentWorkerSession worker_session(worker_id);
2024 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2025 do_work<ShenandoahConcUpdateRefsClosure>();
2026 } else {
2027 ShenandoahParallelWorkerSession worker_session(worker_id);
2028 do_work<ShenandoahSTWUpdateRefsClosure>();
2029 }
2030 }
2031
2032 private:
2033 template<class T>
2034 void do_work() {
2035 T cl;
2036 ShenandoahHeapRegion* r = _regions->next();
2037 ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2038 while (r != NULL) {
2039 HeapWord* update_watermark = r->get_update_watermark();
2040 assert (update_watermark >= r->bottom(), "sanity");
2041 if (r->is_active() && !r->is_cset()) {
2042 _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2043 }
2044 if (ShenandoahPacing) {
2045 _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2046 }
2047 if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2048 return;
2049 }
2050 r = _regions->next();
2051 }
2052 }
2053 };
2054
2055 void ShenandoahHeap::update_heap_references(bool concurrent) {
2056 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2057
2058 if (concurrent) {
2059 ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2060 workers()->run_task(&task);
2061 } else {
2062 ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2063 workers()->run_task(&task);
2064 }
2065 }
2066
2067
2068 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2069 private:
2070 ShenandoahHeapLock* const _lock;
2071
2072 public:
2073 ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2074
2075 void heap_region_do(ShenandoahHeapRegion* r) {
    // Drop the unnecessary "pinned" state from regions that do not have CP marks
    // anymore, as this allows trashing them.
2078
2079 if (r->is_active()) {
2080 if (r->is_pinned()) {
2081 if (r->pin_count() == 0) {
2082 ShenandoahHeapLocker locker(_lock);
2083 r->make_unpinned();
2084 }
2085 } else {
2086 if (r->pin_count() > 0) {
2087 ShenandoahHeapLocker locker(_lock);
2088 r->make_pinned();
2089 }
2090 }
2091 }
2092 }
2093
2094 bool is_thread_safe() { return true; }
2095 };
2096
2097 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2098 assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2099 assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2100
2101 {
2102 ShenandoahGCPhase phase(concurrent ?
2103 ShenandoahPhaseTimings::final_update_refs_update_region_states :
2104 ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2105 ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2106 parallel_heap_region_iterate(&cl);
2107
2108 assert_pinned_region_status();
2109 }
2110
2111 {
2112 ShenandoahGCPhase phase(concurrent ?
2113 ShenandoahPhaseTimings::final_update_refs_trash_cset :
2114 ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2115 trash_cset_regions();
2116 }
2117 }
2118
2119 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2120 {
2121 ShenandoahGCPhase phase(concurrent ?
2122 ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2123 ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2124 ShenandoahHeapLocker locker(lock());
2125 _free_set->rebuild();
2126 }
2127 }
2128
2129 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2130 print_on(st);
2131 st->cr();
2132 print_heap_regions_on(st);
2133 }
2134
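// Mark bitmaps are committed in slices that each cover _bitmap_regions_per_slice
// regions. Returns true if any region in r's slice (optionally excluding r itself)
// is committed, in which case the slice must stay committed.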
2135 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2136 size_t slice = r->index() / _bitmap_regions_per_slice;
2137
2138 size_t regions_from = _bitmap_regions_per_slice * slice;
2139 size_t regions_to = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2140 for (size_t g = regions_from; g < regions_to; g++) {
2141 assert (g / _bitmap_regions_per_slice == slice, "same slice");
2142 if (skip_self && g == r->index()) continue;
2143 if (get_region(g)->is_committed()) {
2144 return true;
2145 }
2146 }
2147 return false;
2148 }
2149
2150 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2151 shenandoah_assert_heaplocked();
2152
2153 // Bitmaps in special regions do not need commits
2154 if (_bitmap_region_special) {
2155 return true;
2156 }
2157
2158 if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; we can exit right away.
2161 return true;
2162 }
2163
2164 // Commit the bitmap slice:
2165 size_t slice = r->index() / _bitmap_regions_per_slice;
2166 size_t off = _bitmap_bytes_per_slice * slice;
2167 size_t len = _bitmap_bytes_per_slice;
2168 char* start = (char*) _bitmap_region.start() + off;
2169
2170 if (!os::commit_memory(start, len, false)) {
2171 return false;
2172 }
2173
2174 if (AlwaysPreTouch) {
2175 os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2176 }
2177
2178 return true;
2179 }
2180
2181 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2182 shenandoah_assert_heaplocked();
2183
2184 // Bitmaps in special regions do not need uncommits
2185 if (_bitmap_region_special) {
2186 return true;
2187 }
2188
2189 if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2192 return true;
2193 }
2194
2195 // Uncommit the bitmap slice:
2196 size_t slice = r->index() / _bitmap_regions_per_slice;
2197 size_t off = _bitmap_bytes_per_slice * slice;
2198 size_t len = _bitmap_bytes_per_slice;
2199 if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2200 return false;
2201 }
2202 return true;
2203 }
2204
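// Suspendible concurrent workers must be stopped before a safepoint can begin and
// resumed once it ends.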
2205 void ShenandoahHeap::safepoint_synchronize_begin() {
2206 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2207 SuspendibleThreadSet::synchronize();
2208 }
2209 }
2210
2211 void ShenandoahHeap::safepoint_synchronize_end() {
2212 if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2213 SuspendibleThreadSet::desynchronize();
2214 }
2215 }
2216
2217 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2218 static const char *msg = "Concurrent uncommit";
2219 ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2220 EventMark em("%s", msg);
2221
2222 op_uncommit(shrink_before, shrink_until);
2223 }
2224
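// Diagnostic support: with ShenandoahAllocFailureALot, roughly 5% of calls pretend
// the allocation failed, exercising the allocation-failure handling paths.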
2225 void ShenandoahHeap::try_inject_alloc_failure() {
2226 if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2227 _inject_alloc_failure.set();
2228 os::naked_short_sleep(1);
2229 if (cancelled_gc()) {
2230 log_info(gc)("Allocation failure was successfully injected");
2231 }
2232 }
2233 }
2234
2235 bool ShenandoahHeap::should_inject_alloc_failure() {
2236 return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2237 }
2238
2239 void ShenandoahHeap::initialize_serviceability() {
2240 _memory_pool = new ShenandoahMemoryPool(this);
2241 _cycle_memory_manager.add_pool(_memory_pool);
2242 _stw_memory_manager.add_pool(_memory_pool);
2243 }
2244
2245 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2246 GrowableArray<GCMemoryManager*> memory_managers(2);
2247 memory_managers.append(&_cycle_memory_manager);
2248 memory_managers.append(&_stw_memory_manager);
2249 return memory_managers;
2250 }
2251
2252 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2253 GrowableArray<MemoryPool*> memory_pools(1);
2254 memory_pools.append(_memory_pool);
2255 return memory_pools;
2256 }
2257
2258 MemoryUsage ShenandoahHeap::memory_usage() {
2259 return _memory_pool->get_memory_usage();
2260 }
2261
2262 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2263 _heap(ShenandoahHeap::heap()),
2264 _index(0) {}
2265
2266 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2267 _heap(heap),
2268 _index(0) {}
2269
2270 void ShenandoahRegionIterator::reset() {
2271 _index = 0;
2272 }
2273
2274 bool ShenandoahRegionIterator::has_next() const {
2275 return _index < _heap->num_regions();
2276 }
2277
2278 char ShenandoahHeap::gc_state() const {
2279 return _gc_state.raw_value();
2280 }
2281
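// Per-worker liveness cache: one counter per region, accumulated during marking and
// folded back into the regions by flush_liveness_cache(). Callers get a cache that
// is asserted to be empty.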
2282 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2283 #ifdef ASSERT
2284 assert(_liveness_cache != NULL, "sanity");
2285 assert(worker_id < _max_workers, "sanity");
2286 for (uint i = 0; i < num_regions(); i++) {
2287 assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2288 }
2289 #endif
2290 return _liveness_cache[worker_id];
2291 }
2292
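// Fold the worker's cached per-region live data into the regions, zeroing the
// cache entries as we go.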
2293 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2294 assert(worker_id < _max_workers, "sanity");
2295 assert(_liveness_cache != NULL, "sanity");
2296 ShenandoahLiveData* ld = _liveness_cache[worker_id];
2297 for (uint i = 0; i < num_regions(); i++) {
2298 ShenandoahLiveData live = ld[i];
2299 if (live > 0) {
2300 ShenandoahHeapRegion* r = get_region(i);
2301 r->increase_live_data_gc_words(live);
2302 ld[i] = 0;
2303 }
2304 }
2305 }
--- EOF ---