/*
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"

void ShenandoahMark::start_mark() {
  if (!CodeCache::is_gc_marking_cycle_active()) {
    CodeCache::on_gc_marking_cycle_start();
  }
}

void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) {
    CodeCache::on_gc_marking_cycle_finish();
  }
}

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _old_queue(old_q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }

ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}

template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
  ShenandoahObjToScanQueue* q = get_queue(w);
  ShenandoahObjToScanQueue* old_q = get_old_queue(w);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahLiveData* ld = heap->get_liveness_cache(w);

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
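  // When the heap may still contain forwarded objects (update_refs is set from
  // has_forwarded_objects() by the caller), marking also updates references as it
  // visits them; otherwise the plain marking closure is sufficient.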
  if (update_refs) {
    using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
    Closure cl(q, rp, old_q);
    mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  } else {
    using Closure = ShenandoahMarkRefsClosure<GENERATION>;
    Closure cl(q, rp, old_q);
    mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  }

  heap->flush_liveness_cache(w);
}

template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, StringDedup::Requests* const req) {
  bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
  switch (generation) {
    case YOUNG:
      mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    case OLD:
      // Old generation collection only performs marking, it should not update references.
      mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
      break;
    case GLOBAL:
      mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    case NON_GEN:
      mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
  if (cancellable) {
    switch(dedup_mode) {
      case NO_DEDUP:
        mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ENQUEUE_DEDUP:
        mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ALWAYS_DEDUP:
        mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
    }
  } else {
    switch(dedup_mode) {
      case NO_DEDUP:
        mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ENQUEUE_DEDUP:
        mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ALWAYS_DEDUP:
        mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
    }
  }
}

template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Do not use active_generation() : we must use the gc_generation() set by
  // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
  // intervene to update active_generation, so we can't
  // shenandoah_assert_generations_reconciled() here.
  assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
  heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);
  ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id);

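  // The SATB write barrier enqueues reference values overwritten during concurrent
  // marking; draining those buffers into the mark queues preserves the
  // snapshot-at-the-beginning invariant so no reachable object is missed.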
  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old_q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}