/*
 * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"

// Base of the marking oop closures: remembers the worker-local queue that
// newly discovered objects are pushed onto, and caches the heap's current
// marking context. Starts out non-weak (_weak == false); the reference
// processor is handed to the metadata-visiting superclass.
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }

// The mark itself only borrows the task-queue set owned by the heap's
// marking context; it does not allocate queues of its own.
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}

void ShenandoahMark::start_mark() {
  // Only open a new code-cache marking cycle if one is not already active;
  // start_mark() may be reached while a previous cycle is still open.
  if (!CodeCache::is_gc_marking_cycle_active()) {
    CodeCache::on_gc_marking_cycle_start();
  }
}

void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  CodeCache::on_gc_marking_cycle_finish();
}

// Discard all in-flight marking state, e.g. when a concurrent cycle is
// cancelled: drop queued mark tasks and abandon partially filled SATB buffers.
void ShenandoahMark::clear() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

// Per-worker setup around the marking loop: pick this worker's queue and
// liveness cache, select the closure matching the heap state, run the loop,
// then flush the liveness cache back to the heap.
template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahLiveData* ld = heap->get_liveness_cache(w);

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (heap->has_forwarded_objects()) {
    // Marking with from-space objects present: the closure also updates
    // references as it marks.
    using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
    Closure cl(q, rp);
    mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  } else {
    // Plain marking closure; no reference updating needed.
    using Closure = ShenandoahMarkRefsClosure<GENERATION>;
    Closure cl(q, rp);
    mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
  }

  heap->flush_liveness_cache(w);
}

// Non-generational path: the generation argument is intentionally unused
// here, and marking always proceeds as NON_GEN.
template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, StringDedup::Requests* const req) {
  mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req);
}

// Runtime-to-template dispatch: expand the (cancellable, dedup_mode) flags
// into the corresponding template instantiation of mark_loop above.
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode,
                               StringDedup::Requests* const req) {
  if (cancellable) {
    switch(dedup_mode) {
      case NO_DEDUP:
        mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ENQUEUE_DEDUP:
        mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ALWAYS_DEDUP:
        mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
    }
  } else {
    switch(dedup_mode) {
      case NO_DEDUP:
        mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ENQUEUE_DEDUP:
        mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
      case ALWAYS_DEDUP:
        mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
        break;
    }
  }
}

// The marking loop proper, run by each worker:
//   cl        - marking oop closure selected by mark_loop_prework
//   live_data - worker-local liveness cache updated by do_task
//   worker_id - this worker's index (selects its queue and steal victim seed)
//   terminator- shared termination protocol for the worker gang
//   req       - string-dedup request buffer (used only when STRING_DEDUP says so)
// If CANCELLABLE, the loop polls for GC cancellation and returns early when
// the cycle is cancelled.
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // Number of tasks processed between cancellation/termination checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  heap->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain the claimed queue in stride-sized batches between cancellation
    // checks; once it runs dry, move on to the next unclaimed queue.
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work from this worker's own queue (plus stealing).
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain completed SATB buffers into our queue before taking more tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer local tasks; fall back to stealing from other workers' queues.
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}