/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _old_queue(old_q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }

ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}

template <GenerationMode GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
  ShenandoahObjToScanQueue* q = get_queue(w);
  ShenandoahObjToScanQueue* old = get_old_queue(w);

  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  ShenandoahLiveData* ld = heap->get_liveness_cache(w);

  // TODO: We can clean up this if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
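  // Dispatch to a closure specialization for this pass: when class unloading is enabled the
  // metadata-visiting closures are needed, and when the heap still holds forwarded objects
  // (update_refs) the closures also update references to the forwarded copies as they mark.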
  if (heap->unload_classes()) {
    if (update_refs) {
      using Closure = ShenandoahMarkUpdateRefsMetadataClosure<GENERATION>;
      Closure cl(q, rp, old);
      mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
    } else {
      using Closure = ShenandoahMarkRefsMetadataClosure<GENERATION>;
      Closure cl(q, rp, old);
      mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
    }
  } else {
    if (update_refs) {
      using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
      Closure cl(q, rp, old);
      mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
    } else {
      using Closure = ShenandoahMarkRefsClosure<GENERATION>;
      Closure cl(q, rp, old);
      mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
    }
  }

  heap->flush_liveness_cache(w);
}

template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(GenerationMode generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
  bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
  switch (generation) {
    case YOUNG:
      mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    case OLD:
      // Old generation collection only performs marking; it should not update references.
      mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
      break;
    case GLOBAL:
      mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}

void ShenandoahMark::mark_loop(GenerationMode generation, uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
  if (cancellable) {
    switch(dedup_mode) {
      case NO_DEDUP:
        mark_loop<true, NO_DEDUP>(generation, worker_id, terminator, rp, req);
        break;
      case ENQUEUE_DEDUP:
        mark_loop<true, ENQUEUE_DEDUP>(generation, worker_id, terminator, rp, req);
        break;
      case ALWAYS_DEDUP:
        mark_loop<true, ALWAYS_DEDUP>(generation, worker_id, terminator, rp, req);
        break;
    }
  } else {
    switch(dedup_mode) {
      case NO_DEDUP:
        mark_loop<false, NO_DEDUP>(generation, worker_id, terminator, rp, req);
        break;
      case ENQUEUE_DEDUP:
        mark_loop<false, ENQUEUE_DEDUP>(generation, worker_id, terminator, rp, req);
        break;
      case ALWAYS_DEDUP:
        mark_loop<false, ALWAYS_DEDUP>(generation, worker_id, terminator, rp, req);
        break;
    }
  }
}
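
// Common work loop behind all closure specializations. CANCELLABLE enables periodic checks
// for a cancelled GC, and STRING_DEDUP selects how string deduplication requests are issued
// for marked objects. The loop first drains any queues claimed from the shared queue set,
// then repeatedly drains SATB buffers, processes tasks from this worker's queue (stealing
// from other queues when it runs dry), and offers termination once a stride finds no work.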
template <class T, GenerationMode GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  assert(heap->active_generation()->generation_mode() == GENERATION, "Sanity");
  heap->active_generation()->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);
  ShenandoahObjToScanQueue* old = get_old_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride; try to terminate.
      // Need to leave the STS here, otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}