1 /*
  2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 
 26 #include "precompiled.hpp"
 27 
 28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 30 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 31 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 32 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 33 #include "gc/shenandoah/shenandoahUtils.hpp"
 34 #include "gc/shenandoah/shenandoahVerifier.hpp"
 35 
// Cache the task queue set owned by the heap's marking context, so that
// per-worker queues can later be handed out via get_queue().
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
 39 
 40 void ShenandoahMark::start_mark() {
 41   if (!CodeCache::is_gc_marking_cycle_active()) {
 42     CodeCache::on_gc_marking_cycle_start();
 43   }
 44 }
 45 
// Tell the code cache that the GC marking cycle has finished.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  CodeCache::on_gc_marking_cycle_finish();
}
 51 
 52 void ShenandoahMark::clear() {
 53   // Clean up marking stacks.
 54   ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
 55   queues->clear();
 56 
 57   // Cancel SATB buffers.
 58   ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
 59 }
 60 
 61 template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
 62 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
 63   ShenandoahObjToScanQueue* q = get_queue(w);
 64 
 65   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 66   ShenandoahLiveData* ld = heap->get_liveness_cache(w);
 67 
 68   // TODO: We can clean up this if we figure out how to do templated oop closures that
 69   // play nice with specialized_oop_iterators.
 70   if (heap->has_forwarded_objects()) {
 71     using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
 72     Closure cl(q, rp);
 73     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 74   } else {
 75     using Closure = ShenandoahMarkRefsClosure<GENERATION>;
 76     Closure cl(q, rp);
 77     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 78   }
 79 
 80   heap->flush_liveness_cache(w);
 81 }
 82 
// Entry to the marking loop with compile-time cancellable/dedup settings.
template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, StringDedup::Requests* const req) {
  // NOTE(review): 'generation' is ignored here — marking is always
  // instantiated as NON_GEN. Presumably a hook for generational mode;
  // confirm against callers before relying on the parameter.
  mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req);
}
 88 
 89 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
 90                                ShenandoahGenerationType generation, bool cancellable,  StringDedupMode dedup_mode,
 91                                StringDedup::Requests* const req) {
 92   if (cancellable) {
 93     switch(dedup_mode) {
 94       case NO_DEDUP:
 95         mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
 96         break;
 97       case ENQUEUE_DEDUP:
 98         mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
 99         break;
100       case ALWAYS_DEDUP:
101         mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
102         break;
103     }
104   } else {
105     switch(dedup_mode) {
106       case NO_DEDUP:
107         mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
108         break;
109       case ENQUEUE_DEDUP:
110         mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
111         break;
112       case ALWAYS_DEDUP:
113         mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
114         break;
115     }
116   }
117 }
118 
// Core marking loop for one worker: drains claimed queues, then alternates
// between draining SATB buffers, popping/stealing mark tasks, and offering
// termination when no work was found in a full stride.
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Register this worker's marking closure so reference processing can
  // mark through discovered referents with the same closure.
  heap->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    // Bail out promptly if the GC cycle has been cancelled.
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain up to 'stride' tasks before re-checking for cancellation.
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work only on the queue associated with this worker id.
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Flush completed SATB buffers into this worker's queue before
    // draining mark tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    // Pop from the local queue, falling back to stealing from other
    // workers' queues; count how much work this stride produced.
    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}