1 /*
  2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 
 27 #include "precompiled.hpp"
 28 
 29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 30 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 31 #include "gc/shenandoah/shenandoahGeneration.hpp"
 32 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 33 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 34 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 35 #include "gc/shenandoah/shenandoahUtils.hpp"
 36 #include "gc/shenandoah/shenandoahVerifier.hpp"
 37 
 38 void ShenandoahMark::start_mark() {
 39   if (!CodeCache::is_gc_marking_cycle_active()) {
 40     CodeCache::on_gc_marking_cycle_start();
 41   }
 42 }
 43 
 44 void ShenandoahMark::end_mark() {
 45   // Unlike other GCs, we do not arm the nmethods
 46   // when marking terminates.
 47   if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) {
 48     CodeCache::on_gc_marking_cycle_finish();
 49   }
 50 }
 51 
// Cache the generation being marked and its marking task queues.
// Both queue sets are obtained from the generation itself; the old-gen
// queue set is presumably only populated in generational modes -- confirm
// against ShenandoahGeneration::old_gen_task_queues().
ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}
 57 
 58 template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
 59 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
 60   ShenandoahObjToScanQueue* q = get_queue(w);
 61   ShenandoahObjToScanQueue* old_q = get_old_queue(w);
 62 
 63   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 64   ShenandoahLiveData* ld = heap->get_liveness_cache(w);
 65 
 66   // TODO: We can clean up this if we figure out how to do templated oop closures that
 67   // play nice with specialized_oop_iterators.
 68   if (update_refs) {
 69     using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
 70     Closure cl(q, rp, old_q);
 71     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 72   } else {
 73     using Closure = ShenandoahMarkRefsClosure<GENERATION>;
 74     Closure cl(q, rp, old_q);
 75     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 76   }
 77 
 78   heap->flush_liveness_cache(w);
 79 }
 80 
 81 template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
 82 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
 83                                ShenandoahGenerationType generation, StringDedup::Requests* const req) {
 84   bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
 85   switch (generation) {
 86     case YOUNG:
 87       mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
 88       break;
 89     case OLD:
 90       // Old generation collection only performs marking, it should not update references.
 91       mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
 92       break;
 93     case GLOBAL:
 94       mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
 95       break;
 96     case NON_GEN:
 97       mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
 98       break;
 99     default:
100       ShouldNotReachHere();
101       break;
102   }
103 }
104 
105 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
106                                ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
107   if (cancellable) {
108     switch(dedup_mode) {
109       case NO_DEDUP:
110         mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
111         break;
112       case ENQUEUE_DEDUP:
113         mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
114         break;
115       case ALWAYS_DEDUP:
116         mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
117         break;
118     }
119   } else {
120     switch(dedup_mode) {
121       case NO_DEDUP:
122         mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
123         break;
124       case ENQUEUE_DEDUP:
125         mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
126         break;
127       case ALWAYS_DEDUP:
128         mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
129         break;
130     }
131   }
132 }
133 
// Core marking loop shared by all generations and modes. Runs in two phases:
// (1) drain any outstanding claimed queues left over from a previous step,
// then (2) the normal loop that alternates between draining completed SATB
// buffers, popping/stealing marking tasks in strides, and offering
// termination when a whole stride found no work.
//
// T         - oop closure type applied to each marking task
// cl        - the closure instance
// live_data - per-worker liveness cache, passed through to do_task
// worker_id - this worker's index; selects its own queue(s)
// terminator- shared termination protocol for all marking workers
// req       - string-dedup request buffer, passed through to do_task
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  // Number of tasks processed per iteration between cancellation/SATB checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Do not use active_generation() : we must use the gc_generation() set by
  // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
  // intervene to update active_generation, so we can't
  // shenandoah_assert_generations_reconciled() here.
  assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
  heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    // When cancellable, bail out immediately if the GC has been cancelled.
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
      } else {
        // Claimed queue drained; move on to the next outstanding queue.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // Switch to this worker's own queue(s) for the normal marking loop.
  q = get_queue(worker_id);
  ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old_q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }
    // Drain all completed SATB buffers before taking more marking work.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    // Take up to one stride of tasks from our own queue, stealing from
    // other workers' queues when ours is empty.
    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}