< prev index next >

src/hotspot/share/gc/shenandoah/shenandoahMark.cpp

Print this page

  1 /*
  2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.

  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 
 26 #include "precompiled.hpp"
 27 
 28 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 29 #include "gc/shenandoah/shenandoahClosures.inline.hpp"

 30 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 31 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 32 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 33 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 34 #include "gc/shenandoah/shenandoahUtils.hpp"
 35 #include "gc/shenandoah/shenandoahVerifier.hpp"
 36 
// Common base for the marking oop closures: remembers the worker's scan queue
// and the heap's current marking context. Strong-reference semantics by
// default (_weak starts false); callers toggle weakness via the superclass API.
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q,  ShenandoahReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }
 43 
// A ShenandoahMark instance does not own marking state; it borrows the task
// queue set held by the heap's marking context.
ShenandoahMark::ShenandoahMark() :
  _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
}
 47 
 48 void ShenandoahMark::start_mark() {
 49   if (!CodeCache::is_gc_marking_cycle_active()) {
 50     CodeCache::on_gc_marking_cycle_start();
 51   }
 52 }
 53 
// Close the code-cache GC marking cycle opened by start_mark().
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  CodeCache::on_gc_marking_cycle_finish();
}
 59 
// Reset marking state (used e.g. after a cancelled cycle): drop any pending
// entries from the marking task queues and discard unprocessed SATB buffers.
void ShenandoahMark::clear() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}
 68 
 69 template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
 70 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
 71   ShenandoahObjToScanQueue* q = get_queue(w);

 72 
 73   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 74   ShenandoahLiveData* ld = heap->get_liveness_cache(w);
 75 
 76   // TODO: We can clean up this if we figure out how to do templated oop closures that
 77   // play nice with specialized_oop_iterators.
 78   if (heap->has_forwarded_objects()) {
 79     using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
 80     Closure cl(q, rp);
 81     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 82   } else {
 83     using Closure = ShenandoahMarkRefsClosure<GENERATION>;
 84     Closure cl(q, rp);
 85     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 86   }
 87 
 88   heap->flush_liveness_cache(w);
 89 }
 90 
// Non-generational marking entry point. NOTE(review): the 'generation'
// argument is accepted but not used here — marking always instantiates the
// NON_GEN flavor in this version of the file; presumably the parameter exists
// for interface symmetry with the generational variant.
template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
                               ShenandoahGenerationType generation, StringDedup::Requests* const req) {
  mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req);
}
 96 
 97 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
 98                                ShenandoahGenerationType generation, bool cancellable,  StringDedupMode dedup_mode,
 99                                StringDedup::Requests* const req) {
100   if (cancellable) {
101     switch(dedup_mode) {
102       case NO_DEDUP:
103         mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
104         break;
105       case ENQUEUE_DEDUP:
106         mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
107         break;
108       case ALWAYS_DEDUP:
109         mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
110         break;
111     }
112   } else {
113     switch(dedup_mode) {
114       case NO_DEDUP:
115         mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
116         break;
117       case ENQUEUE_DEDUP:
118         mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
119         break;
120       case ALWAYS_DEDUP:
121         mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
122         break;
123     }
124   }
125 }
126 
// Core marking loop for one worker: first drains any unclaimed scan queues,
// then alternates between draining completed SATB buffers and popping or
// stealing mark tasks, until the terminator agrees that all workers are done.
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Register the marking closure so reference processing can reuse it.
  heap->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    // Check for cancellation between strides, not between tasks, to keep
    // the polling overhead bounded.
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work exclusively on this worker's own queue (plus steals).
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain all currently completed SATB buffers before taking mark tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer our own queue; fall back to stealing from other workers.
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

  1 /*
  2  * Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 
 27 #include "precompiled.hpp"
 28 
 29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 30 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 31 #include "gc/shenandoah/shenandoahGeneration.hpp"
 32 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 33 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 34 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 35 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 36 #include "gc/shenandoah/shenandoahUtils.hpp"
 37 #include "gc/shenandoah/shenandoahVerifier.hpp"
 38 











 39 void ShenandoahMark::start_mark() {
 40   if (!CodeCache::is_gc_marking_cycle_active()) {
 41     CodeCache::on_gc_marking_cycle_start();
 42   }
 43 }
 44 
// Close the code-cache GC marking cycle — but only if concurrent old-gen
// marking is not still running, since the cycle must stay open until all
// concurrent marking (young and old) has finished.
void ShenandoahMark::end_mark() {
  // Unlike other GCs, we do not arm the nmethods
  // when marking terminates.
  if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) {
    CodeCache::on_gc_marking_cycle_finish();
  }
}
 52 
// Common base for the marking oop closures. In generational mode, objects
// that belong in the old generation are routed to old_q; presumably old_q is
// null when running non-generationally — confirm against callers.
// Strong-reference semantics by default (_weak starts false).
ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q,  ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _old_queue(old_q),
  _mark_context(ShenandoahHeap::heap()->marking_context()),
  _weak(false)
{ }
 60 
// Bind this mark to the generation being collected and borrow that
// generation's task queues. The old-gen queue set comes from the same
// generation object; its null-ness outside generational mode is determined
// by old_gen_task_queues() — not visible here.
ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
  _generation(generation),
  _task_queues(generation->task_queues()),
  _old_gen_task_queues(generation->old_gen_task_queues()) {
}
 66 
 67 template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
 68 void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
 69   ShenandoahObjToScanQueue* q = get_queue(w);
 70   ShenandoahObjToScanQueue* old_q = get_old_queue(w);
 71 
 72   ShenandoahHeap* const heap = ShenandoahHeap::heap();
 73   ShenandoahLiveData* ld = heap->get_liveness_cache(w);
 74 
 75   // TODO: We can clean up this if we figure out how to do templated oop closures that
 76   // play nice with specialized_oop_iterators.
 77   if (update_refs) {
 78     using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
 79     Closure cl(q, rp, old_q);
 80     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 81   } else {
 82     using Closure = ShenandoahMarkRefsClosure<GENERATION>;
 83     Closure cl(q, rp, old_q);
 84     mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
 85   }
 86 
 87   heap->flush_liveness_cache(w);
 88 }
 89 
 90 template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
 91 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
 92                                ShenandoahGenerationType generation, StringDedup::Requests* const req) {
 93   bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
 94   switch (generation) {
 95     case YOUNG:
 96       mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
 97       break;
 98     case OLD:
 99       // Old generation collection only performs marking, it should not update references.
100       mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
101       break;
102     case GLOBAL:
103       mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
104       break;
105     case NON_GEN:
106       mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
107       break;
108     default:
109       ShouldNotReachHere();
110       break;
111   }
112 }
113 
114 void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
115                                ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {

116   if (cancellable) {
117     switch(dedup_mode) {
118       case NO_DEDUP:
119         mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
120         break;
121       case ENQUEUE_DEDUP:
122         mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
123         break;
124       case ALWAYS_DEDUP:
125         mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
126         break;
127     }
128   } else {
129     switch(dedup_mode) {
130       case NO_DEDUP:
131         mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
132         break;
133       case ENQUEUE_DEDUP:
134         mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
135         break;
136       case ALWAYS_DEDUP:
137         mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
138         break;
139     }
140   }
141 }
142 
// Core marking loop for one worker (generational variant): first drains any
// unclaimed scan queues, then alternates between draining completed SATB
// buffers and popping or stealing mark tasks, until the terminator agrees
// that all workers are done.
template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  // Do not use active_generation() : we must use the gc_generation() set by
  // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
  // intervene to update active_generation, so we can't
  // shenandoah_assert_generations_reconciled() here.
  assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
  // Register the marking closure so reference processing can reuse it.
  heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != nullptr) {
    // Check for cancellation between strides, not between tasks, to keep
    // the polling overhead bounded.
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // From here on, work exclusively on this worker's own queue (plus steals).
  q = get_queue(worker_id);
  ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id);

  ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old_q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    // Drain all currently completed SATB buffers before taking mark tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer our own queue; fall back to stealing from other workers.
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
< prev index next >