< prev index next > src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
Print this page
/*
* Copyright (c) 2021, 2022, Red Hat, Inc. All rights reserved.
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
+ #include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
- ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp) :
- MetadataVisitingOopIterateClosure(rp),
- _queue(q),
- _mark_context(ShenandoahHeap::heap()->marking_context()),
- _weak(false)
- { }
-
- ShenandoahMark::ShenandoahMark() :
- _task_queues(ShenandoahHeap::heap()->marking_context()->task_queues()) {
- }
-
void ShenandoahMark::start_mark() {
  // A marking cycle may still be open from a previous collection (see
  // end_mark(), which keeps it open while old-gen marking is in progress);
  // in that case there is nothing to start.
  if (CodeCache::is_gc_marking_cycle_active()) {
    return;
  }
  CodeCache::on_gc_marking_cycle_start();
}
void ShenandoahMark::end_mark() {
// Unlike other GCs, we do not arm the nmethods
// when marking terminates.
- CodeCache::on_gc_marking_cycle_finish();
+ // Keep the code cache marking cycle open while concurrent old-generation
+ // marking is still in progress; finishing it here would close the cycle
+ // before old-gen marking has completed.
+ if (!ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress()) {
+ CodeCache::on_gc_marking_cycle_finish();
+ }
}
- void ShenandoahMark::clear() {
- // Clean up marking stacks.
- ShenandoahObjToScanQueueSet* queues = ShenandoahHeap::heap()->marking_context()->task_queues();
- queues->clear();
+ // Base marking closure: marked objects are pushed onto the worker's queue 'q'.
+ // 'old_q' is the worker's old-generation queue — presumably used to defer
+ // old-gen objects discovered while marking young (TODO confirm against the
+ // generational closure implementations).
+ ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahReferenceProcessor* rp, ShenandoahObjToScanQueue* old_q) :
+ MetadataVisitingOopIterateClosure(rp),
+ _queue(q),
+ _old_queue(old_q),
+ _mark_context(ShenandoahHeap::heap()->marking_context()),
+ _weak(false)
+ { }
- // Cancel SATB buffers.
- ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
+ // Marking now operates on behalf of a specific generation: both the regular
+ // and the old-gen task queues are obtained from that generation rather than
+ // from the global marking context.
+ ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
+ _generation(generation),
+ _task_queues(generation->task_queues()),
+ _old_gen_task_queues(generation->old_gen_task_queues()) {
}
+ // Binds worker 'w' to its queues and liveness cache, selects the concrete
+ // marking closure for GENERATION, and runs the main marking loop.
- template <bool CANCELLABLE, StringDedupMode STRING_DEDUP>
- void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req) {
+ template <ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
+ void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
ShenandoahObjToScanQueue* q = get_queue(w);
+ ShenandoahObjToScanQueue* old_q = get_old_queue(w);
ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahLiveData* ld = heap->get_liveness_cache(w);
// TODO: We can clean up this if we figure out how to do templated oop closures that
// play nice with specialized_oop_iterators.
+ // 'update_refs' is a parameter (instead of querying the heap here) so that
+ // the OLD case in mark_loop() can force it off: old-gen marking must not
+ // update references.
- if (heap->has_forwarded_objects()) {
- using Closure = ShenandoahMarkUpdateRefsClosure;
- Closure cl(q, rp);
- mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
+ if (update_refs) {
+ using Closure = ShenandoahMarkUpdateRefsClosure<GENERATION>;
+ Closure cl(q, rp, old_q);
+ mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
} else {
- using Closure = ShenandoahMarkRefsClosure;
- Closure cl(q, rp);
- mark_loop_work<Closure, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
+ using Closure = ShenandoahMarkRefsClosure<GENERATION>;
+ Closure cl(q, rp, old_q);
+ mark_loop_work<Closure, GENERATION, CANCELLABLE, STRING_DEDUP>(&cl, ld, w, t, req);
}
heap->flush_liveness_cache(w);
}
+ // Translates the runtime 'generation' value into the compile-time GENERATION
+ // template parameter of mark_loop_prework.
+ template<bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
- bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
+ ShenandoahGenerationType generation, StringDedup::Requests* const req) {
+ bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
+ switch (generation) {
+ case YOUNG:
+ mark_loop_prework<YOUNG, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
+ break;
+ case OLD:
+ // Old generation collection only performs marking, it should not update references.
+ mark_loop_prework<OLD, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, false);
+ break;
+ case GLOBAL:
+ mark_loop_prework<GLOBAL, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
+ break;
+ case NON_GEN:
+ mark_loop_prework<NON_GEN, CANCELLABLE, STRING_DEDUP>(worker_id, terminator, rp, req, update_refs);
+ break;
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+ }
+
// Public entry point: expands the runtime 'cancellable' and 'dedup_mode'
// arguments into the compile-time CANCELLABLE / STRING_DEDUP template
// parameters of the templated mark_loop overload.
void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
+ ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
if (cancellable) {
switch(dedup_mode) {
case NO_DEDUP:
- mark_loop_prework<true, NO_DEDUP>(worker_id, terminator, rp, req);
+ mark_loop<true, NO_DEDUP>(worker_id, terminator, rp, generation, req);
break;
case ENQUEUE_DEDUP:
- mark_loop_prework<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
+ mark_loop<true, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
break;
case ALWAYS_DEDUP:
- mark_loop_prework<true, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
+ mark_loop<true, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
break;
}
} else {
switch(dedup_mode) {
case NO_DEDUP:
- mark_loop_prework<false, NO_DEDUP>(worker_id, terminator, rp, req);
+ mark_loop<false, NO_DEDUP>(worker_id, terminator, rp, generation, req);
break;
case ENQUEUE_DEDUP:
- mark_loop_prework<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, req);
+ mark_loop<false, ENQUEUE_DEDUP>(worker_id, terminator, rp, generation, req);
break;
case ALWAYS_DEDUP:
- mark_loop_prework<false, ALWAYS_DEDUP>(worker_id, terminator, rp, req);
+ mark_loop<false, ALWAYS_DEDUP>(worker_id, terminator, rp, generation, req);
break;
}
}
}
- template <class T, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
+ template <class T, ShenandoahGenerationType GENERATION, bool CANCELLABLE, StringDedupMode STRING_DEDUP>
void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req) {
uintx stride = ShenandoahMarkLoopStride;
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahObjToScanQueueSet* queues = task_queues();
ShenandoahObjToScanQueue* q;
ShenandoahMarkTask t;
- heap->ref_processor()->set_mark_closure(worker_id, cl);
+ // Do not use active_generation() : we must use the gc_generation() set by
+ // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
+ // intervene to update active_generation, so we can't
+ // shenandoah_assert_generations_reconciled() here.
+ assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
+ heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);
/*
* Process outstanding queues, if any.
*
* There can be more queues than workers. To deal with the imbalance, we claim
return;
}
for (uint i = 0; i < stride; i++) {
if (q->pop(t)) {
- do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
+ do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
} else {
assert(q->is_empty(), "Must be empty");
q = queues->claim_next();
break;
}
}
}
q = get_queue(worker_id);
+ ShenandoahObjToScanQueue* old_q = get_old_queue(worker_id);
- ShenandoahSATBBufferClosure drain_satb(q);
+ ShenandoahSATBBufferClosure<GENERATION> drain_satb(q, old_q);
SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
/*
* Normal marking loop:
*/
while (true) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}
-
while (satb_mq_set.completed_buffers_num() > 0) {
satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
}
uint work = 0;
for (uint i = 0; i < stride; i++) {
if (q->pop(t) ||
queues->steal(worker_id, t)) {
- do_task<T, STRING_DEDUP>(q, cl, live_data, req, &t);
+ do_task<T, GENERATION, STRING_DEDUP>(q, cl, live_data, req, &t, worker_id);
work++;
} else {
break;
}
}
< prev index next >