1 /* 2 * Copyright (c) 2021, Red Hat, Inc. All rights reserved. 3 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/safepointVerifiers.hpp"

// Global epoch shared by all per-thread stack watermarks. Bumping it (at a
// safepoint, see change_epoch_id()) re-arms every watermark so that stacks
// are processed again in the new GC phase.
uint32_t ShenandoahStackWatermark::_epoch_id = 1;

ShenandoahOnStackNMethodClosure::ShenandoahOnStackNMethodClosure() :
  _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}

// Applies the nmethod entry barrier to an nmethod discovered in a stack
// frame. An nmethod that is on-stack must be alive, so the barrier is
// expected to always succeed; `result` is only consumed by the assert
// (debug builds).
void ShenandoahOnStackNMethodClosure::do_nmethod(nmethod* nm) {
  assert(nm != nullptr, "Sanity");
  const bool result = _bs_nm->nmethod_entry_barrier(nm);
  assert(result, "NMethod on-stack must be alive");
}

// TLAB statistics accumulated by retire_tlab(); read by the GC after
// processing to aggregate allocation stats across threads.
ThreadLocalAllocStats& ShenandoahStackWatermark::stats() {
  return _stats;
}

// Current value of the shared epoch; the StackWatermark machinery compares
// this against the armed value to decide whether processing is needed.
uint32_t ShenandoahStackWatermark::epoch_id() const {
  return _epoch_id;
}

// Advances the global epoch, invalidating all previously processed stacks.
// Only legal at a safepoint: no thread may concurrently observe the epoch
// while it changes.
void ShenandoahStackWatermark::change_epoch_id() {
  shenandoah_assert_safepoint();
  _epoch_id++;
}

ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
  StackWatermark(jt, StackWatermarkKind::gc, _epoch_id),
  _heap(ShenandoahHeap::heap()),
  _stats(),
  _keep_alive_cl(),
  _evac_update_oop_cl(),
  _nm_cl() {}

// Selects the oop closure to apply to stack roots.
//
// A non-null `context` is a closure handed in by a GC worker thread (only
// valid during concurrent mark or concurrent weak-root processing) and is
// used directly. With a null context (e.g. a mutator hitting its own
// watermark), the closure is chosen from the current GC phase:
// keep-alive marking during concurrent mark, evac/update during
// concurrent weak-root processing.
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
  if (context != nullptr) {
    assert(_heap->is_concurrent_weak_root_in_progress() ||
           _heap->is_concurrent_mark_in_progress(),
           "Only these two phases");
    assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
    return reinterpret_cast<OopClosure*>(context);
  } else {
    if (_heap->is_concurrent_mark_in_progress()) {
      return &_keep_alive_cl;
    } else if (_heap->is_concurrent_weak_root_in_progress()) {
      assert(_heap->is_evacuation_in_progress(), "Nothing to evacuate");
      return &_evac_update_oop_cl;
    } else {
      ShouldNotReachHere();
      return nullptr;
    }
  }
}

// Processes the non-frame portion of the thread (handles, TLAB, on-stack
// nmethods) when the watermark is first crossed in the current epoch.
// Must not safepoint (NoSafepointVerifier): the phase checked below could
// otherwise change under us. The base-class call at the end publishes the
// start of processing and must remain last.
void ShenandoahStackWatermark::start_processing_impl(void* context) {
  NoSafepointVerifier nsv;
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  // Process the non-frame part of the thread
  if (heap->is_concurrent_mark_in_progress()) {
    // We need to reset all TLABs because they might be below the TAMS, and we need to mark
    // the objects in them. Do not let mutators allocate any new objects in their current TLABs.
    // It is also a good place to resize the TLAB sizes for future allocations.
    retire_tlab();

    _jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
  } else if (heap->is_concurrent_weak_root_in_progress()) {
    assert(heap->is_evacuation_in_progress(), "Should not be armed");
    // Retire the TLABs, which will force threads to reacquire their TLABs.
    // This is needed for two reasons. Strong one: new allocations would be with new freeset,
    // which would be outside the collection set, so no cset writes would happen there.
    // Weaker one: new allocations would happen past update watermark, and so less work would
    // be needed for reference updates (would update the large filler instead).
    retire_tlab();

    _jt->oops_do_no_frames(closure_from_context(context), &_nm_cl);
  } else {
    ShouldNotReachHere();
  }

  // Publishes the processing start to concurrent threads
  StackWatermark::start_processing_impl(context);
}

// Retires the thread's current TLAB, folding its allocation data into
// _stats, and optionally resizes the TLAB for future allocations.
// No-op when TLABs are disabled.
void ShenandoahStackWatermark::retire_tlab() {
  // Retire TLAB
  if (UseTLAB) {
    _stats.reset();
    _jt->tlab().retire(&_stats);
    if (ResizeTLAB) {
      _jt->tlab().resize();
    }
  }
}

// Processes a single stack frame: applies the phase-appropriate oop
// closure (or the worker-supplied one via `context`) plus the nmethod
// entry-barrier closure to the frame's oops. Derived pointers are visited
// directly since this can run concurrently, outside a safepoint.
void ShenandoahStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
  OopClosure* oops = closure_from_context(context);
  assert(oops != nullptr, "Should not get to here");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert((heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) ||
         heap->is_concurrent_mark_in_progress(),
         "Only these two phases");
  fr.oops_do(oops, &_nm_cl, &register_map, DerivedPointerIterationMode::_directly);
}