/*
 * Copyright (c) 2021, Red Hat, Inc. All rights reserved.
 * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahStackWatermark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/safepointVerifiers.hpp"

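// Global epoch for Shenandoah stack watermarks. Bumping it at a safepoint
// (see change_epoch_id) starts a new round of lazy stack processing and
// invalidates the previous one for all Java threads.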
uint32_t ShenandoahStackWatermark::_epoch_id = 1;

ShenandoahOnStackCodeBlobClosure::ShenandoahOnStackCodeBlobClosure() :
    _bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}

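// Applies the nmethod entry barrier to each nmethod encountered on the stack.
// On-stack nmethods are necessarily alive, so the barrier is expected to succeed.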
void ShenandoahOnStackCodeBlobClosure::do_code_blob(CodeBlob* cb) {
  nmethod* const nm = cb->as_nmethod_or_null();
  if (nm != nullptr) {
    const bool result = _bs_nm->nmethod_entry_barrier(nm);
    assert(result, "NMethod on-stack must be alive");
  }
}

ThreadLocalAllocStats& ShenandoahStackWatermark::stats() {
  return _stats;
}

uint32_t ShenandoahStackWatermark::epoch_id() const {
  return _epoch_id;
}

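// Advances the global epoch to start a new round of stack processing.
// Only called at a safepoint, so no mutator can race with the update.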
void ShenandoahStackWatermark::change_epoch_id() {
  shenandoah_assert_safepoint();
  _epoch_id++;
}

ShenandoahStackWatermark::ShenandoahStackWatermark(JavaThread* jt) :
  StackWatermark(jt, StackWatermarkKind::gc, _epoch_id),
  _heap(ShenandoahHeap::heap()),
  _stats(),
  _keep_alive_cl(),
  _evac_update_oop_cl(),
  _cb_cl() {}

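// Selects the oop closure to apply. A non-null context is an explicit closure
// handed in by a GC worker thread; with a null context, the closure is chosen
// from the current GC phase: evacuation/update during concurrent weak root
// processing, keep-alive during concurrent mark.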
OopClosure* ShenandoahStackWatermark::closure_from_context(void* context) {
  if (context != nullptr) {
    assert(_heap->is_concurrent_weak_root_in_progress() ||
           _heap->is_concurrent_mark_in_progress(),
           "Only these two phases");
    assert(Thread::current()->is_Worker_thread(), "Unexpected thread passing in context: " PTR_FORMAT, p2i(context));
    return reinterpret_cast<OopClosure*>(context);
  } else {
    if (_heap->is_concurrent_weak_root_in_progress()) {
      assert(_heap->is_evacuation_in_progress(), "Nothing to evacuate");
      return &_evac_update_oop_cl;
    } else if (_heap->is_concurrent_mark_in_progress()) {
      return &_keep_alive_cl;
    } else {
      ShouldNotReachHere();
      return nullptr;
    }
  }
}

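// Eagerly processes the non-frame parts of the thread (thread-local oops and
// on-stack code roots) and retires the TLAB, then publishes the processing
// start so concurrent threads can rely on it. Frames themselves are processed
// lazily, via process().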
void ShenandoahStackWatermark::start_processing_impl(void* context) {
  NoSafepointVerifier nsv;
  ShenandoahHeap* const heap = ShenandoahHeap::heap();

  // Process the non-frame part of the thread
  if (heap->is_concurrent_weak_root_in_progress()) {
    assert(heap->is_evacuation_in_progress(), "Should not be armed");
    // Retire the TLABs, which forces threads to reacquire their TLABs.
    // This is needed for two reasons. The strong reason: new allocations would come
    // from the new freeset, which is outside the collection set, so no writes into
    // cset regions would happen there. The weaker reason: new allocations would land
    // past the update watermark, so less work would be needed for reference updates
    // (the large filler object would be updated instead).
    retire_tlab();

    _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
  } else if (heap->is_concurrent_mark_in_progress()) {
    // We need to reset all TLABs because they might be below the TAMS, and we need
    // to mark the objects in them. Do not let mutators allocate any new objects in
    // their current TLABs. This is also a good place to resize the TLABs for future
    // allocations.
    retire_tlab();

    _jt->oops_do_no_frames(closure_from_context(context), &_cb_cl);
  } else {
    ShouldNotReachHere();
  }

  // Publish the processing start to concurrent threads
  StackWatermark::start_processing_impl(context);
}

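// Retires the thread's TLAB so the next allocation takes a fresh one,
// accumulating allocation stats and optionally resizing the TLAB.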
void ShenandoahStackWatermark::retire_tlab() {
  // Retire TLAB
  if (UseTLAB) {
    _stats.reset();
    _jt->tlab().retire(&_stats);
    if (ResizeTLAB) {
      _jt->tlab().resize();
    }
  }
}

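// Lazily processes a single stack frame: applies the phase-appropriate oop
// closure to the frame's oops and the nmethod entry barrier to its code blob.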
void ShenandoahStackWatermark::process(const frame& fr, RegisterMap& register_map, void* context) {
  OopClosure* oops = closure_from_context(context);
  assert(oops != nullptr, "Should not get to here");
  ShenandoahHeap* const heap = ShenandoahHeap::heap();
  assert((heap->is_concurrent_weak_root_in_progress() && heap->is_evacuation_in_progress()) ||
         heap->is_concurrent_mark_in_progress(),
         "Only these two phases");
  fr.oops_do(oops, &_cb_cl, &register_map, DerivedPointerIterationMode::_directly);
}