/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP

#include "gc/shared/plab.hpp"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"

class ShenandoahThreadLocalData {
private:
  char _gc_state;
  // Evacuation OOM state
  uint8_t _oom_scope_nesting_level;
  bool _oom_during_evac;

  SATBMarkQueue _satb_mark_queue;

  // Thread-local allocation buffer for object evacuations.
  // In generational mode, it is exclusive to the young generation.
  PLAB* _gclab;
  size_t _gclab_size;

  double _paced_time;

  // Thread-local allocation buffer only used in generational mode.
  // Used both by mutator threads and by GC worker threads
  // for evacuations within the old generation and
  // for promotions from the young generation into the old generation.
  PLAB* _plab;
  size_t _plab_size;

  size_t _plab_evacuated;
  size_t _plab_promoted;
  size_t _plab_preallocated_promoted;
  bool _plab_allows_promotion; // If false, no more promotion by this thread during this evacuation phase.
  bool _plab_retries_enabled;

  ShenandoahEvacuationStats* _evacuation_stats;

  ShenandoahThreadLocalData();
  ~ShenandoahThreadLocalData();

  static ShenandoahThreadLocalData* data(Thread* thread) {
    assert(UseShenandoahGC, "Sanity");
    return thread->gc_data<ShenandoahThreadLocalData>();
  }

  static ByteSize satb_mark_queue_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue);
  }

public:
  static void create(Thread* thread) {
    new (data(thread)) ShenandoahThreadLocalData();
  }

  static void destroy(Thread* thread) {
    data(thread)->~ShenandoahThreadLocalData();
  }

  static SATBMarkQueue& satb_mark_queue(Thread* thread) {
    return data(thread)->_satb_mark_queue;
  }

  static void set_gc_state(Thread* thread, char gc_state) {
    data(thread)->_gc_state = gc_state;
  }

  static char gc_state(Thread* thread) {
    return data(thread)->_gc_state;
  }

  static void initialize_gclab(Thread* thread) {
    assert (thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs");
    assert(data(thread)->_gclab == nullptr, "Only initialize once");
    data(thread)->_gclab = new PLAB(PLAB::min_size());
    data(thread)->_gclab_size = 0;
    data(thread)->_plab = new PLAB(PLAB::min_size());
    data(thread)->_plab_size = 0;
  }

  static PLAB* gclab(Thread* thread) {
    return data(thread)->_gclab;
  }

  static size_t gclab_size(Thread* thread) {
    return data(thread)->_gclab_size;
  }

  static void set_gclab_size(Thread* thread, size_t v) {
    data(thread)->_gclab_size = v;
  }

  static void begin_evacuation(Thread* thread, size_t bytes) {
    data(thread)->_evacuation_stats->begin_evacuation(bytes);
  }

  static void end_evacuation(Thread* thread, size_t bytes) {
    data(thread)->_evacuation_stats->end_evacuation(bytes);
  }

  static void record_age(Thread* thread, size_t bytes, uint age) {
    data(thread)->_evacuation_stats->record_age(bytes, age);
  }

  static ShenandoahEvacuationStats* evacuation_stats(Thread* thread) {
    return data(thread)->_evacuation_stats;
  }

  static PLAB* plab(Thread* thread) {
    return data(thread)->_plab;
  }

  static size_t plab_size(Thread* thread) {
    return data(thread)->_plab_size;
  }

  static void set_plab_size(Thread* thread, size_t v) {
    data(thread)->_plab_size = v;
  }

  static void enable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = true;
  }

  static void disable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = false;
  }

  static bool plab_retries_enabled(Thread* thread) {
    return data(thread)->_plab_retries_enabled;
  }

  static void enable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = true;
  }

  static void disable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = false;
  }

  static bool allow_plab_promotions(Thread* thread) {
    return data(thread)->_plab_allows_promotion;
  }

  static void reset_plab_evacuated(Thread* thread) {
    data(thread)->_plab_evacuated = 0;
  }

  static void add_to_plab_evacuated(Thread* thread, size_t increment) {
    data(thread)->_plab_evacuated += increment;
  }

  static void subtract_from_plab_evacuated(Thread* thread, size_t increment) {
    // TODO: Assert underflow
    data(thread)->_plab_evacuated -= increment;
  }

  static size_t get_plab_evacuated(Thread* thread) {
    return data(thread)->_plab_evacuated;
  }

  static void reset_plab_promoted(Thread* thread) {
    data(thread)->_plab_promoted = 0;
  }

  static void add_to_plab_promoted(Thread* thread, size_t increment) {
    data(thread)->_plab_promoted += increment;
  }

  static void subtract_from_plab_promoted(Thread* thread, size_t increment) {
    // TODO: Assert underflow
    data(thread)->_plab_promoted -= increment;
  }

  static size_t get_plab_promoted(Thread* thread) {
    return data(thread)->_plab_promoted;
  }

  static void set_plab_preallocated_promoted(Thread* thread, size_t value) {
    data(thread)->_plab_preallocated_promoted = value;
  }

  static size_t get_plab_preallocated_promoted(Thread* thread) {
    return data(thread)->_plab_preallocated_promoted;
  }
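
  // A rough, hypothetical sketch of how the promotion/evacuation tallies above
  // are intended to be driven from the evacuation code (the callers are not in
  // this header; `is_promotion` and `obj_size_bytes` are illustrative names
  // only). A thread that has copied an object into its old-generation PLAB
  // might account for it like this:
  //
  //   if (is_promotion) {
  //     // young-to-old copy; only attempted while promotions are allowed,
  //     // i.e. ShenandoahThreadLocalData::allow_plab_promotions(thread)
  //     ShenandoahThreadLocalData::add_to_plab_promoted(thread, obj_size_bytes);
  //   } else {
  //     // copy within the old generation
  //     ShenandoahThreadLocalData::add_to_plab_evacuated(thread, obj_size_bytes);
  //   }
  //
  // The subtract_from_* variants let a caller back out such an update, for
  // example for a copy that is later abandoned.
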
  static void add_paced_time(Thread* thread, double v) {
    data(thread)->_paced_time += v;
  }

  static double paced_time(Thread* thread) {
    return data(thread)->_paced_time;
  }

  static void reset_paced_time(Thread* thread) {
    data(thread)->_paced_time = 0;
  }

  // Evacuation OOM handling
  static bool is_oom_during_evac(Thread* thread) {
    return data(thread)->_oom_during_evac;
  }

  static void set_oom_during_evac(Thread* thread, bool oom) {
    data(thread)->_oom_during_evac = oom;
  }

  static uint8_t evac_oom_scope_level(Thread* thread) {
    return data(thread)->_oom_scope_nesting_level;
  }

  // Push the scope one level deeper, return previous level
  static uint8_t push_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level < 254, "Overflow nesting level"); // UINT8_MAX = 255
    data(thread)->_oom_scope_nesting_level = level + 1;
    return level;
  }

  // Pop the scope by one level, return previous level
  static uint8_t pop_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level > 0, "Underflow nesting level");
    data(thread)->_oom_scope_nesting_level = level - 1;
    return level;
  }

  static bool is_evac_allowed(Thread* thread) {
    return evac_oom_scope_level(thread) > 0;
  }
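
  // push_evac_oom_scope() and pop_evac_oom_scope() are meant to stay balanced,
  // typically via a stack-allocated guard around the evacuation paths. A
  // minimal sketch of such a guard, assuming a hypothetical helper class that
  // is not part of this header:
  //
  //   class HypotheticalEvacOOMScope : public StackObj {
  //     Thread* const _thread;
  //    public:
  //     explicit HypotheticalEvacOOMScope(Thread* t) : _thread(t) {
  //       ShenandoahThreadLocalData::push_evac_oom_scope(_thread);
  //     }
  //     ~HypotheticalEvacOOMScope() {
  //       ShenandoahThreadLocalData::pop_evac_oom_scope(_thread);
  //     }
  //   };
  //
  // While such a scope is active, is_evac_allowed(thread) returns true, since
  // the nesting level is greater than zero.
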
  // Offsets
  static ByteSize satb_mark_queue_active_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active();
  }

  static ByteSize satb_mark_queue_index_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();
  }

  static ByteSize satb_mark_queue_buffer_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf();
  }

  static ByteSize gc_state_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
  }
};

STATIC_ASSERT(sizeof(ShenandoahThreadLocalData) <= sizeof(GCThreadLocalData));

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP