/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP

#include "gc/shared/plab.hpp"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"

class ShenandoahThreadLocalData {
private:
  char _gc_state;
  // Evacuation OOM state
  uint8_t _oom_scope_nesting_level;
  bool _oom_during_evac;

  SATBMarkQueue _satb_mark_queue;

  // Thread-local allocation buffer for object evacuations.
  // In generational mode, it is exclusive to the young generation.
  PLAB* _gclab;
  size_t _gclab_size;

  double _paced_time;

  // Thread-local allocation buffer only used in generational mode.
  // Used both by mutator threads and by GC worker threads
  // for evacuations within the old generation and
  // for promotions from the young generation into the old generation.
  PLAB* _plab;

  // Heuristics will grow the desired size of plabs.
  size_t _plab_desired_size;

  // Once the plab has been allocated, and we know the actual size, we record it here.
  size_t _plab_actual_size;

  // As the plab is used for promotions, this value is incremented. When the plab is
  // retired, the difference between 'actual_size' and 'promoted' will be returned to
  // the old generation's promotion reserve (i.e., it will be 'unexpended').
  size_t _plab_promoted;

  // If false, no more promotion by this thread during this evacuation phase.
  bool _plab_allows_promotion;

  // If true, evacuations may attempt to allocate a smaller plab if the original size fails.
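  // (A hedged note, inferred from the accessors below rather than stated in this
  //  file: allocation slow paths presumably consult plab_retries_enabled() before
  //  retrying with a smaller plab, and call disable_plab_retries() once shrinking
  //  is no longer worthwhile for this thread.)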
  bool _plab_retries_enabled;

  ShenandoahEvacuationStats* _evacuation_stats;

  ShenandoahThreadLocalData();
  ~ShenandoahThreadLocalData();

  static ShenandoahThreadLocalData* data(Thread* thread) {
    assert(UseShenandoahGC, "Sanity");
    return thread->gc_data<ShenandoahThreadLocalData>();
  }

  static ByteSize satb_mark_queue_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue);
  }

public:
  static void create(Thread* thread) {
    new (data(thread)) ShenandoahThreadLocalData();
  }

  static void destroy(Thread* thread) {
    data(thread)->~ShenandoahThreadLocalData();
  }

  static SATBMarkQueue& satb_mark_queue(Thread* thread) {
    return data(thread)->_satb_mark_queue;
  }

  static void set_gc_state(Thread* thread, char gc_state) {
    data(thread)->_gc_state = gc_state;
  }

  static char gc_state(Thread* thread) {
    assert(thread->is_Java_thread(), "GC state is only synchronized to java threads");
    return data(thread)->_gc_state;
  }

  static void initialize_gclab(Thread* thread) {
    assert (thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs");
    assert(data(thread)->_gclab == nullptr, "Only initialize once");
    data(thread)->_gclab = new PLAB(PLAB::min_size());
    data(thread)->_gclab_size = 0;

    if (ShenandoahHeap::heap()->mode()->is_generational()) {
      data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
      data(thread)->_plab_desired_size = 0;
    }
  }

  static PLAB* gclab(Thread* thread) {
    return data(thread)->_gclab;
  }

  static size_t gclab_size(Thread* thread) {
    return data(thread)->_gclab_size;
  }

  static void set_gclab_size(Thread* thread, size_t v) {
    data(thread)->_gclab_size = v;
  }

  static void begin_evacuation(Thread* thread, size_t bytes) {
    data(thread)->_evacuation_stats->begin_evacuation(bytes);
  }

  static void end_evacuation(Thread* thread, size_t bytes) {
    data(thread)->_evacuation_stats->end_evacuation(bytes);
  }

  static void record_age(Thread* thread, size_t bytes, uint age) {
    data(thread)->_evacuation_stats->record_age(bytes, age);
  }

  static ShenandoahEvacuationStats* evacuation_stats(Thread* thread) {
    shenandoah_assert_generational();
    return data(thread)->_evacuation_stats;
  }

  static PLAB* plab(Thread* thread) {
    return data(thread)->_plab;
  }

  static size_t plab_size(Thread* thread) {
    return data(thread)->_plab_desired_size;
  }

  static void set_plab_size(Thread* thread, size_t v) {
    data(thread)->_plab_desired_size = v;
  }

  static void enable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = true;
  }

  static void disable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = false;
  }

  static bool plab_retries_enabled(Thread* thread) {
    return data(thread)->_plab_retries_enabled;
  }

  static void enable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = true;
  }

  static void disable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = false;
  }

  static bool allow_plab_promotions(Thread* thread) {
    return data(thread)->_plab_allows_promotion;
  }

  static void reset_plab_promoted(Thread* thread) {
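    // Clear the per-plab promotion counter. Per the _plab_promoted comment above,
    // callers presumably do this when a plab is retired or replaced, once the
    // 'actual_size' minus 'promoted' difference has been reconciled with the old
    // generation's promotion reserve.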
    data(thread)->_plab_promoted = 0;
  }

  static void add_to_plab_promoted(Thread* thread, size_t increment) {
    data(thread)->_plab_promoted += increment;
  }

  static void subtract_from_plab_promoted(Thread* thread, size_t increment) {
    assert(data(thread)->_plab_promoted >= increment, "Cannot subtract more than remaining promoted");
    data(thread)->_plab_promoted -= increment;
  }

  static size_t get_plab_promoted(Thread* thread) {
    return data(thread)->_plab_promoted;
  }

  static void set_plab_actual_size(Thread* thread, size_t value) {
    data(thread)->_plab_actual_size = value;
  }

  static size_t get_plab_actual_size(Thread* thread) {
    return data(thread)->_plab_actual_size;
  }

  static void add_paced_time(Thread* thread, double v) {
    data(thread)->_paced_time += v;
  }

  static double paced_time(Thread* thread) {
    return data(thread)->_paced_time;
  }

  static void reset_paced_time(Thread* thread) {
    data(thread)->_paced_time = 0;
  }

  // Evacuation OOM handling
  static bool is_oom_during_evac(Thread* thread) {
    return data(thread)->_oom_during_evac;
  }

  static void set_oom_during_evac(Thread* thread, bool oom) {
    data(thread)->_oom_during_evac = oom;
  }

  static uint8_t evac_oom_scope_level(Thread* thread) {
    return data(thread)->_oom_scope_nesting_level;
  }

  // Push the scope one level deeper, return previous level
  static uint8_t push_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level < 254, "Overflow nesting level"); // UINT8_MAX = 255
    data(thread)->_oom_scope_nesting_level = level + 1;
    return level;
  }

  // Pop the scope by one level, return previous level
  static uint8_t pop_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level > 0, "Underflow nesting level");
    data(thread)->_oom_scope_nesting_level = level - 1;
    return level;
  }

  static bool is_evac_allowed(Thread* thread) {
    return evac_oom_scope_level(thread) > 0;
  }

  // Offsets
  static ByteSize satb_mark_queue_index_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();
  }

  static ByteSize satb_mark_queue_buffer_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf();
  }

  static ByteSize gc_state_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
  }
};

STATIC_ASSERT(sizeof(ShenandoahThreadLocalData) <= sizeof(GCThreadLocalData));

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
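
// A hedged usage sketch of the evacuation OOM scope counters above (illustrative
// only; the real call sites live elsewhere in Shenandoah and typically wrap the
// push/pop in a scope helper). The bracketing shown here and the failure handling
// are assumptions drawn from the accessor names, not from this header:
//
//   ShenandoahThreadLocalData::push_evac_oom_scope(thread);
//   // ... evacuate objects; is_evac_allowed(thread) now returns true ...
//   // ... on allocation failure, record it: set_oom_during_evac(thread, true) ...
//   ShenandoahThreadLocalData::pop_evac_oom_scope(thread);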