/*
 * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP

#include "gc/shared/cardTable.hpp"
#include "gc/shared/gcThreadLocalData.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shenandoah/mode/shenandoahMode.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/debug.hpp"
#include "utilities/sizes.hpp"

class ShenandoahThreadLocalData {
private:
  char _gc_state;
  // Evacuation OOM state
  uint8_t                 _oom_scope_nesting_level;
  bool                    _oom_during_evac;

  SATBMarkQueue           _satb_mark_queue;

  // Thread-local allocation buffer for object evacuations.
  // In generational mode, it is exclusive to the young generation.
  PLAB* _gclab;
  size_t _gclab_size;

  double _paced_time;

  // Thread-local allocation buffer only used in generational mode.
  // Used both by mutator threads and by GC worker threads
  // for evacuations within the old generation and
  // for promotions from the young generation into the old generation.
  PLAB* _plab;
  size_t _plab_size;

  size_t _plab_evacuated;
  size_t _plab_promoted;
  size_t _plab_preallocated_promoted;
  bool   _plab_allows_promotion; // If false, no more promotion by this thread during this evacuation phase.
  bool   _plab_retries_enabled;

  ShenandoahEvacuationStats* _evacuation_stats;

  ShenandoahThreadLocalData();
  ~ShenandoahThreadLocalData();

  static ShenandoahThreadLocalData* data(Thread* thread) {
    assert(UseShenandoahGC, "Sanity");
    return thread->gc_data<ShenandoahThreadLocalData>();
  }

  static ByteSize satb_mark_queue_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue);
  }

public:
  static void create(Thread* thread) {
    new (data(thread)) ShenandoahThreadLocalData();
  }

  static void destroy(Thread* thread) {
    data(thread)->~ShenandoahThreadLocalData();
  }

  static SATBMarkQueue& satb_mark_queue(Thread* thread) {
    return data(thread)->_satb_mark_queue;
  }

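  // The heap's global gc-state flags are mirrored into this thread-local
  // field so that barrier code can read them with a single thread-relative
  // load (see gc_state_offset() below). The copy is only kept in sync for
  // Java threads, hence the assert in gc_state().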
  static void set_gc_state(Thread* thread, char gc_state) {
    data(thread)->_gc_state = gc_state;
  }

  static char gc_state(Thread* thread) {
    assert(thread->is_Java_thread(), "GC state is only synchronized to java threads");
    return data(thread)->_gc_state;
  }

  static void initialize_gclab(Thread* thread) {
    assert(thread->is_Java_thread() || thread->is_Worker_thread(), "Only Java and GC worker threads are allowed to get GCLABs");
    assert(data(thread)->_gclab == nullptr, "Only initialize once");
    data(thread)->_gclab = new PLAB(PLAB::min_size());
    data(thread)->_gclab_size = 0;

    // TODO:
    //   Only initialize _plab if (!Universe::is_fully_initialized() || ShenandoahHeap::heap()->mode()->is_generational()).
    //   Otherwise, set _plab to nullptr.
    // The problem is that code sprinkled throughout asserts (plab != nullptr) and would need to be
    // fixed up first. Perhaps those assertions are overzealous; a sketch of the guarded
    // initialization follows below.

    // In theory, PLABs are only needed if heap->mode()->is_generational(). However, some threads
    // are instantiated before we are able to answer that question.
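    //
    // A minimal sketch of the guarded initialization the TODO above asks for,
    // assuming the (plab != nullptr) assertions have been relaxed first
    // (hypothetical, not wired in):
    //
    //   if (!Universe::is_fully_initialized() ||
    //       ShenandoahHeap::heap()->mode()->is_generational()) {
    //     data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
    //   } else {
    //     data(thread)->_plab = nullptr;
    //   }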
    data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
    data(thread)->_plab_size = 0;
  }

  static PLAB* gclab(Thread* thread) {
    return data(thread)->_gclab;
  }

  static size_t gclab_size(Thread* thread) {
    return data(thread)->_gclab_size;
  }

  static void set_gclab_size(Thread* thread, size_t v) {
    data(thread)->_gclab_size = v;
  }

  static void begin_evacuation(Thread* thread, size_t bytes) {
    data(thread)->_evacuation_stats->begin_evacuation(bytes);
  }

  static void end_evacuation(Thread* thread, size_t bytes) {
    data(thread)->_evacuation_stats->end_evacuation(bytes);
  }

  static void record_age(Thread* thread, size_t bytes, uint age) {
    data(thread)->_evacuation_stats->record_age(bytes, age);
  }

  static ShenandoahEvacuationStats* evacuation_stats(Thread* thread) {
    return data(thread)->_evacuation_stats;
  }

  static PLAB* plab(Thread* thread) {
    return data(thread)->_plab;
  }

  static size_t plab_size(Thread* thread) {
    return data(thread)->_plab_size;
  }

  static void set_plab_size(Thread* thread, size_t v) {
    data(thread)->_plab_size = v;
  }

  static void enable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = true;
  }

  static void disable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = false;
  }

  static bool plab_retries_enabled(Thread* thread) {
    return data(thread)->_plab_retries_enabled;
  }

  static void enable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = true;
  }

  static void disable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = false;
  }

  static bool allow_plab_promotions(Thread* thread) {
    return data(thread)->_plab_allows_promotion;
  }

  static void reset_plab_evacuated(Thread* thread) {
    data(thread)->_plab_evacuated = 0;
  }

  static void add_to_plab_evacuated(Thread* thread, size_t increment) {
    data(thread)->_plab_evacuated += increment;
  }

  static void subtract_from_plab_evacuated(Thread* thread, size_t increment) {
    assert(data(thread)->_plab_evacuated >= increment, "Underflow in _plab_evacuated");
    data(thread)->_plab_evacuated -= increment;
  }

  static size_t get_plab_evacuated(Thread* thread) {
    return data(thread)->_plab_evacuated;
  }

  static void reset_plab_promoted(Thread* thread) {
    data(thread)->_plab_promoted = 0;
  }

  static void add_to_plab_promoted(Thread* thread, size_t increment) {
    data(thread)->_plab_promoted += increment;
  }

  static void subtract_from_plab_promoted(Thread* thread, size_t increment) {
    assert(data(thread)->_plab_promoted >= increment, "Underflow in _plab_promoted");
    data(thread)->_plab_promoted -= increment;
  }

  static size_t get_plab_promoted(Thread* thread) {
    return data(thread)->_plab_promoted;
  }

  static void set_plab_preallocated_promoted(Thread* thread, size_t value) {
    data(thread)->_plab_preallocated_promoted = value;
  }

  static size_t get_plab_preallocated_promoted(Thread* thread) {
    return data(thread)->_plab_preallocated_promoted;
  }

  static void add_paced_time(Thread* thread, double v) {
    data(thread)->_paced_time += v;
  }

  static double paced_time(Thread* thread) {
    return data(thread)->_paced_time;
  }

  static void reset_paced_time(Thread* thread) {
    data(thread)->_paced_time = 0;
  }

  // Evacuation OOM handling
  static bool is_oom_during_evac(Thread* thread) {
    return data(thread)->_oom_during_evac;
  }

  static void set_oom_during_evac(Thread* thread, bool oom) {
    data(thread)->_oom_during_evac = oom;
  }

  static uint8_t evac_oom_scope_level(Thread* thread) {
    return data(thread)->_oom_scope_nesting_level;
  }

  // Push the scope one level deeper, return previous level
  static uint8_t push_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level < 254, "Overflow nesting level"); // UINT8_MAX = 255
    data(thread)->_oom_scope_nesting_level = level + 1;
    return level;
  }

  // Pop the scope by one level, return previous level
  static uint8_t pop_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level > 0, "Underflow nesting level");
    data(thread)->_oom_scope_nesting_level = level - 1;
    return level;
  }

  static bool is_evac_allowed(Thread* thread) {
    return evac_oom_scope_level(thread) > 0;
  }
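
  // push/pop bracket regions that are allowed to evacuate, and is_evac_allowed()
  // is the guard that evacuation paths consult. A sketch of the intended
  // protocol (elsewhere an RAII scope object typically does this bracketing):
  //
  //   uint8_t prev = push_evac_oom_scope(thread);  // enter evacuation scope
  //   ... evacuate; on failure, set_oom_during_evac(thread, true) ...
  //   pop_evac_oom_scope(thread);                  // leave evacuation scope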

  // Offsets
  static ByteSize satb_mark_queue_active_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active();
  }

  static ByteSize satb_mark_queue_index_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();
  }

  static ByteSize satb_mark_queue_buffer_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf();
  }

  static ByteSize gc_state_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
  }
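
  // These ByteSize offsets are relative to the Thread object, so interpreter
  // and JIT barrier stubs can load the fields straight off the thread
  // register. A sketch of the kind of address a backend would form
  // (x86_64 flavor; hypothetical snippet):
  //
  //   Address gc_state_addr(r15_thread,
  //                         in_bytes(ShenandoahThreadLocalData::gc_state_offset()));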
};

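// Each GC's thread-local data must fit in the opaque GCThreadLocalData
// buffer embedded in Thread; this compile-time check guards that bound.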
STATIC_ASSERT(sizeof(ShenandoahThreadLocalData) <= sizeof(GCThreadLocalData));

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP