1 /*
  2  * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
 28 
 29 #include "gc/shared/gc_globals.hpp"
 30 #include "gc/shared/gcThreadLocalData.hpp"
 31 #include "gc/shared/plab.hpp"
 32 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 33 #include "gc/shenandoah/shenandoahAffiliation.hpp"
 34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 35 #include "gc/shenandoah/shenandoahCardTable.hpp"
 36 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
 37 #include "gc/shenandoah/shenandoahEvacTracker.hpp"
 38 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 39 #include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
 40 #include "runtime/javaThread.hpp"
 41 #include "utilities/debug.hpp"
 42 #include "utilities/sizes.hpp"
 43 
// Shenandoah's per-thread GC data. An instance lives inside the opaque
// GCThreadLocalData area embedded in every Thread, and is accessed only
// through the static accessors below, which locate it via data(thread).
class ShenandoahThreadLocalData {
private:
  // Thread-local mirror for global GC state
  // (a bitmask of ShenandoahHeap::GCState bits, kept in sync via set_gc_state()).
  char _gc_state;

  // Quickened version of GC state, use single bit to check the group of states.
  // Bit i is set iff _gc_state intersects the i-th state combination (see
  // FastGCState / FastGCStateBitPos below), so a compound condition such as
  // "has-forwarded or marking" is a single bit test instead of a mask-and-compare.
  char _gc_state_fast;

  // Evacuation OOM state
  // Depth of nested evacuation-OOM scopes; evacuation is allowed only while > 0
  // (see is_evac_allowed()).
  uint8_t                 _oom_scope_nesting_level;
  // True if this thread observed OOM during the current evacuation.
  bool                    _oom_during_evac;

  // Thread-local SATB (snapshot-at-the-beginning) barrier queue.
  SATBMarkQueue           _satb_mark_queue;

  // Current active CardTable's byte_map_base for this thread.
  CardTable::CardValue*   _card_table;

  // Thread-local allocation buffer for object evacuations.
  // In generational mode, it is exclusive to the young generation.
  PLAB* _gclab;
  size_t _gclab_size;

  // Thread-local allocation buffer only used in generational mode.
  // Used both by mutator threads and by GC worker threads
  // for evacuations within the old generation and
  // for promotions from the young generation into the old generation.
  PLAB* _plab;

  // Heuristics will grow the desired size of plabs.
  size_t _plab_desired_size;

  // Once the plab has been allocated, and we know the actual size, we record it here.
  size_t _plab_actual_size;

  // As the plab is used for promotions, this value is incremented. When the plab is
  // retired, the difference between 'actual_size' and 'promoted' will be returned to
  // the old generation's promotion reserve (i.e., it will be 'unexpended').
  size_t _plab_promoted;

  // If false, no more promotion by this thread during this evacuation phase.
  bool   _plab_allows_promotion;

  // If true, evacuations may attempt to allocate a smaller plab if the original size fails.
  bool   _plab_retries_enabled;

  // The gc-state combinations that barrier code wants to test as a unit.
  // Each enumerator's value is the mask of ShenandoahHeap::GCState bits
  // making up that combination.
  enum FastGCState {
    FORWARDED                    = ShenandoahHeap::HAS_FORWARDED,
    MARKING                      = ShenandoahHeap::MARKING,
    WEAK                         = ShenandoahHeap::WEAK_ROOTS,
    FORWARDED_OR_MARKING         = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING,
    FORWARDED_OR_WEAK            = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::WEAK_ROOTS,
    MARKING_OR_WEAK              = ShenandoahHeap::MARKING       | ShenandoahHeap::WEAK_ROOTS,
    FORWARDED_OR_MARKING_OR_WEAK = ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING    | ShenandoahHeap::WEAK_ROOTS,
  };

  // Bit position in _gc_state_fast assigned to each combination above.
  // Seven combinations fit in the 8 bits of the char-sized _gc_state_fast.
  enum FastGCStateBitPos {
    FORWARDED_BITPOS                    = 0,
    MARKING_BITPOS                      = 1,
    WEAK_BITPOS                         = 2,
    FORWARDED_OR_MARKING_BITPOS         = 3,
    FORWARDED_OR_WEAK_BITPOS            = 4,
    MARKING_OR_WEAK_BITPOS              = 5,
    FORWARDED_OR_MARKING_OR_WEAK_BITPOS = 6,
  };

  // Per-thread evacuation statistics; lifetime is managed by the out-of-line
  // constructor/destructor (defined elsewhere).
  ShenandoahEvacuationStats* _evacuation_stats;

  // Constructed/destroyed only through create()/destroy() below.
  ShenandoahThreadLocalData();
  ~ShenandoahThreadLocalData();

  // Locate this thread's instance inside the Thread's GC data area.
  static ShenandoahThreadLocalData* data(Thread* thread) {
    assert(UseShenandoahGC, "Sanity");
    return thread->gc_data<ShenandoahThreadLocalData>();
  }

  // Offset of the SATB queue from the Thread base; building block for the
  // public index/buffer offsets below.
  static ByteSize satb_mark_queue_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue);
  }

public:
  // Placement-construct the per-thread data in the Thread's GC data area.
  static void create(Thread* thread) {
    new (data(thread)) ShenandoahThreadLocalData();
  }

  // Explicit destructor call; the storage itself is owned by the Thread.
  static void destroy(Thread* thread) {
    data(thread)->~ShenandoahThreadLocalData();
  }

  static SATBMarkQueue& satb_mark_queue(Thread* thread) {
    return data(thread)->_satb_mark_queue;
  }

  // Map an exact gc-state combination to its fast-bit position.
  // gc_state must be exactly one of the FastGCState values; any other
  // value is a programming error (ShouldNotReachHere).
  static char gc_state_to_fast_bit(char gc_state) {
    if (gc_state == FORWARDED)                    return FORWARDED_BITPOS;
    if (gc_state == MARKING)                      return MARKING_BITPOS;
    if (gc_state == WEAK)                         return WEAK_BITPOS;
    if (gc_state == FORWARDED_OR_MARKING)         return FORWARDED_OR_MARKING_BITPOS;
    if (gc_state == FORWARDED_OR_WEAK)            return FORWARDED_OR_WEAK_BITPOS;
    if (gc_state == MARKING_OR_WEAK)              return MARKING_OR_WEAK_BITPOS;
    if (gc_state == FORWARDED_OR_MARKING_OR_WEAK) return FORWARDED_OR_MARKING_OR_WEAK_BITPOS;
    ShouldNotReachHere();
    return 0;
  }

  // Single-bit fast mask for an exact gc-state combination.
  static char gc_state_to_fast(char gc_state) {
    return 1 << gc_state_to_fast_bit(gc_state);
  }

  // Compute the full fast mask for an arbitrary gc_state: set bit i for
  // every combination i that *intersects* gc_state. Note intersection, not
  // equality: e.g. MARKING alone also sets the FORWARDED_OR_MARKING,
  // MARKING_OR_WEAK and FORWARDED_OR_MARKING_OR_WEAK bits, so testing
  // "any of these states active" needs only one bit of _gc_state_fast.
  static char compute_gc_state_fast(char gc_state) {
    char fast = 0;
    if ((gc_state & FORWARDED) > 0)                    fast |= (1 << FORWARDED_BITPOS);
    if ((gc_state & MARKING) > 0)                      fast |= (1 << MARKING_BITPOS);
    if ((gc_state & WEAK) > 0)                         fast |= (1 << WEAK_BITPOS);
    if ((gc_state & FORWARDED_OR_MARKING) > 0)         fast |= (1 << FORWARDED_OR_MARKING_BITPOS);
    if ((gc_state & FORWARDED_OR_WEAK) > 0)            fast |= (1 << FORWARDED_OR_WEAK_BITPOS);
    if ((gc_state & MARKING_OR_WEAK) > 0)              fast |= (1 << MARKING_OR_WEAK_BITPOS);
    if ((gc_state & FORWARDED_OR_MARKING_OR_WEAK) > 0) fast |= (1 << FORWARDED_OR_MARKING_OR_WEAK_BITPOS);
    return fast;
  }

  // Set both the raw state and a caller-precomputed fast mask.
  // The caller is responsible for gc_state_fast being consistent with gc_state.
  static void set_gc_state(Thread* thread, char gc_state, char gc_state_fast) {
    data(thread)->_gc_state = gc_state;
    data(thread)->_gc_state_fast = gc_state_fast;
  }

  // Set the raw state, deriving the fast mask from it.
  static void set_gc_state(Thread* thread, char gc_state) {
    set_gc_state(thread, gc_state, compute_gc_state_fast(gc_state));
  }

  static char gc_state(Thread* thread) {
    return data(thread)->_gc_state;
  }

  // True if the thread-local state shares any bit with 'state'.
  static bool is_gc_state(Thread* thread, ShenandoahHeap::GCState state) {
    return (gc_state(thread) & state) != 0;
  }

  // Convenience overload for the current thread.
  static bool is_gc_state(ShenandoahHeap::GCState state) {
    return is_gc_state(Thread::current(), state);
  }

  static void set_card_table(Thread* thread, CardTable::CardValue* ct) {
    assert(ct != nullptr, "trying to set thread local card_table pointer to nullptr.");
    data(thread)->_card_table = ct;
  }

  static CardTable::CardValue* card_table(Thread* thread) {
    CardTable::CardValue* ct = data(thread)->_card_table;
    assert(ct != nullptr, "returning a null thread local card_table pointer.");
    return ct;
  }

  // One-time creation of the evacuation buffer(s) for this thread.
  // In generational mode, the old-gen plab size is aligned up to the card
  // size — presumably so plab boundaries coincide with card boundaries for
  // remembered-set maintenance; confirm against plab retirement code.
  static void initialize_gclab(Thread* thread) {
    assert(data(thread)->_gclab == nullptr, "Only initialize once");
    data(thread)->_gclab = new PLAB(PLAB::min_size());
    data(thread)->_gclab_size = 0;

    if (ShenandoahHeap::heap()->mode()->is_generational()) {
      data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
      data(thread)->_plab_desired_size = 0;
    }
  }

  static PLAB* gclab(Thread* thread) {
    return data(thread)->_gclab;
  }

  static size_t gclab_size(Thread* thread) {
    return data(thread)->_gclab_size;
  }

  static void set_gclab_size(Thread* thread, size_t v) {
    data(thread)->_gclab_size = v;
  }

  // Record the start of an evacuation of 'bytes' from one affiliation to another.
  static void begin_evacuation(Thread* thread, size_t bytes, ShenandoahAffiliation from, ShenandoahAffiliation to) {
    data(thread)->_evacuation_stats->begin_evacuation(bytes, from, to);
  }

  // Record the completion of an evacuation of 'bytes'.
  static void end_evacuation(Thread* thread, size_t bytes, ShenandoahAffiliation from, ShenandoahAffiliation to) {
    data(thread)->_evacuation_stats->end_evacuation(bytes, from, to);
  }

  static ShenandoahEvacuationStats* evacuation_stats(Thread* thread) {
    return data(thread)->_evacuation_stats;
  }

  static PLAB* plab(Thread* thread) {
    return data(thread)->_plab;
  }

  // Note: returns the heuristics' *desired* plab size, not the actual size
  // of the currently-allocated plab (see get_plab_actual_size for that).
  static size_t plab_size(Thread* thread) {
    return data(thread)->_plab_desired_size;
  }

  static void set_plab_size(Thread* thread, size_t v) {
    data(thread)->_plab_desired_size = v;
  }

  static void enable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = true;
  }

  static void disable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = false;
  }

  static bool plab_retries_enabled(Thread* thread) {
    return data(thread)->_plab_retries_enabled;
  }

  static void enable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = true;
  }

  static void disable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = false;
  }

  static bool allow_plab_promotions(Thread* thread) {
    return data(thread)->_plab_allows_promotion;
  }

  static void reset_plab_promoted(Thread* thread) {
    data(thread)->_plab_promoted = 0;
  }

  static void add_to_plab_promoted(Thread* thread, size_t increment) {
    data(thread)->_plab_promoted += increment;
  }

  static void subtract_from_plab_promoted(Thread* thread, size_t increment) {
    assert(data(thread)->_plab_promoted >= increment, "Cannot subtract more than remaining promoted");
    data(thread)->_plab_promoted -= increment;
  }

  static size_t get_plab_promoted(Thread* thread) {
    return data(thread)->_plab_promoted;
  }

  static void set_plab_actual_size(Thread* thread, size_t value) {
    data(thread)->_plab_actual_size = value;
  }

  static size_t get_plab_actual_size(Thread* thread) {
    return data(thread)->_plab_actual_size;
  }

  // Evacuation OOM handling
  static bool is_oom_during_evac(Thread* thread) {
    return data(thread)->_oom_during_evac;
  }

  static void set_oom_during_evac(Thread* thread, bool oom) {
    data(thread)->_oom_during_evac = oom;
  }

  static uint8_t evac_oom_scope_level(Thread* thread) {
    return data(thread)->_oom_scope_nesting_level;
  }

  // Push the scope one level deeper, return previous level
  static uint8_t push_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level < 254, "Overflow nesting level"); // UINT8_MAX = 255
    data(thread)->_oom_scope_nesting_level = level + 1;
    return level;
  }

  // Pop the scope by one level, return previous level
  static uint8_t pop_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level > 0, "Underflow nesting level");
    data(thread)->_oom_scope_nesting_level = level - 1;
    return level;
  }

  // Evacuation is only permitted inside at least one evac-OOM scope.
  static bool is_evac_allowed(Thread* thread) {
    return evac_oom_scope_level(thread) > 0;
  }

  // Offsets
  // Byte offsets of fields relative to the Thread base, for code that
  // addresses these fields directly (e.g., compiler-generated barriers).
  static ByteSize satb_mark_queue_index_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();
  }

  static ByteSize satb_mark_queue_buffer_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf();
  }

  static ByteSize gc_state_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
  }

  static ByteSize gc_state_fast_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state_fast);
  }

  static ByteSize card_table_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _card_table);
  }
};
346 
// ShenandoahThreadLocalData is placement-constructed into the opaque
// GCThreadLocalData buffer embedded in every Thread, so it must fit there.
STATIC_ASSERT(sizeof(ShenandoahThreadLocalData) <= sizeof(GCThreadLocalData));
348 
349 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP