1 /*
  2  * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  5  *
  6  * This code is free software; you can redistribute it and/or modify it
  7  * under the terms of the GNU General Public License version 2 only, as
  8  * published by the Free Software Foundation.
  9  *
 10  * This code is distributed in the hope that it will be useful, but WITHOUT
 11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 13  * version 2 for more details (a copy is included in the LICENSE file that
 14  * accompanied this code).
 15  *
 16  * You should have received a copy of the GNU General Public License version
 17  * 2 along with this work; if not, write to the Free Software Foundation,
 18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 19  *
 20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 21  * or visit www.oracle.com if you need additional information or have any
 22  * questions.
 23  *
 24  */
 25 
 26 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
 27 #define SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP
 28 
 29 #include "gc/shared/gc_globals.hpp"
 30 #include "gc/shared/gcThreadLocalData.hpp"
 31 #include "gc/shared/plab.hpp"
 32 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 33 #include "gc/shenandoah/shenandoahAffiliation.hpp"
 34 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
 35 #include "gc/shenandoah/shenandoahCardTable.hpp"
 36 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
 37 #include "gc/shenandoah/shenandoahEvacTracker.hpp"
 38 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 39 #include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
 40 #include "runtime/javaThread.hpp"
 41 #include "utilities/debug.hpp"
 42 #include "utilities/sizes.hpp"
 43 
// Shenandoah's per-thread GC data. An instance is placement-constructed into
// the opaque GCThreadLocalData slot embedded in each Thread (see create() and
// destroy() below); the STATIC_ASSERT following this class verifies that it
// fits into that slot.
//
// Several fields are additionally exposed by byte offset relative to the
// owning Thread (see the "Offsets" section at the bottom) — presumably so that
// compiler-generated barrier/stub code can load them directly; confirm against
// the barrier-set assembler before reordering any member.
class ShenandoahThreadLocalData {
private:
  // Thread-local copy of the heap's GC state bitmask (ShenandoahHeap::GCState);
  // queried via is_gc_state() and exposed to external code via gc_state_offset().
  char _gc_state;
  // Evacuation OOM state
  uint8_t                 _oom_scope_nesting_level;  // depth of nested evac-OOM scopes; 0 => evacuation not allowed
  bool                    _oom_during_evac;          // set when this thread observed OOM during evacuation

  SATBMarkQueue           _satb_mark_queue;

  // Current active CardTable's byte_map_base for this thread.
  CardTable::CardValue*   _card_table;

  // Thread-local allocation buffer for object evacuations.
  // In generational mode, it is exclusive to the young generation.
  PLAB* _gclab;
  size_t _gclab_size;

  // Thread-local allocation buffer only used in generational mode.
  // Used both by mutator threads and by GC worker threads
  // for evacuations within the old generation and
  // for promotions from the young generation into the old generation.
  PLAB* _plab;

  // Heuristics will grow the desired size of plabs.
  size_t _plab_desired_size;

  // Once the plab has been allocated, and we know the actual size, we record it here.
  size_t _plab_actual_size;

  // As the plab is used for promotions, this value is incremented. When the plab is
  // retired, the difference between 'actual_size' and 'promoted' will be returned to
  // the old generation's promotion reserve (i.e., it will be 'unexpended').
  size_t _plab_promoted;

  // If false, no more promotion by this thread during this evacuation phase.
  bool   _plab_allows_promotion;

  // If true, evacuations may attempt to allocate a smaller plab if the original size fails.
  bool   _plab_retries_enabled;

  // Per-thread evacuation statistics (see begin_evacuation()/end_evacuation()).
  ShenandoahEvacuationStats* _evacuation_stats;

  // Construction/destruction happen only through create()/destroy() below.
  ShenandoahThreadLocalData();
  ~ShenandoahThreadLocalData();

  // Locate this thread's Shenandoah data inside its GCThreadLocalData storage.
  static ShenandoahThreadLocalData* data(Thread* thread) {
    assert(UseShenandoahGC, "Sanity");
    return thread->gc_data<ShenandoahThreadLocalData>();
  }

  // Byte offset of the SATB queue from the start of the owning Thread;
  // base for the index/buffer offsets published in the public section.
  static ByteSize satb_mark_queue_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _satb_mark_queue);
  }

public:
  // Placement-construct this thread's Shenandoah data in its GC data slot.
  static void create(Thread* thread) {
    new (data(thread)) ShenandoahThreadLocalData();
  }

  // Explicitly destroy the data created by create(); storage is owned by Thread.
  static void destroy(Thread* thread) {
    data(thread)->~ShenandoahThreadLocalData();
  }

  static SATBMarkQueue& satb_mark_queue(Thread* thread) {
    return data(thread)->_satb_mark_queue;
  }

  static void set_gc_state(Thread* thread, char gc_state) {
    data(thread)->_gc_state = gc_state;
  }

  static char gc_state(Thread* thread) {
    return data(thread)->_gc_state;
  }

  // The gc state is a bitmask: true iff any bit of 'state' is set
  // in this thread's cached copy.
  static bool is_gc_state(Thread* thread, ShenandoahHeap::GCState state) {
    return (gc_state(thread) & state) != 0;
  }

  // Convenience overload for the current thread.
  static bool is_gc_state(ShenandoahHeap::GCState state) {
    return is_gc_state(Thread::current(), state);
  }

  static void set_card_table(Thread* thread, CardTable::CardValue* ct) {
    assert(ct != nullptr, "trying to set thread local card_table pointer to nullptr.");
    data(thread)->_card_table = ct;
  }

  static CardTable::CardValue* card_table(Thread* thread) {
    CardTable::CardValue* ct = data(thread)->_card_table;
    assert(ct != nullptr, "returning a null thread local card_table pointer.");
    return ct;
  }

  // One-time allocation of this thread's evacuation buffer(s). In generational
  // mode an old-gen plab is also created, with its minimum size rounded up to
  // a whole number of cards.
  static void initialize_gclab(Thread* thread) {
    assert(data(thread)->_gclab == nullptr, "Only initialize once");
    data(thread)->_gclab = new PLAB(PLAB::min_size());
    data(thread)->_gclab_size = 0;

    if (ShenandoahHeap::heap()->mode()->is_generational()) {
      data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
      data(thread)->_plab_desired_size = 0;
    }
  }

  static PLAB* gclab(Thread* thread) {
    return data(thread)->_gclab;
  }

  static size_t gclab_size(Thread* thread) {
    return data(thread)->_gclab_size;
  }

  static void set_gclab_size(Thread* thread, size_t v) {
    data(thread)->_gclab_size = v;
  }

  // Record the start of an evacuation of 'bytes' from generation 'from' to 'to'
  // in this thread's evacuation statistics.
  static void begin_evacuation(Thread* thread, size_t bytes, ShenandoahAffiliation from, ShenandoahAffiliation to) {
    data(thread)->_evacuation_stats->begin_evacuation(bytes, from, to);
  }

  // Record the completion of an evacuation previously started with begin_evacuation().
  static void end_evacuation(Thread* thread, size_t bytes, ShenandoahAffiliation from, ShenandoahAffiliation to) {
    data(thread)->_evacuation_stats->end_evacuation(bytes, from, to);
  }

  static ShenandoahEvacuationStats* evacuation_stats(Thread* thread) {
    return data(thread)->_evacuation_stats;
  }

  static PLAB* plab(Thread* thread) {
    return data(thread)->_plab;
  }

  // Note: returns the heuristically *desired* plab size, not the actual one
  // (see get_plab_actual_size() for the latter).
  static size_t plab_size(Thread* thread) {
    return data(thread)->_plab_desired_size;
  }

  static void set_plab_size(Thread* thread, size_t v) {
    data(thread)->_plab_desired_size = v;
  }

  static void enable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = true;
  }

  static void disable_plab_retries(Thread* thread) {
    data(thread)->_plab_retries_enabled = false;
  }

  static bool plab_retries_enabled(Thread* thread) {
    return data(thread)->_plab_retries_enabled;
  }

  static void enable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = true;
  }

  static void disable_plab_promotions(Thread* thread) {
    data(thread)->_plab_allows_promotion = false;
  }

  static bool allow_plab_promotions(Thread* thread) {
    return data(thread)->_plab_allows_promotion;
  }

  static void reset_plab_promoted(Thread* thread) {
    data(thread)->_plab_promoted = 0;
  }

  static void add_to_plab_promoted(Thread* thread, size_t increment) {
    data(thread)->_plab_promoted += increment;
  }

  static void subtract_from_plab_promoted(Thread* thread, size_t increment) {
    assert(data(thread)->_plab_promoted >= increment, "Cannot subtract more than remaining promoted");
    data(thread)->_plab_promoted -= increment;
  }

  static size_t get_plab_promoted(Thread* thread) {
    return data(thread)->_plab_promoted;
  }

  static void set_plab_actual_size(Thread* thread, size_t value) {
    data(thread)->_plab_actual_size = value;
  }

  static size_t get_plab_actual_size(Thread* thread) {
    return data(thread)->_plab_actual_size;
  }

  // Evacuation OOM handling
  static bool is_oom_during_evac(Thread* thread) {
    return data(thread)->_oom_during_evac;
  }

  static void set_oom_during_evac(Thread* thread, bool oom) {
    data(thread)->_oom_during_evac = oom;
  }

  static uint8_t evac_oom_scope_level(Thread* thread) {
    return data(thread)->_oom_scope_nesting_level;
  }

  // Push the scope one level deeper, return previous level
  static uint8_t push_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level < 254, "Overflow nesting level"); // UINT8_MAX = 255
    data(thread)->_oom_scope_nesting_level = level + 1;
    return level;
  }

  // Pop the scope by one level, return previous level
  static uint8_t pop_evac_oom_scope(Thread* thread) {
    uint8_t level = evac_oom_scope_level(thread);
    assert(level > 0, "Underflow nesting level");
    data(thread)->_oom_scope_nesting_level = level - 1;
    return level;
  }

  // Evacuation is only permitted inside at least one evac-OOM scope.
  static bool is_evac_allowed(Thread* thread) {
    return evac_oom_scope_level(thread) > 0;
  }

  // Offsets
  // Byte offsets (relative to the owning Thread) of fields accessed by code
  // outside this class; keep member layout stable — see class comment.
  static ByteSize satb_mark_queue_index_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();
  }

  static ByteSize satb_mark_queue_buffer_offset() {
    return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_buf();
  }

  static ByteSize gc_state_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
  }

  static ByteSize card_table_offset() {
    return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _card_table);
  }
};
284 
285 STATIC_ASSERT(sizeof(ShenandoahThreadLocalData) <= sizeof(GCThreadLocalData));
286 
287 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP