1 /*
2 * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/g1OopClosures.hpp"
30 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
31 #include "gc/shared/ageTable.hpp"
32 #include "gc/shared/copyFailedInfo.hpp"
33 #include "gc/shared/gc_globals.hpp"
34 #include "gc/shared/partialArraySplitter.hpp"
35 #include "gc/shared/partialArrayState.hpp"
36 #include "gc/shared/stringdedup/stringDedup.hpp"
37 #include "gc/shared/taskqueue.hpp"
38 #include "memory/allocation.hpp"
39 #include "oops/oop.hpp"
40 #include "utilities/ticks.hpp"
41
42 class G1CardTable;
43 class G1CollectionSet;
44 class G1EvacFailureRegions;
45 class G1EvacuationRootClosures;
46 class G1OopStarChunkedList;
47 class G1PLABAllocator;
48 class G1HeapRegion;
49 class outputStream;
50
// Per-GC-worker-thread state used during a G1 evacuation pause. Bundles the
// worker's task queue, PLAB allocator, card-marking state and local statistics
// while live objects are being copied to survivor/old regions.
class G1ParScanThreadState : public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;
  // This worker's queue of outstanding scanner tasks (oops, partial arrays).
  G1ScannerTasksQueue* _task_queue;
  G1CardTable* _ct;
  // Closures applied during root scanning for this worker.
  G1EvacuationRootClosures* _closures;

  // Per-destination PLAB allocator used when copying objects.
  G1PLABAllocator* _plab_allocator;

  // Age statistics for objects copied by this worker.
  AgeTable _age_table;
  // Local tenuring threshold.
  uint _tenuring_threshold;
  // Closure applied to the fields of just-evacuated objects.
  G1ScanEvacuatedObjClosure _scanner;

  // Id of the GC worker thread owning this state.
  uint _worker_id;

  // Best-effort card-mark counters; see num_cards_pending()/num_cards_marked().
  size_t _num_cards_marked_dirty;    // Cards marked for later (concurrent) refinement.
  size_t _num_cards_marked_to_cset;  // Cards referencing the next collection set (e.g. survivors).

  // Upper and lower threshold to start and end work queue draining.
  uint const _stack_trim_upper_threshold;
  uint const _stack_trim_lower_threshold;

  // Accumulated time spent trimming the task queue (see trim_ticks()).
  Tickspan _trim_ticks;
  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. Base is the pointer we get back from the malloc call.
  size_t* _surviving_young_words_base;
  // This points into the array above, as we use the first few entries for padding.
  size_t* _surviving_young_words;
  // Number of elements in the array above.
  size_t _surviving_words_length;
  // Indicates whether in the last generation (old) there is no more space
  // available for allocation.
  bool _old_gen_is_full;
  // Splits large object arrays into partial-array tasks for parallel scanning.
  PartialArraySplitter _partial_array_splitter;
  // String deduplication requests issued by this worker.
  StringDedup::Requests _string_dedup_requests;

  G1CardTable* ct() { return _ct; }

  // Maximum number of optional regions at start of gc.
  size_t _max_num_optional_regions;
  // One chunked list per optional region, recording references into it.
  G1OopStarChunkedList* _oops_into_optional_regions;

  G1NUMA* _numa;
  // Records how many object allocations happened at each node during copy to survivor.
  // Only starts recording when log of gc+heap+numa is enabled and its data is
  // transferred when flushed.
  size_t* _obj_alloc_stat;

  // Per-thread evacuation failure data structures.
  ALLOCATION_FAILURE_INJECTOR_ONLY(size_t _allocation_failure_inject_counter;)

  EvacuationFailedInfo _evacuation_failed_info;
  G1EvacFailureRegions* _evac_failure_regions;
  // Number of additional cards into evacuation failed regions.
  size_t _num_cards_from_evac_failure;

  // Mark the card if not already in the set; this is a best-effort attempt on
  // detecting duplicates.
  template <class T> bool mark_if_new(T* p, bool into_survivor);
  // Mark the card of p into the (evacuation failed) region.
  template <class T> void mark_card_into_evac_fail_region(T* p, oop obj);

  // Test hook: returns whether an allocation failure should be injected for
  // the given region; compiled away when the injector is disabled.
  bool inject_allocation_failure(uint region_idx) ALLOCATION_FAILURE_INJECTOR_RETURN_( return false; );

public:
  G1ParScanThreadState(G1CollectedHeap* g1h,
                       uint worker_id,
                       uint num_workers,
                       G1CollectionSet* collection_set,
                       G1EvacFailureRegions* evac_failure_regions);
  virtual ~G1ParScanThreadState();

  void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }

#ifdef ASSERT
  bool queue_is_empty() const { return _task_queue->is_empty(); }
#endif

  // Debug-only sanity checks of queued tasks; no-ops in product builds.
  void verify_task(narrowOop* task) const NOT_DEBUG_RETURN;
  void verify_task(oop* task) const NOT_DEBUG_RETURN;
  void verify_task(PartialArrayState* task) const NOT_DEBUG_RETURN;
  void verify_task(ScannerTask task) const NOT_DEBUG_RETURN;

  // Push a task onto this worker's local task queue.
  void push_on_queue(ScannerTask task);

  // Apply the post barrier to the given reference field. Marks the card of p
  // if the barrier does not filter out the reference for some reason (e.g.
  // p and q are in the same region, p is in survivor, p is in collection set)
  // To be called during GC if nothing particular about p and obj are known.
  template <class T> void write_ref_field_post(T* p, oop obj);

  // Mark the card if the reference's target region's remembered set is tracked.
  // Assumes that a significant amount of pre-filtering (like done by
  // write_ref_field_post() above) has already been performed.
  template <class T> void mark_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o);

  G1EvacuationRootClosures* closures() { return _closures; }
  uint worker_id() { return _worker_id; }

  // PLAB waste statistics, in HeapWords.
  size_t lab_waste_words() const;
  size_t lab_undo_waste_words() const;

  // Newly marked cards during this garbage collection, to be refined concurrently
  // later. Contains both marks generated by new cross-region references as well
  // as cards generated from regions into evacuation failed regions.
  // Does not contain cards into the next collection set (e.g. survivors) - they will not
  // be refined concurrently. Calculation is done on a best-effort basis.
  size_t num_cards_pending() const;
  // Number of cards newly generated by references into evacuation failed regions.
  // Calculation is done on a best-effort basis.
  size_t num_cards_from_evac_failure() const;
  // Sum of cards marked by evacuation. Contains both pending cards as well as cards
  // into the next collection set (e.g. survivors).
  size_t num_cards_marked() const;

  // Pass locally gathered statistics to global state. Returns the total number of
  // HeapWords copied.
  size_t flush_stats(size_t* surviving_young_words, uint num_workers);

#if TASKQUEUE_STATS
  PartialArrayTaskStats* partial_array_task_stats();
#endif // TASKQUEUE_STATS

private:
  // Scan one chunk of a partial object array task; stolen indicates whether
  // the task was taken from another worker's queue.
  void do_partial_array(PartialArrayState* state, bool stolen);
  // Set up partial-array processing for a large object array copied from
  // 'from' to 'to'.
  void start_partial_objarray(oop from, oop to);
  // Scan the elements of obj in the index range [start, end).
  void process_array_chunk(objArrayOop obj, size_t start, size_t end);

  // Out-of-line allocation path for copying an object when the fast PLAB
  // allocation did not succeed; may update *dest_attr.
  HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                               Klass* klass,
                               size_t word_sz,
                               uint age,
                               uint node_index);

  // Retract an allocation of word_sz words at obj_ptr that ended up unused.
  void undo_allocation(G1HeapRegionAttr dest_addr,
                       HeapWord* obj_ptr,
                       size_t word_sz,
                       uint node_index);

  // Update the block-offset table for the newly copied obj.
  void update_bot_after_copying(oop obj, size_t word_sz);

  // Iterate the fields of the copied object, scheduling further work as needed.
  void do_iterate_object(oop const obj,
                         oop const old,
                         Klass* const klass,
                         G1HeapRegionAttr const region_attr,
                         G1HeapRegionAttr const dest_attr,
                         uint age);
  oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                oop obj,
                                markWord old_mark);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> void do_oop_evac(T* p);

  // Dispatch a dequeued task to the matching handler; stolen indicates whether
  // it came from another worker's queue.
  void dispatch_task(ScannerTask task, bool stolen);

  // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
  // allocate into dest. Previous_plab_refill_failed indicates whether previous
  // PLAB refill for the original (source) object failed.
  // Returns a non-null pointer if successful, and updates dest if required.
  // Also determines whether we should continue to try to allocate into the various
  // generations or just end trying to allocate.
  HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
                                  size_t word_sz,
                                  bool previous_plab_refill_failed,
                                  uint node_index);

  // Determine the destination region attributes for the next copy attempt,
  // possibly adjusting age.
  inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);

  // Report a promotion event for the copied object (presumably for event
  // tracing/JFR — confirm against the implementation).
  void report_promotion_event(G1HeapRegionAttr const dest_attr,
                              Klass* klass, size_t word_sz, uint age,
                              HeapWord * const obj_ptr, uint node_index) const;

  // Drain the local task queue down to the given number of entries.
  void trim_queue_to_threshold(uint threshold);

  // Whether the queue exceeds the upper trim threshold (see the thresholds above).
  inline bool needs_partial_trimming() const;

  // NUMA statistics related methods.
  void initialize_numa_stats();
  void flush_numa_stats();
  inline void update_numa_stats(uint node_index);

public:
  // Evacuate obj (whose header was old_mark) and return its new location.
  oop copy_to_survivor_space(G1HeapRegionAttr region_attr, oop obj, markWord old_mark);

  // Fully, resp. partially (down to the lower trim threshold) drain the local queue.
  inline void trim_queue();
  inline void trim_queue_partially();
  // Steal tasks from other workers' queues and process them together with local work.
  void steal_and_trim_queue(G1ScannerTasksQueueSet *task_queues);

  // Accumulated time spent trimming the queue, and reset thereof.
  Tickspan trim_ticks() const;
  void reset_trim_ticks();

  void record_evacuation_failed_region(G1HeapRegion* r, uint worker_id, bool cause_pinned);
  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markWord m, Klass* klass, G1HeapRegionAttr attr, size_t word_sz, bool cause_pinned);

  // Record a root, resp. heap reference pointing into an optional region so it
  // can be processed when that region is evacuated.
  template <typename T>
  inline void remember_root_into_optional_region(T* p);
  template <typename T>
  inline void remember_reference_into_optional_region(T* p);

  // The list of recorded references into the given optional region.
  inline G1OopStarChunkedList* oops_into_optional_region(const G1HeapRegion* hr);
};
254
// Owns one G1ParScanThreadState per GC worker thread for an evacuation pause
// and aggregates their statistics when the pause is done.
class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1CollectionSet* _collection_set;
  // Array of per-worker states, indexed by worker id; length _num_workers.
  G1ParScanThreadState** _states;
  // Surviving-young-words counts accumulated across all workers by flush_stats().
  size_t* _surviving_young_words_total;
  uint _num_workers;
  // Whether per-worker statistics have already been flushed to global state.
  bool _flushed;
  G1EvacFailureRegions* _evac_failure_regions;

public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                          uint num_workers,
                          G1CollectionSet* collection_set,
                          G1EvacFailureRegions* evac_failure_regions);
  ~G1ParScanThreadStateSet();

  // Flush all per-worker statistics into global state (may only happen once).
  void flush_stats();
  // Note that the given optional region was not evacuated during this pause.
  void record_unused_optional_region(G1HeapRegion* hr);
#if TASKQUEUE_STATS
  void print_partial_array_task_stats();
#endif // TASKQUEUE_STATS

  // The state for the given worker, created lazily or looked up as needed.
  G1ParScanThreadState* state_for_worker(uint worker_id);
  uint num_workers() const { return _num_workers; }

  // Aggregated surviving-young-words array; valid after flush_stats().
  const size_t* surviving_young_words() const;
};
282
283 #endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP