1 /*
  2  * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
 26 #define SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
 27 
 28 #include "gc/g1/g1CollectedHeap.hpp"
 29 #include "gc/g1/g1OopClosures.hpp"
 30 #include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
 31 #include "gc/shared/ageTable.hpp"
 32 #include "gc/shared/copyFailedInfo.hpp"
 33 #include "gc/shared/gc_globals.hpp"
 34 #include "gc/shared/partialArraySplitter.hpp"
 35 #include "gc/shared/partialArrayState.hpp"
 36 #include "gc/shared/stringdedup/stringDedup.hpp"
 37 #include "gc/shared/taskqueue.hpp"
 38 #include "memory/allocation.hpp"
 39 #include "oops/oop.hpp"
 40 #include "utilities/ticks.hpp"
 41 
 42 class G1CardTable;
 43 class G1CollectionSet;
 44 class G1EvacFailureRegions;
 45 class G1EvacuationRootClosures;
 46 class G1OopStarChunkedList;
 47 class G1PLABAllocator;
 48 class G1HeapRegion;
 49 class outputStream;
 50 
 51 class G1ParScanThreadState : public CHeapObj<mtGC> {
 52   G1CollectedHeap* _g1h;
 53   G1ScannerTasksQueue* _task_queue;
 54   G1CardTable* _ct;
 55   G1EvacuationRootClosures* _closures;
 56 
 57   G1PLABAllocator* _plab_allocator;
 58 
 59   AgeTable _age_table;
 60   // Local tenuring threshold.
 61   uint _tenuring_threshold;
 62   G1ScanEvacuatedObjClosure _scanner;
 63 
 64   uint _worker_id;
 65 
 66   size_t _num_cards_marked_dirty;
 67   size_t _num_cards_marked_to_cset;
 68 
 69   // Upper and lower threshold to start and end work queue draining.
 70   uint const _stack_trim_upper_threshold;
 71   uint const _stack_trim_lower_threshold;
 72 
 73   Tickspan _trim_ticks;
 74   // Map from young-age-index (0 == not young, 1 is youngest) to
 75   // surviving words. base is what we get back from the malloc call
 76   size_t* _surviving_young_words_base;
 77   // this points into the array, as we use the first few entries for padding
 78   size_t* _surviving_young_words;
 79   // Number of elements in the array above.
 80   size_t _surviving_words_length;
 81   // Indicates whether in the last generation (old) there is no more space
 82   // available for allocation.
 83   bool _old_gen_is_full;
 84   PartialArraySplitter _partial_array_splitter;
 85   StringDedup::Requests _string_dedup_requests;
 86 
 87   G1CardTable* ct() { return _ct; }
 88 
 89   // Maximum number of optional regions at start of gc.
 90   size_t _max_num_optional_regions;
 91   G1OopStarChunkedList* _oops_into_optional_regions;
 92 
 93   G1NUMA* _numa;
 94   // Records how many object allocations happened at each node during copy to survivor.
 95   // Only starts recording when log of gc+heap+numa is enabled and its data is
 96   // transferred when flushed.
 97   size_t* _obj_alloc_stat;
 98 
 99   // Per-thread evacuation failure data structures.
100   ALLOCATION_FAILURE_INJECTOR_ONLY(size_t _allocation_failure_inject_counter;)
101 
102   EvacuationFailedInfo _evacuation_failed_info;
103   G1EvacFailureRegions* _evac_failure_regions;
104   // Number of additional cards into evacuation failed regions.
105   size_t _num_cards_from_evac_failure;
106 
107   // Mark the card if not already in the set; this is a best-effort attempt on
108   // detecting duplicates.
109   template <class T> bool mark_if_new(T* p, bool into_survivor);
110   // Mark the card of p into the (evacuation failed) region.
111   template <class T> void mark_card_into_evac_fail_region(T* p, oop obj);
112 
113   bool inject_allocation_failure(uint region_idx) ALLOCATION_FAILURE_INJECTOR_RETURN_( return false; );
114 
115 public:
116   G1ParScanThreadState(G1CollectedHeap* g1h,
117                        uint worker_id,
118                        uint num_workers,
119                        G1CollectionSet* collection_set,
120                        G1EvacFailureRegions* evac_failure_regions);
121   virtual ~G1ParScanThreadState();
122 
123   void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
124 
125 #ifdef ASSERT
126   bool queue_is_empty() const { return _task_queue->is_empty(); }
127 #endif
128 
129   void verify_task(narrowOop* task) const NOT_DEBUG_RETURN;
130   void verify_task(oop* task) const NOT_DEBUG_RETURN;
131   void verify_task(PartialArrayState* task) const NOT_DEBUG_RETURN;
132   void verify_task(ScannerTask task) const NOT_DEBUG_RETURN;
133 
134   void push_on_queue(ScannerTask task);
135 
136   // Apply the post barrier to the given reference field. Marks the card of p
137   // if the barrier does not filter out the reference for some reason (e.g.
138   // p and q are in the same region, p is in survivor, p is in collection set)
139   // To be called during GC if nothing particular about p and obj are known.
140   template <class T> void write_ref_field_post(T* p, oop obj);
141 
142   // Mark the card if the reference's target region's remembered set is tracked.
143   // Assumes that a significant amount of pre-filtering (like done by
144   // write_ref_field_post() above) has already been performed.
145   template <class T> void mark_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o);
146 
147   G1EvacuationRootClosures* closures() { return _closures; }
148   uint worker_id() { return _worker_id; }
149 
150   size_t lab_waste_words() const;
151   size_t lab_undo_waste_words() const;
152 
153   // Newly marked cards during this garbage collection, to be refined concurrently
154   // later. Contains both marks generated by new cross-region references as well
155   // as cards generated from regions into evacuation failed regions.
156   // Does not contain cards into the next collection set (e.g. survivors) - they will not
157   // be refined concurrently. Calculation is done on a best-effort basis.
158   size_t num_cards_pending() const;
159   // Number of cards newly generated by references into evacuation failed regions.
160   // Calculation is done on a best-effort basis.
161   size_t num_cards_from_evac_failure() const;
162   // Sum of cards marked by evacuation. Contains both pending cards as well as cards
163   // into the next collection set (e.g. survivors).
164   size_t num_cards_marked() const;
165 
166   // Pass locally gathered statistics to global state. Returns the total number of
167   // HeapWords copied.
168   size_t flush_stats(size_t* surviving_young_words, uint num_workers);
169 
170 #if TASKQUEUE_STATS
171   PartialArrayTaskStats* partial_array_task_stats();
172 #endif // TASKQUEUE_STATS
173 
174 private:
175   void do_partial_array(PartialArrayState* state, bool stolen);
176   void start_partial_objarray(oop from, oop to);
177 
178   HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
179                                Klass* klass,
180                                size_t word_sz,
181                                uint age,
182                                uint node_index);
183 
184   void undo_allocation(G1HeapRegionAttr dest_addr,
185                        HeapWord* obj_ptr,
186                        size_t word_sz,
187                        uint node_index);
188 
189   void update_bot_after_copying(oop obj, size_t word_sz);
190 
191   void do_iterate_object(oop const obj,
192                          oop const old,
193                          Klass* const klass,
194                          G1HeapRegionAttr const region_attr,
195                          G1HeapRegionAttr const dest_attr,
196                          uint age);
197   oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
198                                 oop obj,
199                                 markWord old_mark);
200 
201   // This method is applied to the fields of the objects that have just been copied.
202   template <class T> void do_oop_evac(T* p);
203 
204   void dispatch_task(ScannerTask task, bool stolen);
205 
206   // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
207   // allocate into dest. Previous_plab_refill_failed indicates whether previous
208   // PLAB refill for the original (source) object failed.
209   // Returns a non-null pointer if successful, and updates dest if required.
210   // Also determines whether we should continue to try to allocate into the various
211   // generations or just end trying to allocate.
212   HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
213                                   size_t word_sz,
214                                   bool previous_plab_refill_failed,
215                                   uint node_index);
216 
217   inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
218 
219   void report_promotion_event(G1HeapRegionAttr const dest_attr,
220                               Klass* klass, size_t word_sz, uint age,
221                               HeapWord * const obj_ptr, uint node_index) const;
222 
223   void trim_queue_to_threshold(uint threshold);
224 
225   inline bool needs_partial_trimming() const;
226 
227   // NUMA statistics related methods.
228   void initialize_numa_stats();
229   void flush_numa_stats();
230   inline void update_numa_stats(uint node_index);
231 
232 public:
233   oop copy_to_survivor_space(G1HeapRegionAttr region_attr, oop obj, markWord old_mark);
234 
235   inline void trim_queue();
236   inline void trim_queue_partially();
237   void steal_and_trim_queue(G1ScannerTasksQueueSet *task_queues);
238 
239   Tickspan trim_ticks() const;
240   void reset_trim_ticks();
241 
242   void record_evacuation_failed_region(G1HeapRegion* r, uint worker_id, bool cause_pinned);
243   // An attempt to evacuate "obj" has failed; take necessary steps.
244   oop handle_evacuation_failure_par(oop obj, markWord m, Klass* klass, G1HeapRegionAttr attr, size_t word_sz, bool cause_pinned);
245 
246   template <typename T>
247   inline void remember_root_into_optional_region(T* p);
248   template <typename T>
249   inline void remember_reference_into_optional_region(T* p);
250 
251   inline G1OopStarChunkedList* oops_into_optional_region(const G1HeapRegion* hr);
252 };
253 
// Owns one G1ParScanThreadState per GC worker for the duration of a
// collection pause and aggregates their statistics when flushed.
class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1CollectionSet* _collection_set;
  G1ParScanThreadState** _states;              // One entry per worker id.
  size_t* _surviving_young_words_total;        // Per-age totals summed over workers by flush_stats().
  uint _num_workers;
  bool _flushed;                               // True once per-worker stats have been flushed.
  G1EvacFailureRegions* _evac_failure_regions;

 public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                          uint num_workers,
                          G1CollectionSet* collection_set,
                          G1EvacFailureRegions* evac_failure_regions);
  ~G1ParScanThreadStateSet();

  // Merge all per-worker statistics into global state; must happen before
  // the totals below are read.
  void flush_stats();
  void record_unused_optional_region(G1HeapRegion* hr);
#if TASKQUEUE_STATS
  void print_partial_array_task_stats();
#endif // TASKQUEUE_STATS

  // Returns (creating on first use, if applicable) the state for the given worker.
  G1ParScanThreadState* state_for_worker(uint worker_id);
  uint num_workers() const { return _num_workers; }

  const size_t* surviving_young_words() const;
};
281 
282 #endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP