 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/partialArrayTaskStepper.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/ticks.hpp"

class G1CardTable;
class G1CollectionSet;
class G1EvacFailureRegions;
class G1EvacuationRootClosures;
class G1OopStarChunkedList;
class G1PLABAllocator;
class G1HeapRegion;
class PreservedMarks;
class PreservedMarksSet;
class outputStream;

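// Per-worker-thread state used during the evacuation (object copying) phase of a
// G1 collection pause: the worker's task queue, PLAB allocator, local redirty
// card queue set, age table, NUMA statistics and evacuation failure bookkeeping.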
class G1ParScanThreadState : public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;
  G1ScannerTasksQueue* _task_queue;
  G1RedirtyCardsLocalQueueSet _rdc_local_qset;
  G1CardTable* _ct;
  G1EvacuationRootClosures* _closures;

  G1PLABAllocator* _plab_allocator;

  AgeTable _age_table;
  // Local tenuring threshold.
  uint _tenuring_threshold;
  G1ScanEvacuatedObjClosure _scanner;

  uint _worker_id;

  // Remember the last enqueued card to avoid enqueuing the same card over and over;
  // since we only ever scan a card once, this is sufficient.
  // ...

  // Size (in elements) of a partial objArray task chunk.
  int _partial_objarray_chunk_size;
  PartialArrayTaskStepper _partial_array_stepper;
  StringDedup::Requests _string_dedup_requests;

  G1CardTable* ct() { return _ct; }

  // Maximum number of optional regions at the start of GC.
  size_t _max_num_optional_regions;
  G1OopStarChunkedList* _oops_into_optional_regions;

  G1NUMA* _numa;
  // Records how many object allocations happened on each NUMA node during copy
  // to survivor space. Recording only happens when gc+heap+numa logging is
  // enabled; the data is transferred to the global statistics when flushed.
  size_t* _obj_alloc_stat;

  // Per-thread evacuation failure data structures.
  ALLOCATION_FAILURE_INJECTOR_ONLY(size_t _allocation_failure_inject_counter;)

  PreservedMarks* _preserved_marks;
  EvacuationFailedInfo _evacuation_failed_info;
  G1EvacFailureRegions* _evac_failure_regions;
  // Number of additional cards enqueued into the local DCQS for references into
  // evacuation failed regions. This is an approximation, as cards that would be
  // enqueued later anyway, outside of evacuation failure handling, are not
  // subtracted again.
  size_t _evac_failure_enqueued_cards;

  // Enqueue the card if not already in the set; this is a best-effort attempt at
  // detecting duplicates.
  template <class T> bool enqueue_if_new(T* p);
  // Enqueue the card of p into the (evacuation failed) region.
  template <class T> void enqueue_card_into_evac_fail_region(T* p, oop obj);

  bool inject_allocation_failure(uint region_idx) ALLOCATION_FAILURE_INJECTOR_RETURN_( return false; );

public:
  G1ParScanThreadState(G1CollectedHeap* g1h,
                       G1RedirtyCardsQueueSet* rdcqs,
                       PreservedMarks* preserved_marks,
                       uint worker_id,
                       uint num_workers,
                       G1CollectionSet* collection_set,
                       G1EvacFailureRegions* evac_failure_regions);
  virtual ~G1ParScanThreadState();

  void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }

#ifdef ASSERT
  bool queue_is_empty() const { return _task_queue->is_empty(); }
#endif

  void verify_task(narrowOop* task) const NOT_DEBUG_RETURN;
  void verify_task(oop* task) const NOT_DEBUG_RETURN;
  void verify_task(PartialArrayScanTask task) const NOT_DEBUG_RETURN;
  void verify_task(ScannerTask task) const NOT_DEBUG_RETURN;

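  // Push a task (a reference to scan or a partial object array chunk) onto this
  // worker's task queue.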
  void push_on_queue(ScannerTask task);

  // Apply the post barrier to the given reference field. Enqueues the card of p
  // ...
  // write_ref_field_post() above) has already been performed.
  template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o);

  G1EvacuationRootClosures* closures() { return _closures; }
  uint worker_id() { return _worker_id; }

  size_t lab_waste_words() const;
  size_t lab_undo_waste_words() const;

  size_t evac_failure_enqueued_cards() const;

  // Pass locally gathered statistics to global state. Returns the total number of
  // HeapWords copied.
  size_t flush_stats(size_t* surviving_young_words, uint num_workers, BufferNodeList* buffer_log);

private:
  void do_partial_array(PartialArrayScanTask task);
  void start_partial_objarray(G1HeapRegionAttr dest_dir, oop from, oop to);

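  // Slow-path allocation of word_sz words for a copy of old when allocation out of
  // the current PLAB failed; may change the destination generation and update
  // *dest_attr accordingly.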
  HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                               oop old,
                               size_t word_sz,
                               uint age,
                               uint node_index);

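  // Return the word_sz words at obj_ptr to the PLAB/region, e.g. when another
  // worker won the race to install the forwarding pointer.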
  void undo_allocation(G1HeapRegionAttr dest_addr,
                       HeapWord* obj_ptr,
                       size_t word_sz,
                       uint node_index);

  void update_bot_after_copying(oop obj, size_t word_sz);

  oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                oop obj,
                                markWord old_mark);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> void do_oop_evac(T* p);

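  // Decode the given task and either scan the referenced oop or continue a partial
  // object array.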
  void dispatch_task(ScannerTask task);

  // Tries to allocate word_sz words in the PLAB of the next "generation" after
  // trying to allocate into dest. previous_plab_refill_failed indicates whether
  // the previous PLAB refill for the original (source) object failed.
  // Returns a non-null pointer if successful, and updates dest if required.
  // Also determines whether we should keep trying to allocate into the various
  // generations or stop trying altogether.
  HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
                                  size_t word_sz,
                                  bool previous_plab_refill_failed,
                                  uint node_index);

  inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);

  void report_promotion_event(G1HeapRegionAttr const dest_attr,
                              oop const old, size_t word_sz, uint age,
                              HeapWord* const obj_ptr, uint node_index) const;

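  // Process tasks from the local queue until at most threshold entries remain.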
  void trim_queue_to_threshold(uint threshold);

  inline bool needs_partial_trimming() const;

  // NUMA statistics related methods.
  void initialize_numa_stats();
  void flush_numa_stats();
  inline void update_numa_stats(uint node_index);

public:
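  // Evacuate obj (with original mark word old_mark, currently in a region with
  // attributes region_attr) and return its new location; if another worker already
  // forwarded the object, the existing forwardee is returned.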
  oop copy_to_survivor_space(G1HeapRegionAttr region_attr, oop obj, markWord old_mark);

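  // Drain the local task queue completely (trim_queue) or down to a threshold
  // (trim_queue_partially); steal_and_trim_queue additionally steals tasks from
  // other workers' queues once the local queue is empty.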
  inline void trim_queue();
  inline void trim_queue_partially();
  void steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues);

  Tickspan trim_ticks() const;
  void reset_trim_ticks();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markWord m, size_t word_sz, bool cause_pinned);

  template <typename T>
  inline void remember_root_into_optional_region(T* p);
  template <typename T>
  inline void remember_reference_into_optional_region(T* p);

  inline G1OopStarChunkedList* oops_into_optional_region(const G1HeapRegion* hr);
};

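// Owns the per-worker G1ParScanThreadState instances for a collection pause,
// together with the shared redirty card queue set, the preserved marks set and
// the aggregated surviving-young-words statistics.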
class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1CollectionSet* _collection_set;
  G1RedirtyCardsQueueSet _rdcqs;
  PreservedMarksSet _preserved_marks_set;
  G1ParScanThreadState** _states;
  BufferNodeList* _rdc_buffers;
  size_t* _surviving_young_words_total;
  uint _num_workers;
  bool _flushed;
  G1EvacFailureRegions* _evac_failure_regions;

public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                          uint num_workers,
                          G1CollectionSet* collection_set,
                          G1EvacFailureRegions* evac_failure_regions);
  ~G1ParScanThreadStateSet();

  G1RedirtyCardsQueueSet* rdcqs() { return &_rdcqs; }
  BufferNodeList* rdc_buffers() { return _rdc_buffers; }
  PreservedMarksSet* preserved_marks_set() { return &_preserved_marks_set; }

  void flush_stats();
  void record_unused_optional_region(G1HeapRegion* hr);

  G1ParScanThreadState* state_for_worker(uint worker_id);
  uint num_workers() const { return _num_workers; }

  const size_t* surviving_young_words() const;
};

#endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1YoungGCAllocationFailureInjector.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/partialArrayTaskStepper.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "utilities/ticks.hpp"

class G1CardTable;
class G1CollectionSet;
class G1EvacFailureRegions;
class G1EvacuationRootClosures;
class G1OopStarChunkedList;
class G1PLABAllocator;
class G1HeapRegion;
class outputStream;

class G1ParScanThreadState : public CHeapObj<mtGC> {
  G1CollectedHeap* _g1h;
  G1ScannerTasksQueue* _task_queue;
  G1RedirtyCardsLocalQueueSet _rdc_local_qset;
  G1CardTable* _ct;
  G1EvacuationRootClosures* _closures;

  G1PLABAllocator* _plab_allocator;

  AgeTable _age_table;
  // Local tenuring threshold.
  uint _tenuring_threshold;
  G1ScanEvacuatedObjClosure _scanner;

  uint _worker_id;

  // Remember the last enqueued card to avoid enqueuing the same card over and over;
  // since we only ever scan a card once, this is sufficient.
  // ...

  // Size (in elements) of a partial objArray task chunk.
  int _partial_objarray_chunk_size;
  PartialArrayTaskStepper _partial_array_stepper;
  StringDedup::Requests _string_dedup_requests;

  G1CardTable* ct() { return _ct; }

  // Maximum number of optional regions at the start of GC.
  size_t _max_num_optional_regions;
  G1OopStarChunkedList* _oops_into_optional_regions;

  G1NUMA* _numa;
  // Records how many object allocations happened on each NUMA node during copy
  // to survivor space. Recording only happens when gc+heap+numa logging is
  // enabled; the data is transferred to the global statistics when flushed.
  size_t* _obj_alloc_stat;

  // Per-thread evacuation failure data structures.
  ALLOCATION_FAILURE_INJECTOR_ONLY(size_t _allocation_failure_inject_counter;)

  EvacuationFailedInfo _evacuation_failed_info;
  G1EvacFailureRegions* _evac_failure_regions;
  // Number of additional cards enqueued into the local DCQS for references into
  // evacuation failed regions. This is an approximation, as cards that would be
  // enqueued later anyway, outside of evacuation failure handling, are not
  // subtracted again.
  size_t _evac_failure_enqueued_cards;

  // Enqueue the card if not already in the set; this is a best-effort attempt at
  // detecting duplicates.
  template <class T> bool enqueue_if_new(T* p);
  // Enqueue the card of p into the (evacuation failed) region.
  template <class T> void enqueue_card_into_evac_fail_region(T* p, oop obj);

  bool inject_allocation_failure(uint region_idx) ALLOCATION_FAILURE_INJECTOR_RETURN_( return false; );

public:
  G1ParScanThreadState(G1CollectedHeap* g1h,
                       G1RedirtyCardsQueueSet* rdcqs,
                       uint worker_id,
                       uint num_workers,
                       G1CollectionSet* collection_set,
                       G1EvacFailureRegions* evac_failure_regions);
  virtual ~G1ParScanThreadState();

  void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }

#ifdef ASSERT
  bool queue_is_empty() const { return _task_queue->is_empty(); }
#endif

  void verify_task(narrowOop* task) const NOT_DEBUG_RETURN;
  void verify_task(oop* task) const NOT_DEBUG_RETURN;
  void verify_task(PartialArrayScanTask task) const NOT_DEBUG_RETURN;
  void verify_task(ScannerTask task) const NOT_DEBUG_RETURN;

  void push_on_queue(ScannerTask task);

  // Apply the post barrier to the given reference field. Enqueues the card of p
  // ...
  // write_ref_field_post() above) has already been performed.
  template <class T> void enqueue_card_if_tracked(G1HeapRegionAttr region_attr, T* p, oop o);

  G1EvacuationRootClosures* closures() { return _closures; }
  uint worker_id() { return _worker_id; }

  size_t lab_waste_words() const;
  size_t lab_undo_waste_words() const;

  size_t evac_failure_enqueued_cards() const;

  // Pass locally gathered statistics to global state. Returns the total number of
  // HeapWords copied.
  size_t flush_stats(size_t* surviving_young_words, uint num_workers, BufferNodeList* buffer_log);

private:
  void do_partial_array(PartialArrayScanTask task);
  void start_partial_objarray(G1HeapRegionAttr dest_dir, oop from, oop to);

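  // Slow-path allocation of word_sz words for a copy of an object of the given
  // klass when allocation out of the current PLAB failed; may change the
  // destination generation and update *dest_attr accordingly.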
  HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
                               Klass* klass,
                               size_t word_sz,
                               uint age,
                               uint node_index);

  void undo_allocation(G1HeapRegionAttr dest_addr,
                       HeapWord* obj_ptr,
                       size_t word_sz,
                       uint node_index);

  void update_bot_after_copying(oop obj, size_t word_sz);

  oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
                                oop obj,
                                markWord old_mark);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> void do_oop_evac(T* p);

  void dispatch_task(ScannerTask task);

  // Tries to allocate word_sz words in the PLAB of the next "generation" after
  // trying to allocate into dest. previous_plab_refill_failed indicates whether
  // the previous PLAB refill for the original (source) object failed.
  // Returns a non-null pointer if successful, and updates dest if required.
  // Also determines whether we should keep trying to allocate into the various
  // generations or stop trying altogether.
  HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
                                  size_t word_sz,
                                  bool previous_plab_refill_failed,
                                  uint node_index);

  inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);

  void report_promotion_event(G1HeapRegionAttr const dest_attr,
                              Klass* klass, size_t word_sz, uint age,
                              HeapWord* const obj_ptr, uint node_index) const;

  void trim_queue_to_threshold(uint threshold);

  inline bool needs_partial_trimming() const;

  // NUMA statistics related methods.
  void initialize_numa_stats();
  void flush_numa_stats();
  inline void update_numa_stats(uint node_index);

public:
  oop copy_to_survivor_space(G1HeapRegionAttr region_attr, oop obj, markWord old_mark);

  inline void trim_queue();
  inline void trim_queue_partially();
  void steal_and_trim_queue(G1ScannerTasksQueueSet* task_queues);

  Tickspan trim_ticks() const;
  void reset_trim_ticks();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markWord m, size_t word_sz, bool cause_pinned);

  template <typename T>
  inline void remember_root_into_optional_region(T* p);
  template <typename T>
  inline void remember_reference_into_optional_region(T* p);

  inline G1OopStarChunkedList* oops_into_optional_region(const G1HeapRegion* hr);
};

class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1CollectionSet* _collection_set;
  G1RedirtyCardsQueueSet _rdcqs;
  G1ParScanThreadState** _states;
  BufferNodeList* _rdc_buffers;
  size_t* _surviving_young_words_total;
  uint _num_workers;
  bool _flushed;
  G1EvacFailureRegions* _evac_failure_regions;

public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h,
                          uint num_workers,
                          G1CollectionSet* collection_set,
                          G1EvacFailureRegions* evac_failure_regions);
  ~G1ParScanThreadStateSet();

  G1RedirtyCardsQueueSet* rdcqs() { return &_rdcqs; }
  BufferNodeList* rdc_buffers() { return _rdc_buffers; }

  void flush_stats();
  void record_unused_optional_region(G1HeapRegion* hr);

  G1ParScanThreadState* state_for_worker(uint worker_id);
  uint num_workers() const { return _num_workers; }

  const size_t* surviving_young_words() const;
};

#endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_HPP