49 assert(index < ParallelGCThreads, "out of range manager_array access");
50 return &_manager_array[index];
51 }
52
53 inline void PSPromotionManager::push_depth(ScannerTask task) {
54 claimed_stack_depth()->push(task);
55 }
56
57 template <class T>
58 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
59 assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
60 T heap_oop = RawAccess<>::oop_load(p);
61 if (PSScavenge::is_obj_in_young(heap_oop)) {
62 oop obj = CompressedOops::decode_not_null(heap_oop);
63 assert(!PSScavenge::is_obj_in_to_space(obj), "revisiting object?");
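    // Prefetch the referent's mark word for writing: when this task is popped
    // from the work stack, copy_to_survivor_space() will read the mark and may
    // CAS a forwarding pointer into it.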
64 Prefetch::write(obj->mark_addr(), 0);
65 push_depth(ScannerTask(p));
66 }
67 }
68
69 inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
70 size_t obj_size,
71 uint age, bool tenured,
72 const PSPromotionLAB* lab) {
73 // Skip if memory allocation failed
74 if (new_obj != nullptr) {
75 const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
76
77 if (lab != nullptr) {
78 // Promotion of object through newly allocated PLAB
79 if (gc_tracer->should_report_promotion_in_new_plab_event()) {
80 size_t obj_bytes = obj_size * HeapWordSize;
81 size_t lab_size = lab->capacity();
82 gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
83 age, tenured, lab_size);
84 }
85 } else {
86 // Promotion of object directly to heap
87 if (gc_tracer->should_report_promotion_outside_plab_event()) {
88 size_t obj_bytes = obj_size * HeapWordSize;
89 gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
90 age, tenured);
91 }
92 }
93 }
94 }
95
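// Applied to every reference field of an object whose contents are being
// pushed: fields that still point into the young generation are queued on this
// manager's depth-first work stack via claim_or_forward_depth().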
96 class PSPushContentsClosure: public BasicOopIterateClosure {
97 PSPromotionManager* _pm;
98 public:
99 PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {}
100
101 template <typename T> void do_oop_work(T* p) {
102 _pm->claim_or_forward_depth(p);
103 }
104
105 virtual void do_oop(oop* p) { do_oop_work(p); }
106 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
107 };
108
109 //
136 PSPushContentsClosure pcc(this);
137 obj->oop_iterate(&pcc, MemRegion(left, right));
138 }
139
140 template<bool promote_immediately>
141 inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
142 assert(should_scavenge(&o), "Sanity");
143
144 // NOTE! We must be very careful with any methods that access the mark
145 // in o. There may be multiple threads racing on it, and it may be forwarded
146 // at any time.
147 markWord m = o->mark();
148 if (!m.is_forwarded()) {
149 return copy_unmarked_to_survivor_space<promote_immediately>(o, m);
150 } else {
151 // Ensure any loads from the forwardee follow all changes that precede
152 // the release-cmpxchg that performed the forwarding, possibly in some
153 // other thread.
154 OrderAccess::acquire();
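    //
    // Illustrative sketch (not part of the original comments) of the pairing
    // relied on here; the matching release is the CAS in forward_to_atomic(),
    // performed in copy_unmarked_to_survivor_space() by the copying thread:
    //
    //   copying thread                        any other scavenge thread
    //   --------------                        -------------------------
    //   copy o's payload into new_obj         m = o->mark()
    //   CAS forwarding mark into o            if (m.is_forwarded()) {
    //     (memory_order_release)                OrderAccess::acquire();
    //                                           // new_obj's payload is visible
    //                                           return the forwardee
    //                                         }
    //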
155 // Return the already installed forwardee.
156 return m.forwardee();
157 }
158 }
159
160 //
161 // This method is pretty bulky. It would be nice to split it up
162 // into smaller submethods, but we need to be careful not to hurt
163 // performance.
164 //
165 template<bool promote_immediately>
166 inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
167 markWord test_mark) {
168 assert(should_scavenge(&o), "Sanity");
169
170 oop new_obj = nullptr;
171 bool new_obj_is_tenured = false;
172 size_t new_obj_size = o->size();
173
174 // Find the object's age, MT-safe.
175 uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
176 test_mark.displaced_mark_helper().age() : test_mark.age();
177
178 if (!promote_immediately) {
179 // Try allocating obj in to-space (unless too old)
180 if (age < PSScavenge::tenuring_threshold()) {
181 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
182 if (new_obj == nullptr && !_young_gen_is_full) {
183 // Do we allocate directly, or flush and refill?
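      // An object larger than half a PLAB would leave most of a freshly
      // refilled PLAB unused, so it is allocated directly from to-space with a
      // CAS; smaller objects retire (flush) the current LAB, refill it with a
      // new YoungPLABSize-word chunk and retry the LAB allocation.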
184 if (new_obj_size > (YoungPLABSize / 2)) {
185 // Allocate this object directly
186 new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
187 promotion_trace_event(new_obj, o, new_obj_size, age, false, nullptr);
188 } else {
189 // Flush and fill
190 _young_lab.flush();
191
192 HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
193 if (lab_base != nullptr) {
194 _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
195 // Try the young lab allocation again.
196 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
197 promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
198 } else {
199 _young_gen_is_full = true;
200 }
201 }
202 }
203 }
204 }
205
206 // Otherwise try allocating obj tenured
207 if (new_obj == nullptr) {
208 #ifndef PRODUCT
209 if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
210 return oop_promotion_failed(o, test_mark);
211 }
212 #endif // #ifndef PRODUCT
213
214 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
215 new_obj_is_tenured = true;
216
217 if (new_obj == nullptr) {
218 if (!_old_gen_is_full) {
219 // Do we allocate directly, or flush and refill?
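        // Same policy as for the young LAB above: oversized objects bypass the
        // LAB and are allocated straight from the old generation, smaller ones
        // retire the current old LAB and refill it with an OldPLABSize-word
        // chunk before retrying.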
220 if (new_obj_size > (OldPLABSize / 2)) {
221 // Allocate this object directly
222 new_obj = cast_to_oop(old_gen()->allocate(new_obj_size));
223 promotion_trace_event(new_obj, o, new_obj_size, age, true, nullptr);
224 } else {
225 // Flush and fill
226 _old_lab.flush();
227
228 HeapWord* lab_base = old_gen()->allocate(OldPLABSize);
229 if (lab_base != nullptr) {
230 _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
231 // Try the old lab allocation again.
232 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
233 promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
234 }
235 }
236 }
237
238 // This is the promotion-failed test and its handling code.
239 // It belongs here for two reasons: it is slightly different
240 // from the code below and cannot share the CAS testing code,
241 // and keeping it here also minimizes the impact on the
242 // common-case fast-path code.
243
244 if (new_obj == nullptr) {
245 _old_gen_is_full = true;
246 return oop_promotion_failed(o, test_mark);
247 }
248 }
249 }
250
251 assert(new_obj != nullptr, "allocation should have succeeded");
252
253 // Copy obj
254 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
255
256 // Parallel GC claims the object with a release, so other threads that access
257 // it after the claim must see the fully "completed" object.
258 ContinuationGCSupport::transform_stack_chunk(new_obj);
259
260 // Now we have to CAS in the header.
261 // Make copy visible to threads reading the forwardee.
262 oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release);
263 if (forwardee == nullptr) { // forwardee is null when forwarding is successful
264 // We won any races, we "own" this object.
265 assert(new_obj == o->forwardee(), "Sanity");
266
267 // Increment age if obj still in new generation. Now that
268 // we're dealing with a markWord that cannot change, it is
269 // okay to use the non-MT-safe oop methods.
270 if (!new_obj_is_tenured) {
271 new_obj->incr_age();
272 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
273 }
274
275 // Do the size comparison first with new_obj_size, which we
276 // already have. Hopefully, only a few objects are larger than
277 // _min_array_size_for_chunking, and most of them will be arrays.
278 // So the is_objArray() test would be very infrequent.
|
49 assert(index < ParallelGCThreads, "out of range manager_array access");
50 return &_manager_array[index];
51 }
52
53 inline void PSPromotionManager::push_depth(ScannerTask task) {
54 claimed_stack_depth()->push(task);
55 }
56
57 template <class T>
58 inline void PSPromotionManager::claim_or_forward_depth(T* p) {
59 assert(ParallelScavengeHeap::heap()->is_in(p), "pointer outside heap");
60 T heap_oop = RawAccess<>::oop_load(p);
61 if (PSScavenge::is_obj_in_young(heap_oop)) {
62 oop obj = CompressedOops::decode_not_null(heap_oop);
63 assert(!PSScavenge::is_obj_in_to_space(obj), "revisiting object?");
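    // Prefetch the referent's mark word for writing: when this task is popped
    // from the work stack, copy_to_survivor_space() will read the mark and may
    // CAS a forwarding pointer into it.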
64 Prefetch::write(obj->mark_addr(), 0);
65 push_depth(ScannerTask(p));
66 }
67 }
68
69 inline void PSPromotionManager::promotion_trace_event(oop new_obj, Klass* klass,
70 size_t obj_size,
71 uint age, bool tenured,
72 const PSPromotionLAB* lab) {
73 // Skip if memory allocation failed
74 if (new_obj != nullptr) {
75 const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
76
77 if (lab != nullptr) {
78 // Promotion of object through newly allocated PLAB
79 if (gc_tracer->should_report_promotion_in_new_plab_event()) {
80 size_t obj_bytes = obj_size * HeapWordSize;
81 size_t lab_size = lab->capacity();
82 gc_tracer->report_promotion_in_new_plab_event(klass, obj_bytes,
83 age, tenured, lab_size);
84 }
85 } else {
86 // Promotion of object directly to heap
87 if (gc_tracer->should_report_promotion_outside_plab_event()) {
88 size_t obj_bytes = obj_size * HeapWordSize;
89 gc_tracer->report_promotion_outside_plab_event(klass, obj_bytes,
90 age, tenured);
91 }
92 }
93 }
94 }
95
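// Applied to every reference field of an object whose contents are being
// pushed: fields that still point into the young generation are queued on this
// manager's depth-first work stack via claim_or_forward_depth().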
96 class PSPushContentsClosure: public BasicOopIterateClosure {
97 PSPromotionManager* _pm;
98 public:
99 PSPushContentsClosure(PSPromotionManager* pm) : BasicOopIterateClosure(PSScavenge::reference_processor()), _pm(pm) {}
100
101 template <typename T> void do_oop_work(T* p) {
102 _pm->claim_or_forward_depth(p);
103 }
104
105 virtual void do_oop(oop* p) { do_oop_work(p); }
106 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
107 };
108
109 //
136 PSPushContentsClosure pcc(this);
137 obj->oop_iterate(&pcc, MemRegion(left, right));
138 }
139
140 template<bool promote_immediately>
141 inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
142 assert(should_scavenge(&o), "Sanity");
143
144 // NOTE! We must be very careful with any methods that access the mark
145 // in o. There may be multiple threads racing on it, and it may be forwarded
146 // at any time.
147 markWord m = o->mark();
148 if (!m.is_forwarded()) {
149 return copy_unmarked_to_survivor_space<promote_immediately>(o, m);
150 } else {
151 // Ensure any loads from the forwardee follow all changes that precede
152 // the release-cmpxchg that performed the forwarding, possibly in some
153 // other thread.
154 OrderAccess::acquire();
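    //
    // Illustrative sketch (not part of the original comments) of the pairing
    // relied on here; the matching release is the CAS in forward_to_atomic(),
    // performed in copy_unmarked_to_survivor_space() by the copying thread:
    //
    //   copying thread                        any other scavenge thread
    //   --------------                        -------------------------
    //   copy o's payload into new_obj         m = o->mark()
    //   CAS forwarding mark into o            if (m.is_forwarded()) {
    //     (memory_order_release)                OrderAccess::acquire();
    //                                           // new_obj's payload is visible
    //                                           return the forwardee
    //                                         }
    //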
155 // Return the already installed forwardee.
156 return o->forwardee(m);
157 }
158 }
159
160 //
161 // This method is pretty bulky. It would be nice to split it up
162 // into smaller submethods, but we need to be careful not to hurt
163 // performance.
164 //
165 template<bool promote_immediately>
166 inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
167 markWord test_mark) {
168 assert(should_scavenge(&o), "Sanity");
169
170 oop new_obj = nullptr;
171 bool new_obj_is_tenured = false;
172 // NOTE: With compact headers, it is not safe to load the Klass* from o, because
173 // that would access the mark-word, and the mark-word might be changed at any time
174 // by concurrent promotion. The promoted mark-word would point to the forwardee,
175 // which may not yet have completed copying. Therefore we must load the Klass* from
176 // the mark-word that we have already loaded. This is safe because the caller has
177 // already checked that o is not yet forwarded.
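// Note (an assumption; forward_safe_klass() is defined elsewhere): with compact
// object headers the Klass* is encoded in the mark word, so it is decoded from
// test_mark, the snapshot we already know is not forwarded; without compact
// headers it can simply read the regular klass field of o.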
178 Klass* klass = o->forward_safe_klass(test_mark);
179 size_t new_obj_size = o->size_given_klass(klass);
180
181 // Find the object's age, MT-safe.
182 uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
183 test_mark.displaced_mark_helper().age() : test_mark.age();
184
185 if (!promote_immediately) {
186 // Try allocating obj in to-space (unless too old)
187 if (age < PSScavenge::tenuring_threshold()) {
188 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
189 if (new_obj == nullptr && !_young_gen_is_full) {
190 // Do we allocate directly, or flush and refill?
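      // An object larger than half a PLAB would leave most of a freshly
      // refilled PLAB unused, so it is allocated directly from to-space with a
      // CAS; smaller objects retire (flush) the current LAB, refill it with a
      // new YoungPLABSize-word chunk and retry the LAB allocation.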
191 if (new_obj_size > (YoungPLABSize / 2)) {
192 // Allocate this object directly
193 new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
194 promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr);
195 } else {
196 // Flush and fill
197 _young_lab.flush();
198
199 HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
200 if (lab_base != nullptr) {
201 _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
202 // Try the young lab allocation again.
203 new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
204 promotion_trace_event(new_obj, klass, new_obj_size, age, false, &_young_lab);
205 } else {
206 _young_gen_is_full = true;
207 }
208 }
209 }
210 }
211 }
212
213 // Otherwise try allocating obj tenured
214 if (new_obj == nullptr) {
215 #ifndef PRODUCT
216 if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
217 return oop_promotion_failed(o, test_mark);
218 }
219 #endif // #ifndef PRODUCT
220
221 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
222 new_obj_is_tenured = true;
223
224 if (new_obj == nullptr) {
225 if (!_old_gen_is_full) {
226 // Do we allocate directly, or flush and refill?
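        // Same policy as for the young LAB above: oversized objects bypass the
        // LAB and are allocated straight from the old generation, smaller ones
        // retire the current old LAB and refill it with an OldPLABSize-word
        // chunk before retrying.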
227 if (new_obj_size > (OldPLABSize / 2)) {
228 // Allocate this object directly
229 new_obj = cast_to_oop(old_gen()->allocate(new_obj_size));
230 promotion_trace_event(new_obj, klass, new_obj_size, age, true, nullptr);
231 } else {
232 // Flush and fill
233 _old_lab.flush();
234
235 HeapWord* lab_base = old_gen()->allocate(OldPLABSize);
236 if (lab_base != nullptr) {
237 _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
238 // Try the old lab allocation again.
239 new_obj = cast_to_oop(_old_lab.allocate(new_obj_size));
240 promotion_trace_event(new_obj, klass, new_obj_size, age, true, &_old_lab);
241 }
242 }
243 }
244
245 // This is the promotion-failed test and its handling code.
246 // It belongs here for two reasons: it is slightly different
247 // from the code below and cannot share the CAS testing code,
248 // and keeping it here also minimizes the impact on the
249 // common-case fast-path code.
250
251 if (new_obj == nullptr) {
252 _old_gen_is_full = true;
253 return oop_promotion_failed(o, test_mark);
254 }
255 }
256 }
257
258 assert(new_obj != nullptr, "allocation should have succeeded");
259
260 // Copy obj
261 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
262
263 if (UseCompactObjectHeaders) {
264 // The copy above is not atomic. Make sure we have seen the proper mark
265 // and re-install it into the copy, so that Klass* is guaranteed to be correct.
266 markWord mark = o->mark();
267 if (!mark.is_forwarded()) {
268 new_obj->set_mark(mark);
269 ContinuationGCSupport::transform_stack_chunk(new_obj);
270 } else {
271 // If we copied a mark-word that already indicates 'forwarded' state, the
272 // forwarding installation below would not succeed, and we can no longer
273 // access the Klass* either, so skip the transformation.
274 }
275 } else {
276 ContinuationGCSupport::transform_stack_chunk(new_obj);
277 }
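// Note (an assumption; the lost-race path is not shown in this excerpt): a
// forwarding mark can only have been copied if another thread forwarded o after
// our mark() snapshot, in which case the forwarding CAS below fails as well and
// this copy is never published as the forwardee.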
278
279 // Now we have to CAS in the header.
280 // Make copy visible to threads reading the forwardee.
281 oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_release);
282 if (forwardee == nullptr) { // forwardee is null when forwarding is successful
283 // We won any races, we "own" this object.
284 assert(new_obj == o->forwardee(), "Sanity");
285
286 // Increment age if obj still in new generation. Now that
287 // we're dealing with a markWord that cannot change, it is
288 // okay to use the non-MT-safe oop methods.
289 if (!new_obj_is_tenured) {
290 new_obj->incr_age();
291 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
292 }
293
294 // Do the size comparison first with new_obj_size, which we
295 // already have. Hopefully, only a few objects are larger than
296 // _min_array_size_for_chunking, and most of them will be arrays.
297 // So the is_objArray() test would be very infrequent.
|