229 // into smaller submethods, but we need to be careful not to hurt
230 // performance.
231 //
232 template<bool promote_immediately>
233 inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
234 markWord test_mark) {
235 HeapWord* new_obj_addr = nullptr;
236 bool new_obj_is_tenured = false;
237
238 // NOTE: With compact headers, it is not safe to load the Klass* from old, because
239 // that would access the mark-word, that might change at any time by concurrent
240 // workers.
241 // This mark word would refer to a forwardee, which may not yet have completed
242 // copying. Therefore we must load the Klass* from the mark-word that we already
243 // loaded. This is safe, because we only enter here if not yet forwarded.
244 assert(!test_mark.is_forwarded(), "precondition");
245 Klass* klass = UseCompactObjectHeaders
246 ? test_mark.klass()
247 : o->klass();
248
249 size_t new_obj_size = o->size_given_klass(klass);
250
251   // Find the object's age, MT safe.
252 uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
253 test_mark.displaced_mark_helper().age() : test_mark.age();
254
255 if (!promote_immediately) {
256 // Try allocating obj in to-space (unless too old)
257 if (age < PSScavenge::tenuring_threshold()) {
258 new_obj_addr = allocate_in_young_gen(klass, new_obj_size, age);
259 }
260 }
261
262 // Otherwise try allocating obj tenured
263 if (new_obj_addr == nullptr) {
264 new_obj_addr = allocate_in_old_gen(klass, new_obj_size, age);
265 if (new_obj_addr == nullptr) {
266 return oop_promotion_failed(o, test_mark);
267 }
268 new_obj_is_tenured = true;
269 }
275
276 // Now we have to CAS in the header.
277 // Because the forwarding is done with memory_order_relaxed there is no
278 // ordering with the above copy. Clients that get the forwardee must not
279 // examine its contents without other synchronization, since the contents
280 // may not be up to date for them.
281 oop forwardee = o->forward_to_atomic(cast_to_oop(new_obj_addr), test_mark, memory_order_relaxed);
282 if (forwardee == nullptr) { // forwardee is null when forwarding is successful
283 // We won any races, we "own" this object.
284 oop new_obj = cast_to_oop(new_obj_addr);
285 assert(new_obj == o->forwardee(), "Sanity");
286
287 // Increment age if obj still in new generation. Now that
288 // we're dealing with a markWord that cannot change, it is
289 // okay to use the non mt safe oop methods.
290 if (!new_obj_is_tenured) {
291 new_obj->incr_age();
292 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
293 }
294
295 ContinuationGCSupport::transform_stack_chunk(new_obj);
296
297 // Do the size comparison first with new_obj_size, which we
298 // already have. Hopefully, only a few objects are larger than
299 // _min_array_size_for_chunking, and most of them will be arrays.
300 // So, the objArray test would be very infrequent.
301 if (new_obj_size > _min_array_size_for_chunking &&
302 klass->is_objArray_klass() &&
303 PSChunkLargeArrays) {
304 push_objArray(o, new_obj);
305 } else {
306 // we'll just push its contents
307 push_contents(new_obj);
308
309 if (StringDedup::is_enabled_string(klass) &&
310 psStringDedup::is_candidate_from_evacuation(new_obj, new_obj_is_tenured)) {
311 _string_dedup_requests.add(o);
312 }
313 }
314 return new_obj;
|
229 // into smaller submethods, but we need to be careful not to hurt
230 // performance.
231 //
232 template<bool promote_immediately>
233 inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
234 markWord test_mark) {
235 HeapWord* new_obj_addr = nullptr;
236 bool new_obj_is_tenured = false;
237
238 // NOTE: With compact headers, it is not safe to load the Klass* from old, because
239 // that would access the mark-word, that might change at any time by concurrent
240 // workers.
241 // This mark word would refer to a forwardee, which may not yet have completed
242 // copying. Therefore we must load the Klass* from the mark-word that we already
243 // loaded. This is safe, because we only enter here if not yet forwarded.
244 assert(!test_mark.is_forwarded(), "precondition");
245 Klass* klass = UseCompactObjectHeaders
246 ? test_mark.klass()
247 : o->klass();
248
249 size_t old_obj_size = o->size_given_mark_and_klass(test_mark, klass);
250 size_t new_obj_size = o->copy_size(old_obj_size, test_mark);
251
252   // Find the object's age, MT safe.
253 uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
254 test_mark.displaced_mark_helper().age() : test_mark.age();
255
256 if (!promote_immediately) {
257 // Try allocating obj in to-space (unless too old)
258 if (age < PSScavenge::tenuring_threshold()) {
259 new_obj_addr = allocate_in_young_gen(klass, new_obj_size, age);
260 }
261 }
262
263 // Otherwise try allocating obj tenured
264 if (new_obj_addr == nullptr) {
265 new_obj_addr = allocate_in_old_gen(klass, new_obj_size, age);
266 if (new_obj_addr == nullptr) {
267 return oop_promotion_failed(o, test_mark);
268 }
269 new_obj_is_tenured = true;
270 }
276
277 // Now we have to CAS in the header.
278 // Because the forwarding is done with memory_order_relaxed there is no
279 // ordering with the above copy. Clients that get the forwardee must not
280 // examine its contents without other synchronization, since the contents
281 // may not be up to date for them.
282 oop forwardee = o->forward_to_atomic(cast_to_oop(new_obj_addr), test_mark, memory_order_relaxed);
283 if (forwardee == nullptr) { // forwardee is null when forwarding is successful
284 // We won any races, we "own" this object.
285 oop new_obj = cast_to_oop(new_obj_addr);
286 assert(new_obj == o->forwardee(), "Sanity");
287
288 // Increment age if obj still in new generation. Now that
289 // we're dealing with a markWord that cannot change, it is
290 // okay to use the non mt safe oop methods.
291 if (!new_obj_is_tenured) {
292 new_obj->incr_age();
293 assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
294 }
295
296 new_obj->initialize_hash_if_necessary(o);
297
298 ContinuationGCSupport::transform_stack_chunk(new_obj);
299
300 // Do the size comparison first with new_obj_size, which we
301 // already have. Hopefully, only a few objects are larger than
302 // _min_array_size_for_chunking, and most of them will be arrays.
303 // So, the objArray test would be very infrequent.
304 if (new_obj_size > _min_array_size_for_chunking &&
305 klass->is_objArray_klass() &&
306 PSChunkLargeArrays) {
307 push_objArray(o, new_obj);
308 } else {
309 // we'll just push its contents
310 push_contents(new_obj);
311
312 if (StringDedup::is_enabled_string(klass) &&
313 psStringDedup::is_candidate_from_evacuation(new_obj, new_obj_is_tenured)) {
314 _string_dedup_requests.add(o);
315 }
316 }
317 return new_obj;
|