    if (TraceDeoptimization) {
      print_objects(deoptee_thread, objects, realloc_failures);
    }
  }
  if (save_oop_result) {
    // Restore result.
    deoptee.set_saved_oop_result(&map, return_value());
  }
  return realloc_failures;
}

static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
                                     frame& deoptee, int exec_mode, bool& deoptimized_objects) {
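  // Note: thread is the current thread doing the relocking; it may differ from
  // deoptee_thread, the thread that owns the frames covered by chunk.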
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
  assert(thread == Thread::current(), "should be");
  HandleMark hm(thread);
#ifndef PRODUCT
  bool first = true;
#endif // !PRODUCT
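  // Debug-only: remember the owner of each relocked monitor, in relock order,
  // so the lock stack can be cross-checked once all frames have been processed.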
  DEBUG_ONLY(GrowableArray<oop> lock_order{0};)
  // Start locking from outermost/oldest frame
  for (int i = (chunk->length() - 1); i >= 0; i--) {
    compiledVFrame* cvf = chunk->at(i);
    assert(cvf->scope() != nullptr, "expect only compiled java frames");
    GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
    if (monitors->is_nonempty()) {
      bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
                                                     exec_mode, realloc_failures);
      deoptimized_objects = deoptimized_objects || relocked;
#ifdef ASSERT
      if (LockingMode == LM_LIGHTWEIGHT && !realloc_failures) {
        for (MonitorInfo* mi : *monitors) {
          lock_order.push(mi->owner());
        }
      }
#endif // ASSERT
#ifndef PRODUCT
      if (PrintDeoptimizationDetails) {
        ResourceMark rm;
        stringStream st;
        for (int j = 0; j < monitors->length(); j++) {
          MonitorInfo* mi = monitors->at(j);
          if (mi->eliminated()) {
            if (first) {
              first = false;
              st.print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
            }
            if (exec_mode == Deoptimization::Unpack_none) {
              ObjectMonitor* monitor = deoptee_thread->current_waiting_monitor();
              if (monitor != nullptr && monitor->object() == mi->owner()) {
                st.print_cr("     object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner()));
                continue;
              }
            }
            if (mi->owner_is_scalar_replaced()) {
              Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
              st.print_cr("     failed reallocation for klass %s", k->external_name());
            } else {
              st.print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
            }
          }
        }
        tty->print_raw(st.freeze());
      }
#endif // !PRODUCT
    }
  }
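  // Debug-only: the relocked objects must appear on the deoptee thread's lock
  // stack in the same order in which they were just reacquired.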
#ifdef ASSERT
  if (LockingMode == LM_LIGHTWEIGHT && !realloc_failures) {
    deoptee_thread->lock_stack().verify_consistent_lock_order(lock_order, exec_mode != Deoptimization::Unpack_none);
  }
#endif // ASSERT
}

// Deoptimize objects, that is, reallocate and relock them, just before they
// escape through JVMTI. The given vframes cover one physical frame.
bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
                                                 bool& realloc_failures) {
  frame deoptee = chunk->at(0)->fr();
  JavaThread* deoptee_thread = chunk->at(0)->thread();
  CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
  RegisterMap map(chunk->at(0)->register_map());
  bool deoptimized_objects = false;

  bool const jvmci_enabled = JVMCI_ONLY(UseJVMCICompiler) NOT_JVMCI(false);

  // Reallocate the non-escaping objects and restore their fields.
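  // (Rematerialization is only relevant if a compiler that can eliminate
  // allocations, i.e. JVMCI or C2 with the flags below, is in use.)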
  if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
                                      || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
    realloc_failures = rematerialize_objects(thread, Unpack_none, cm, deoptee, map, chunk, deoptimized_objects);
  }

// ... (intervening code omitted; the following excerpt is from Deoptimization::relock_objects) ...

        markWord mark = obj->mark();
        if (exec_mode == Unpack_none) {
          if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) {
            // With exec_mode == Unpack_none obj may be thread local and locked in
            // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
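            // (On HotSpot's downward-growing stacks, a BasicLock address below
            // fr.sp() means the lock resides in a younger, i.e. callee, frame.)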
            markWord dmw = mark.displaced_mark_helper();
            mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
            obj->set_mark(dmw);
          }
          if (mark.has_monitor()) {
            // defer relocking if the deoptee thread is currently waiting for obj
            ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
            if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
              assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
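              // Mark the BasicLock as unused and record the pending relock; the
              // monitor will be reacquired once the wait completes.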
              mon_info->lock()->set_displaced_header(markWord::unused_mark());
              JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
              continue;
            }
          }
        }
        if (LockingMode == LM_LIGHTWEIGHT) {
          // We have lost information about the correct state of the lock stack.
          // Inflate the locks instead. Enter then inflate to avoid races with
          // deflation.
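          // enter_for()/inflate_for() take the thread to lock for explicitly,
          // since deoptee_thread is not necessarily the current thread.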
          ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
          ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
          assert(mon->owner() == deoptee_thread, "must be");
        } else {
          BasicLock* lock = mon_info->lock();
          ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
        }
      }
    }
  }
  return relocked_objects;
}
#endif // COMPILER2_OR_JVMCI

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));

  // Register map for next frame (used for stack crawl). We capture
  // the state of the deopt'ing frame's caller. Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
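  // Note: sp() returns an intptr_t*, so this difference counts stack words, not bytes.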
  int frame_size = caller.sp() - fr.sp();

  frame sender = caller;