#include "jfr/metadata/jfrSerializer.hpp"
#endif

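// A rough sketch of the generation scheme used below: nmethods are marked with
// the currently active generation, while _committed_deopt_gen records the most
// recent generation whose marks have already been acted on. mark() asserts
// that the active generation is always ahead of the committed one.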
uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
uint64_t DeoptimizationScope::_active_deopt_gen = 1;
bool DeoptimizationScope::_committing_in_progress = false;

DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
  DEBUG_ONLY(_deopted = false;)

  MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
  // If there is nothing to deopt, _required_gen is the same as committed.
  _required_gen = DeoptimizationScope::_committed_deopt_gen;
}

DeoptimizationScope::~DeoptimizationScope() {
  assert(_deopted, "Deopt not executed");
}

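// Mark nm for deoptimization under this scope: the nmethod is tagged with the
// active generation, and this scope then requires that generation to be
// committed before it is considered done.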
void DeoptimizationScope::mark(nmethod* nm, bool inc_recompile_counts) {
  if (!nm->can_be_deoptimized()) {
    return;
  }

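  // mark() may be reached with NMethodState_lock already held by this thread,
  // so the lock is only acquired here if it is not owned yet.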
  ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);

  // Already marked, but this scope still needs it to be deopted.
  if (nm->is_marked_for_deoptimization()) {
    dependent(nm);
    return;
  }

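  // The status records whether recompile counts should be updated for this
  // deoptimization.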
  nmethod::DeoptimizationStatus status =
      inc_recompile_counts ? nmethod::deoptimize : nmethod::deoptimize_noupdate;
  Atomic::store(&nm->_deoptimization_status, status);

  // Make sure active is not committed
  assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be");
  assert(nm->_deoptimization_generation == 0, "Is already marked");

  nm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen;
  _required_gen = DeoptimizationScope::_active_deopt_gen;
}

// ...

      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
  // These objects may escape when we return to the interpreter after deoptimization.
  // We need a barrier so that stores that initialize these objects cannot be reordered
  // with subsequent stores that make these objects accessible to other threads.
  OrderAccess::storestore();
}


// Relock objects for which synchronization was eliminated.
bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
                                    JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
  bool relocked_objects = false;
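  // With Loom monitor support, remember how many locks are merely made
  // recursive in a callee frame below (the Unpack_none/LM_LEGACY case) so the
  // matching extra increments of the deoptee's held monitor count can be
  // subtracted again after the loop.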
  LOOM_MONITOR_SUPPORT_ONLY(int compensate_extra_increment = 0;)
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
      relocked_objects = true;
      if (!mon_info->owner_is_scalar_replaced()) {
        Handle obj(thread, mon_info->owner());
        markWord mark = obj->mark();
        if (exec_mode == Unpack_none) {
          if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) {
            // With exec_mode == Unpack_none obj may be thread local and locked in
            // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
            markWord dmw = mark.displaced_mark_helper();
            mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
            obj->set_mark(dmw);
            LOOM_MONITOR_SUPPORT_ONLY(compensate_extra_increment++;)
          }
          if (mark.has_monitor()) {
            // Defer relocking if the deoptee thread is currently waiting for obj.
            ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
            if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
              assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
              mon_info->lock()->set_displaced_header(markWord::unused_mark());
              JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
              continue;
            }
          }
        }
        if (LockingMode == LM_LIGHTWEIGHT) {
          // We have lost information about the correct state of the lock stack.
          // Inflate the locks instead. Enter then inflate to avoid races with
          // deflation.
          ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
          ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
          assert(mon->is_owner(deoptee_thread), "must be");
        } else {
          BasicLock* lock = mon_info->lock();
          ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
        }
      }
    }
  }
  LOOM_MONITOR_SUPPORT_ONLY(deoptee_thread->dec_held_monitor_count(compensate_extra_increment);)
  return relocked_objects;
}
#endif // COMPILER2_OR_JVMCI

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));

  // Register map for next frame (used for stack crawl). We capture
  // the state of the deopt'ing frame's caller. Thus if we need to
  // stuff a C2I adapter we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
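  // The deoptee frame spans from its own SP up to its caller's SP; that
  // distance is the frame size handed to vframeArray::allocate() below.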
  int frame_size = pointer_delta_as_int(caller.sp(), fr.sp());

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);