
src/hotspot/share/runtime/deoptimization.cpp


1611       InstanceKlass* ik = InstanceKlass::cast(k);
1612       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1613     } else if (k->is_typeArray_klass()) {
1614       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1615       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1616     } else if (k->is_objArray_klass()) {
1617       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1618     }
1619   }
1620   // These objects may escape when we return to the interpreter after deoptimization.
1621   // We need a barrier so that stores that initialize these objects can't be reordered
1622   // with subsequent stores that make these objects accessible by other threads.
1623   OrderAccess::storestore();
1624 }
1625 
1626 
1627 // relock objects for which synchronization was eliminated
1628 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1629                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1630   bool relocked_objects = false;

1631   for (int i = 0; i < monitors->length(); i++) {
1632     MonitorInfo* mon_info = monitors->at(i);
1633     if (mon_info->eliminated()) {
1634       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1635       relocked_objects = true;
1636       if (!mon_info->owner_is_scalar_replaced()) {
1637         Handle obj(thread, mon_info->owner());
1638         markWord mark = obj->mark();
1639         if (exec_mode == Unpack_none) {
1640           if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) {
1641             // With exec_mode == Unpack_none, obj may be thread-local and locked in
1642             // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
1643             markWord dmw = mark.displaced_mark_helper();
1644             mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
1645             obj->set_mark(dmw);

1646           }
1647           if (mark.has_monitor()) {
1648             // defer relocking if the deoptee thread is currently waiting for obj
1649             ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
1650             if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
1651               assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
1652               mon_info->lock()->set_displaced_header(markWord::unused_mark());
1653               JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
1654               continue;
1655             }
1656           }
1657         }
1658         if (LockingMode == LM_LIGHTWEIGHT) {
1659           // We have lost information about the correct state of the lock stack.
1660           // Inflate the locks instead. Enter then inflate to avoid races with
1661           // deflation.
1662           ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
1663           assert(mon_info->owner()->is_locked(), "object must be locked now");
1664           ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
1665           assert(mon->owner() == deoptee_thread, "must be");
1666         } else {
1667           BasicLock* lock = mon_info->lock();
1668           ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
1669           assert(mon_info->owner()->is_locked(), "object must be locked now");
1670         }
1671       }
1672     }
1673   }

1674   return relocked_objects;
1675 }
1676 #endif // COMPILER2_OR_JVMCI
1677 
1678 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1679   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1680 
1681   // Register map for next frame (used for stack crawl).  We capture
1682   // the state of the deopt'ing frame's caller.  Thus if we need to
1683   // stuff a C2I adapter we can properly fill in the callee-save
1684   // register locations.
1685   frame caller = fr.sender(reg_map);
1686   int frame_size = pointer_delta_as_int(caller.sp(), fr.sp());
1687 
1688   frame sender = caller;
1689 
1690   // Since the Java thread being deoptimized will eventually adjust its own stack,
1691   // the vframeArray containing the unpacking information is allocated in the C heap.
1692   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1693   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);

1611       InstanceKlass* ik = InstanceKlass::cast(k);
1612       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1613     } else if (k->is_typeArray_klass()) {
1614       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1615       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1616     } else if (k->is_objArray_klass()) {
1617       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1618     }
1619   }
1620   // These objects may escape when we return to the interpreter after deoptimization.
1621   // We need a barrier so that stores that initialize these objects can't be reordered
1622   // with subsequent stores that make these objects accessible by other threads.
1623   OrderAccess::storestore();
1624 }
1625 
1626 
1627 // relock objects for which synchronization was eliminated
1628 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1629                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1630   bool relocked_objects = false;
1631   LOOM_MONITOR_SUPPORT_ONLY(int compensate_extra_increment = 0;)
1632   for (int i = 0; i < monitors->length(); i++) {
1633     MonitorInfo* mon_info = monitors->at(i);
1634     if (mon_info->eliminated()) {
1635       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1636       relocked_objects = true;
1637       if (!mon_info->owner_is_scalar_replaced()) {
1638         Handle obj(thread, mon_info->owner());
1639         markWord mark = obj->mark();
1640         if (exec_mode == Unpack_none) {
1641           if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) {
1642             // With exec_mode == Unpack_none, obj may be thread-local and locked in
1643             // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
1644             markWord dmw = mark.displaced_mark_helper();
1645             mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
1646             obj->set_mark(dmw);
1647             LOOM_MONITOR_SUPPORT_ONLY(compensate_extra_increment++;)
1648           }
1649           if (mark.has_monitor()) {
1650             // defer relocking if the deoptee thread is currently waiting for obj
1651             ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
1652             if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
1653               assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
1654               mon_info->lock()->set_displaced_header(markWord::unused_mark());
1655               JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
1656               continue;
1657             }
1658           }
1659         }
1660         if (LockingMode == LM_LIGHTWEIGHT) {
1661           // We have lost information about the correct state of the lock stack.
1662           // Inflate the locks instead. Enter then inflate to avoid races with
1663           // deflation.
1664           ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
1665           assert(mon_info->owner()->is_locked(), "object must be locked now");
1666           ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
1667           assert(mon->is_owner(deoptee_thread), "must be");
1668         } else {
1669           BasicLock* lock = mon_info->lock();
1670           ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
1671           assert(mon_info->owner()->is_locked(), "object must be locked now");
1672         }
1673       }
1674     }
1675   }
1676   LOOM_MONITOR_SUPPORT_ONLY(deoptee_thread->dec_held_monitor_count(compensate_extra_increment);)
1677   return relocked_objects;
1678 }
1679 #endif // COMPILER2_OR_JVMCI
1680 
1681 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1682   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1683 
1684   // Register map for next frame (used for stack crawl).  We capture
1685   // the state of the deopt'ing frame's caller.  Thus if we need to
1686   // stuff a C2I adapter we can properly fill in the callee-save
1687   // register locations.
1688   frame caller = fr.sender(reg_map);
1689   int frame_size = pointer_delta_as_int(caller.sp(), fr.sp());
1690 
1691   frame sender = caller;
1692 
1693   // Since the Java thread being deoptimized will eventually adjust its own stack,
1694   // the vframeArray containing the unpacking information is allocated in the C heap.
1695   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1696   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
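The OrderAccess::storestore() at the end of the reassignment loop above is purely a publication barrier: the stores that fill in the fields of the reallocated (previously scalar-replaced) objects must not be reordered with later stores that make those objects reachable from other threads once we return to the interpreter. A minimal, self-contained analogue of the same concern, using std::atomic release/acquire ordering instead of HotSpot's OrderAccess (names and types here are illustrative only, not taken from the JDK sources):

    #include <atomic>

    struct Payload {
      int a;
      int b;
    };

    std::atomic<Payload*> g_published{nullptr};

    void publish() {
      Payload* p = new Payload();
      p->a = 1;                                   // initializing stores
      p->b = 2;
      // Release ordering plays the role of the storestore barrier: the two
      // initializing stores above cannot be reordered past the publishing store
      // below, so any thread that observes the pointer also observes the fields.
      g_published.store(p, std::memory_order_release);
    }

    int consume() {
      Payload* p = g_published.load(std::memory_order_acquire);
      return (p != nullptr) ? p->a + p->b : -1;   // yields 3 once publish() has run
    }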
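Two of the address comparisons above depend on the stack growing toward lower addresses: in relock_objects, fr.sp() > (intptr_t*)mark.locker() means the BasicLock sits below the deoptee frame's SP and therefore belongs to a callee frame, and in create_vframeArray the distance from fr.sp() up to caller.sp() is taken as the size of the compiled frame being replaced. A small sketch of that arithmetic with made-up values (a toy model, not HotSpot code; the word count here comes simply from intptr_t* pointer subtraction):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Pretend thread stack: higher indices are higher addresses, and frames are
      // pushed toward lower addresses (callee frames sit below their callers).
      intptr_t stack[32] = {};
      const intptr_t* caller_sp        = &stack[24];  // caller of the deoptee frame
      const intptr_t* deoptee_sp       = &stack[16];  // frame being deoptimized
      const intptr_t* callee_lock_slot = &stack[8];   // BasicLock slot in a callee frame

      // Mirrors fr.sp() > (intptr_t*)mark.locker(): the lock slot is below the
      // deoptee's SP, so the object is locked by a callee frame.
      assert(deoptee_sp > callee_lock_slot);

      // Mirrors the frame-size computation: the deoptee frame spans the words
      // between its own SP and its caller's SP.
      int frame_size_in_words = static_cast<int>(caller_sp - deoptee_sp);
      assert(frame_size_in_words == 8);
      return 0;
    }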