src/hotspot/share/runtime/deoptimization.cpp

      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_objArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
  // These objects may escape when we return to the interpreter after deoptimization.
  // We need a barrier so that stores that initialize these objects can't be reordered
  // with subsequent stores that make these objects accessible by other threads.
  OrderAccess::storestore();
}


// relock objects for which synchronization was eliminated
bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
                                    JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
  bool relocked_objects = false;
  LOOM_MONITOR_SUPPORT_ONLY(int compensate_extra_increment = 0;)
  for (int i = 0; i < monitors->length(); i++) {
    MonitorInfo* mon_info = monitors->at(i);
    if (mon_info->eliminated()) {
      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
      relocked_objects = true;
      if (!mon_info->owner_is_scalar_replaced()) {
        Handle obj(thread, mon_info->owner());
        markWord mark = obj->mark();
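        // Unpack_none: no frames are actually being unpacked here; the relocking is
        // done on behalf of deoptee_thread, which may not be the current thread
        // (e.g. when a JVMTI escape barrier forces relocking in another thread's frame).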
        if (exec_mode == Unpack_none) {
          if (LockingMode == LM_LEGACY && mark.has_locker() && fr.sp() > (intptr_t*)mark.locker()) {
            // With exec_mode == Unpack_none, obj may be thread-local and locked in
            // a callee frame. Make the lock in the callee a recursive lock and restore the displaced header.
            markWord dmw = mark.displaced_mark_helper();
            mark.locker()->set_displaced_header(markWord::encode((BasicLock*) nullptr));
            obj->set_mark(dmw);
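            // Count this conversion; with Loom monitor support the deoptee's held
            // monitor count is decremented by this amount after the loop
            // (see dec_held_monitor_count() below).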
            LOOM_MONITOR_SUPPORT_ONLY(compensate_extra_increment++;)
          }
          if (mark.has_monitor()) {
            // defer relocking if the deoptee thread is currently waiting for obj
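            // The relock itself is deferred until the deoptee thread returns from
            // its wait; the relock count below records how many such relocks are pending.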
            ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
            if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
              assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
              mon_info->lock()->set_displaced_header(markWord::unused_mark());
              JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
              continue;
            }
          }
        }
        if (LockingMode == LM_LIGHTWEIGHT && exec_mode == Unpack_none) {
          // We have lost information about the correct state of the lock stack.
          // Inflate the locks instead. Enter, then inflate, to avoid races with
          // deflation.
          ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
          ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
          assert(mon->is_owner(deoptee_thread), "must be");
        } else {
          BasicLock* lock = mon_info->lock();
          ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
          assert(mon_info->owner()->is_locked(), "object must be locked now");
        }
      }
    }
  }
  LOOM_MONITOR_SUPPORT_ONLY(deoptee_thread->dec_held_monitor_count(compensate_extra_increment);)
  return relocked_objects;
}
#endif // COMPILER2_OR_JVMCI

vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));

  // Register map for the next frame (used for the stack crawl).  We capture
  // the state of the deopt'ing frame's caller.  Thus, if we need to
  // stuff a C2I adapter, we can properly fill in the callee-save
  // register locations.
  frame caller = fr.sender(reg_map);
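  // frame_size covers the deoptee frame: the distance from fr's SP up to its
  // caller's SP, i.e. the stack space the frame being deoptimized occupies.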
  int frame_size = pointer_delta_as_int(caller.sp(), fr.sp());

  frame sender = caller;

  // Since the Java thread being deoptimized will eventually adjust its own stack,
  // the vframeArray containing the unpacking information is allocated in the C heap.
  // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);