src/hotspot/share/runtime/deoptimization.cpp

  33 #include "code/pcDesc.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/compilationPolicy.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/memAllocator.hpp"
  39 #include "interpreter/bytecode.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/method.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/typeArrayOop.inline.hpp"
  59 #include "oops/verifyOopClosure.hpp"
  60 #include "prims/jvmtiDeferredUpdates.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "prims/jvmtiThreadState.hpp"
  63 #include "prims/methodHandles.hpp"
  64 #include "prims/vectorSupport.hpp"
  65 #include "runtime/atomic.hpp"
  66 #include "runtime/continuation.hpp"
  67 #include "runtime/continuationEntry.inline.hpp"
  68 #include "runtime/deoptimization.hpp"
  69 #include "runtime/escapeBarrier.hpp"
  70 #include "runtime/fieldDescriptor.hpp"
  71 #include "runtime/fieldDescriptor.inline.hpp"
  72 #include "runtime/frame.inline.hpp"
  73 #include "runtime/handles.inline.hpp"
  74 #include "runtime/interfaceSupport.inline.hpp"
  75 #include "runtime/javaThread.hpp"
  76 #include "runtime/jniHandles.inline.hpp"
  77 #include "runtime/keepStackGCProcessed.hpp"

 326                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 327                                   bool& deoptimized_objects) {
 328   bool realloc_failures = false;
 329   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 330 
 331   JavaThread* deoptee_thread = chunk->at(0)->thread();
 332   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 333          "a frame can only be deoptimized by the owner thread");
 334 
 335   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 336 
 337   // The flag return_oop() indicates call sites which return an oop
 338   // in compiled code. Such sites include Java method calls,
 339   // runtime calls (for example, those used to allocate new objects/arrays
 340   // on the slow code path) and any other calls generated in compiled code.
 341   // It is not guaranteed that we can recover such information here solely
 342   // by analyzing the bytecode of the deoptimized frames. This is why this flag
 343   // is set during method compilation (see Compile::Process_OopMap_Node()).
 344   // If the previous frame was popped or if we are dispatching an exception,
 345   // we don't have an oop result.
 346   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 347   Handle return_value;
 348   if (save_oop_result) {
 349     // Reallocation may trigger GC. If deoptimization happened on return from
 350     // a call which returns an oop, we need to save it since it is not in the oopmap.
 351     oop result = deoptee.saved_oop_result(&map);
 352     assert(oopDesc::is_oop_or_null(result), "must be oop");
 353     return_value = Handle(thread, result);
 354     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 355     if (TraceDeoptimization) {
 356       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 357       tty->cr();
 358     }
 359   }
 360   if (objects != nullptr) {
 361     if (exec_mode == Deoptimization::Unpack_none) {
 362       assert(thread->thread_state() == _thread_in_vm, "assumption");
 363       JavaThread* THREAD = thread; // For exception macros.
 364       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 365       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 366       deoptimized_objects = true;
 367     } else {
 368       JavaThread* current = thread; // For JRT_BLOCK
 369       JRT_BLOCK
 370       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 371       JRT_END
 372     }
 373     bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
 374     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 375     if (TraceDeoptimization) {
 376       print_objects(deoptee_thread, objects, realloc_failures);
 377     }
 378   }
 379   if (save_oop_result) {
 380     // Restore result.
 381     deoptee.set_saved_oop_result(&map, return_value());
 382   }
 383   return realloc_failures;
 384 }
 385 
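A note on the Handle above: realloc_objects() can allocate and therefore
trigger a GC, while the saved result oop is not described by any oopmap at
this point. Wrapping it in a Handle gives the collector a root it can update.
A minimal standalone sketch of that indirection, with toy types rather than
HotSpot's real Handle machinery:

    #include <cassert>

    struct ToyOop { int payload; };

    // A toy "handle": one extra indirection through a root slot that the
    // collector knows about and rewrites when the object moves.
    struct ToyHandle {
      ToyOop** slot;
      ToyOop* operator()() const { return *slot; }  // resolve, like Handle::operator()
    };

    int main() {
      ToyOop from_space{42};
      ToyOop* root = &from_space;    // the root slot a GC would update
      ToyHandle h{&root};

      ToyOop to_space = from_space;  // "GC": the object is evacuated...
      root = &to_space;              // ...and the root slot is fixed up

      assert(h()->payload == 42);    // the handle still resolves correctly;
                                     // a raw ToyOop* saved earlier would be stale
      return 0;
    }
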
 386 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 387                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 388   JavaThread* deoptee_thread = chunk->at(0)->thread();
 389   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 390   assert(thread == Thread::current(), "should be");
 391   HandleMark hm(thread);
 392 #ifndef PRODUCT
 393   bool first = true;
 394 #endif // !PRODUCT
 395   // Start locking from outermost/oldest frame
 396   for (int i = (chunk->length() - 1); i >= 0; i--) {
 397     compiledVFrame* cvf = chunk->at(i);
 398     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 399     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 400     if (monitors->is_nonempty()) {
 401       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 697   // its caller's stack by. If the caller is a compiled frame then
 698   // we pretend that the callee has no parameters so that the
 699   // extension counts for the full amount of locals and not just
 700   // locals minus parms. This is because without a c2i adapter the parm
 701   // area as created by the compiled frame will not be usable by
 702   // the interpreter. (Depending on the calling convention there
 703   // may not even be enough space.)
 704 
 705   // QQQ I'd rather see this pushed down into last_frame_adjust
 706   // and have it take the sender (aka caller).
 707 
 708   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 709     caller_adjustment = last_frame_adjust(0, callee_locals);
 710   } else if (callee_locals > callee_parameters) {
 711     // The caller frame may need extending to accommodate
 712     // non-parameter locals of the first unpacked interpreted frame.
 713     // Compute that adjustment.
 714     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 715   }
 716 
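The adjustment computed above is simple slot arithmetic. A hedged model of it
(the real last_frame_adjust() is platform-specific; caller_adjustment_model is
a hypothetical name used only for illustration):

    // Slots by which the caller frame must be extended for the first
    // unpacked interpreted frame (assumed semantics, not the platform code).
    static int caller_adjustment_model(bool caller_is_interpreted,
                                       int callee_parameters, int callee_locals) {
      if (!caller_is_interpreted) {
        // Compiled caller: its parm area is unusable without a c2i adapter, so
        // pretend the callee has no parameters and extend by all of its locals.
        return callee_locals;        // ~ last_frame_adjust(0, callee_locals)
      }
      if (callee_locals > callee_parameters) {
        // Interpreted caller: extend only for the non-parameter locals.
        return callee_locals - callee_parameters;
      }
      return 0;                      // the existing parameter area suffices
    }
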
 717   // If the sender is deoptimized, we must retrieve the address of the handler
 718   // since the frame will "magically" show the original pc before the deopt
 719   // and we'd undo the deopt.
 720 
 721   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 722   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 723     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 724   }
 725 
 726   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 727 
 728 #if INCLUDE_JVMCI
 729   if (exceptionObject() != nullptr) {
 730     current->set_exception_oop(exceptionObject());
 731     exec_mode = Unpack_exception;
 732   }
 733 #endif
 734 
 735   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 736     assert(current->has_pending_exception(), "should have thrown OOME");
 737     current->set_exception_oop(current->pending_exception());

1197        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1198        default:;
1199      }
1200    }
1201    return nullptr;
1202 }
1203 #endif // INCLUDE_JVMCI
1204 
1205 #if COMPILER2_OR_JVMCI
1206 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1207   Handle pending_exception(THREAD, thread->pending_exception());
1208   const char* exception_file = thread->exception_file();
1209   int exception_line = thread->exception_line();
1210   thread->clear_pending_exception();
1211 
1212   bool failures = false;
1213 
1214   for (int i = 0; i < objects->length(); i++) {
1215     assert(objects->at(i)->is_object(), "invalid debug information");
1216     ObjectValue* sv = (ObjectValue*) objects->at(i);
1217 
1218     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1219     oop obj = nullptr;
1220 
1221     bool cache_init_error = false;
1222     if (k->is_instance_klass()) {
1223 #if INCLUDE_JVMCI
1224       nmethod* nm = fr->cb()->as_nmethod_or_null();
1225       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1226         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1227         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1228         if (obj != nullptr) {
1229           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1230           abv->set_cached(true);
1231         } else if (cache_init_error) {
 1232           // Results in an OOME, which is valid (as opposed to a class initialization error)
 1233           // and is acceptable for the rare case of a cache initialization failing.
1234           failures = true;
1235         }
1236       }
1237 #endif // INCLUDE_JVMCI
1238 
1239       InstanceKlass* ik = InstanceKlass::cast(k);
1240       if (obj == nullptr && !cache_init_error) {
1241         InternalOOMEMark iom(THREAD);
1242         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1243           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1244         } else {
1245           obj = ik->allocate_instance(THREAD);
1246         }
1247       }
1248     } else if (k->is_typeArray_klass()) {
1249       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1250       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1251       int len = sv->field_size() / type2size[ak->element_type()];
1252       InternalOOMEMark iom(THREAD);
1253       obj = ak->allocate(len, THREAD);
1254     } else if (k->is_objArray_klass()) {
1255       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1256       InternalOOMEMark iom(THREAD);
1257       obj = ak->allocate(sv->field_size(), THREAD);
1258     }
1259 
1260     if (obj == nullptr) {
1261       failures = true;
1262     }
1263 
1264     assert(sv->value().is_null(), "redundant reallocation");
1265     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1266     CLEAR_PENDING_EXCEPTION;
1267     sv->set_value(obj);
1268   }
1269 
1270   if (failures) {
1271     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1272   } else if (pending_exception.not_null()) {
1273     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1274   }
1275 
1276   return failures;
1277 }
1278 
1279 #if INCLUDE_JVMCI
 1280 /**
 1281  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
 1282  * we need a way to recover the actual kind in order to write the correct
 1283  * number of bytes.
 1284  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1285  * the entries at index i + 1 to i + n - 1 are 'markers'.
 1286  * For example, if we were writing a short at index 4 of a byte array of size 8, the
 1287  * expected form of the array would be:
 1288  *
 1289  * {b0, b1, b2, b3, INT, marker, b6, b7}
 1290  *
 1291  * Thus, in order to get back the size of the entry, we simply need to count the number
 1292  * of marked entries.
 1293  *
 1294  * @param virtualArray the virtualized byte array
 1295  * @param i index of the virtual entry we are recovering
 1296  * @return The number of bytes the entry spans
 1297  */
1298 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

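A standalone sketch of the marker-counting scheme described above, using a toy
slot representation rather than HotSpot's ScopeValue machinery:

    #include <cassert>
    #include <vector>

    // Toy slots: a plain byte, a typed entry head, or a marker filling one of
    // the extra bytes the entry spans.
    enum class Slot { BYTE, INT_HEAD, MARKER };

    // Recover the width of the entry at index i by counting the markers after it.
    static int bytes_for_entry(const std::vector<Slot>& arr, int i) {
      int n = 1;
      while (i + n < (int)arr.size() && arr[i + n] == Slot::MARKER) {
        n++;
      }
      return n;
    }

    int main() {
      // The comment's example: a short written at index 4 of a byte array of
      // size 8, i.e. {b0, b1, b2, b3, INT, marker, b6, b7}.
      std::vector<Slot> arr = {Slot::BYTE, Slot::BYTE, Slot::BYTE, Slot::BYTE,
                               Slot::INT_HEAD, Slot::MARKER, Slot::BYTE, Slot::BYTE};
      assert(bytes_for_entry(arr, 4) == 2);  // the entry spans two bytes
      return 0;
    }
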
1424       default:
1425         ShouldNotReachHere();
1426     }
1427     index++;
1428   }
1429 }
1430 
1431 // restore fields of an eliminated object array
1432 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1433   for (int i = 0; i < sv->field_size(); i++) {
1434     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1435     assert(value->type() == T_OBJECT, "object element expected");
1436     obj->obj_at_put(i, value->get_obj()());
1437   }
1438 }
1439 
1440 class ReassignedField {
1441 public:
1442   int _offset;
1443   BasicType _type;
1444 public:
1445   ReassignedField() {
1446     _offset = 0;
1447     _type = T_ILLEGAL;
1448   }
1449 };
1450 
1451 static int compare(ReassignedField* left, ReassignedField* right) {
1452   return left->_offset - right->_offset;
1453 }
1454 
1455 // Restore fields of an eliminated instance object using the same field order
1456 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1457 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1458   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1459   InstanceKlass* ik = klass;
1460   while (ik != nullptr) {
1461     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1462       if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1463         ReassignedField field;
1464         field._offset = fs.offset();
1465         field._type = Signature::basic_type(fs.signature());
1466         fields->append(field);
1467       }
1468     }
1469     ik = ik->superklass();
1470   }
1471   fields->sort(compare);
1472   for (int i = 0; i < fields->length(); i++) {
1473     ScopeValue* scope_field = sv->field_at(svIndex);
1474     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1475     int offset = fields->at(i)._offset;
1476     BasicType type = fields->at(i)._type;
1477     switch (type) {
1478       case T_OBJECT: case T_ARRAY:
1479         assert(value->type() == T_OBJECT, "Agreement.");
1480         obj->obj_field_put(offset, value->get_obj()());
1481         break;
1482 
1483       case T_INT: case T_FLOAT: { // 4 bytes.
1484         assert(value->type() == T_INT, "Agreement.");
1485         bool big_value = false;
1486         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1487           if (scope_field->is_location()) {
1488             Location::Type type = ((LocationValue*) scope_field)->location().type();
1489             if (type == Location::dbl || type == Location::lng) {
1490               big_value = true;
1491             }
1492           }
1493           if (scope_field->is_constant_int()) {
1494             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1495             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1496               big_value = true;
1497             }
1498           }

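For the big_value case above: a long or double that was scalar-replaced into
two adjacent 32-bit field slots has to be reassembled before it is written
back into the reallocated object. A sketch of the reassembly, assuming the low
word comes first (the actual word order is platform-dependent and handled by
the StackValue layer in HotSpot):

    #include <cassert>
    #include <cstdint>

    // Rebuild a 64-bit value from two 32-bit halves; word order is an
    // assumption made for this illustration.
    static int64_t assemble_big_value(uint32_t lo, uint32_t hi) {
      return (int64_t)(((uint64_t)hi << 32) | (uint64_t)lo);
    }

    int main() {
      assert(assemble_big_value(0xdeadbeefu, 0x1u) == 0x1deadbeefLL);
      return 0;
    }
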
1532         break;
1533 
1534       case T_BYTE:
1535         assert(value->type() == T_INT, "Agreement.");
1536         obj->byte_field_put(offset, (jbyte)value->get_jint());
1537         break;
1538 
1539       case T_BOOLEAN:
1540         assert(value->type() == T_INT, "Agreement.");
1541         obj->bool_field_put(offset, (jboolean)value->get_jint());
1542         break;
1543 
1544       default:
1545         ShouldNotReachHere();
1546     }
1547     svIndex++;
1548   }
1549   return svIndex;
1550 }
1551 
1552 // restore fields of all eliminated objects and arrays
1553 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1554   for (int i = 0; i < objects->length(); i++) {
1555     assert(objects->at(i)->is_object(), "invalid debug information");
1556     ObjectValue* sv = (ObjectValue*) objects->at(i);
1557     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1558     Handle obj = sv->value();
1559     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1560 #ifndef PRODUCT
1561     if (PrintDeoptimizationDetails) {
1562       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1563     }
1564 #endif // !PRODUCT
1565 
1566     if (obj.is_null()) {
1567       continue;
1568     }
1569 
1570 #if INCLUDE_JVMCI
1571     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1572     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1573       continue;
1574     }
1575 #endif // INCLUDE_JVMCI
1576     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1577       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1578       ScopeValue* payload = sv->field_at(0);
1579       if (payload->is_location() &&
1580           payload->as_LocationValue()->location().type() == Location::vector) {
1581 #ifndef PRODUCT
1582         if (PrintDeoptimizationDetails) {
1583           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1584           if (Verbose) {
1585             Handle obj = sv->value();
1586             k->oop_print_on(obj(), tty);
1587           }
1588         }
1589 #endif // !PRODUCT
 1590         continue; // Such a vector's value was already restored in VectorSupport::allocate_vector().
1591       }
1592       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1593       // which could be restored after vector object allocation.
1594     }
1595     if (k->is_instance_klass()) {
1596       InstanceKlass* ik = InstanceKlass::cast(k);
1597       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1598     } else if (k->is_typeArray_klass()) {
1599       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1600       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1601     } else if (k->is_objArray_klass()) {
1602       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1603     }
1604   }
 1605   // These objects may escape when we return to the interpreter after deoptimization.
 1606   // We need a barrier so that stores that initialize these objects can't be reordered
 1607   // with subsequent stores that make these objects accessible to other threads.
1608   OrderAccess::storestore();
1609 }
1610 
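The storestore() above addresses a classic safe-publication problem: the
rematerialized objects become reachable by other threads once we return to the
interpreter, so their initializing stores must not be reordered past the
stores that publish them. A standalone analogue using standard C++ fences
(OrderAccess::storestore() is HotSpot's equivalent primitive):

    #include <atomic>

    struct Obj { int field; };
    static std::atomic<Obj*> g_published{nullptr};

    void publish(Obj* o) {
      o->field = 42;                                        // initializing store
      std::atomic_thread_fence(std::memory_order_release);  // ~ storestore()
      g_published.store(o, std::memory_order_relaxed);      // publishing store
    }
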
1611 
1612 // relock objects for which synchronization was eliminated
1613 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1614                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1615   bool relocked_objects = false;
1616   for (int i = 0; i < monitors->length(); i++) {
1617     MonitorInfo* mon_info = monitors->at(i);

1752     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1753     nm->log_identity(xtty);
1754     xtty->end_head();
1755     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1756       xtty->begin_elem("jvms bci='%d'", sd->bci());
1757       xtty->method(sd->method());
1758       xtty->end_elem();
1759       if (sd->is_top())  break;
1760     }
1761     xtty->tail("deoptimized");
1762   }
1763 
1764   Continuation::notify_deopt(thread, fr.sp());
1765 
1766   // Patch the compiled method so that when execution returns to it we will
1767   // deopt the execution state and return to the interpreter.
1768   fr.deoptimize(thread);
1769 }
1770 
1771 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
 1772   // Deoptimize only if the frame comes from compiled code.
 1773   // Do not deoptimize a frame which has already been patched
1774   // during the execution of the loops below.
1775   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1776     return;
1777   }
1778   ResourceMark rm;
1779   deoptimize_single_frame(thread, fr, reason);
1780 }
1781 
1782 #if INCLUDE_JVMCI
1783 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1784   // there is no exception handler for this pc => deoptimize
1785   nm->make_not_entrant();
1786 
 1787   // Use Deoptimization::deoptimize for all of its side effects:
 1788   // gathering trap statistics, logging, etc.
 1789   // It also patches the return pc, but we do not care about that
 1790   // since we return a continuation to the deopt_blob below.
1791   JavaThread* thread = JavaThread::current();
1792   RegisterMap reg_map(thread,

  33 #include "code/pcDesc.hpp"
  34 #include "code/scopeDesc.hpp"
  35 #include "compiler/compilationPolicy.hpp"
  36 #include "compiler/compilerDefinitions.inline.hpp"
  37 #include "gc/shared/collectedHeap.hpp"
  38 #include "gc/shared/memAllocator.hpp"
  39 #include "interpreter/bytecode.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/fieldStreams.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/inlineKlass.inline.hpp"
  61 #include "oops/typeArrayOop.inline.hpp"
  62 #include "oops/verifyOopClosure.hpp"
  63 #include "prims/jvmtiDeferredUpdates.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "prims/jvmtiThreadState.hpp"
  66 #include "prims/methodHandles.hpp"
  67 #include "prims/vectorSupport.hpp"
  68 #include "runtime/atomic.hpp"
  69 #include "runtime/continuation.hpp"
  70 #include "runtime/continuationEntry.inline.hpp"
  71 #include "runtime/deoptimization.hpp"
  72 #include "runtime/escapeBarrier.hpp"
  73 #include "runtime/fieldDescriptor.hpp"
  74 #include "runtime/fieldDescriptor.inline.hpp"
  75 #include "runtime/frame.inline.hpp"
  76 #include "runtime/handles.inline.hpp"
  77 #include "runtime/interfaceSupport.inline.hpp"
  78 #include "runtime/javaThread.hpp"
  79 #include "runtime/jniHandles.inline.hpp"
  80 #include "runtime/keepStackGCProcessed.hpp"

 329                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 330                                   bool& deoptimized_objects) {
 331   bool realloc_failures = false;
 332   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 333 
 334   JavaThread* deoptee_thread = chunk->at(0)->thread();
 335   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 336          "a frame can only be deoptimized by the owner thread");
 337 
 338   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 339 
 340   // The flag return_oop() indicates call sites which return an oop
 341   // in compiled code. Such sites include Java method calls,
 342   // runtime calls (for example, those used to allocate new objects/arrays
 343   // on the slow code path) and any other calls generated in compiled code.
 344   // It is not guaranteed that we can recover such information here solely
 345   // by analyzing the bytecode of the deoptimized frames. This is why this flag
 346   // is set during method compilation (see Compile::Process_OopMap_Node()).
 347   // If the previous frame was popped or if we are dispatching an exception,
 348   // we don't have an oop result.
 349   ScopeDesc* scope = chunk->at(0)->scope();
 350   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 351   // When multiple values are returned, we must take care
 352   // of all oop return values.
 353   GrowableArray<Handle> return_oops;
 354   InlineKlass* vk = nullptr;
 355   if (save_oop_result && scope->return_scalarized()) {
 356     vk = InlineKlass::returned_inline_klass(map);
 357     if (vk != nullptr) {
 358       vk->save_oop_fields(map, return_oops);
 359       save_oop_result = false;
 360     }
 361   }
 362   if (save_oop_result) {
 363     // Reallocation may trigger GC. If deoptimization happened on return from
 364     // a call which returns an oop, we need to save it since it is not in the oopmap.
 365     oop result = deoptee.saved_oop_result(&map);
 366     assert(oopDesc::is_oop_or_null(result), "must be oop");
 367     return_oops.push(Handle(thread, result));
 368     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 369     if (TraceDeoptimization) {
 370       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 371       tty->cr();
 372     }
 373   }
 374   if (objects != nullptr || vk != nullptr) {
 375     if (exec_mode == Deoptimization::Unpack_none) {
 376       assert(thread->thread_state() == _thread_in_vm, "assumption");
 377       JavaThread* THREAD = thread; // For exception macros.
 378       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 379       if (vk != nullptr) {
 380         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 381       }
 382       if (objects != nullptr) {
 383         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 384         bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
 385         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
 386       }
 387       deoptimized_objects = true;
 388     } else {
 389       JavaThread* current = thread; // For JRT_BLOCK
 390       JRT_BLOCK
 391       if (vk != nullptr) {
 392         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 393       }
 394       if (objects != nullptr) {
 395         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 396         bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
 397         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
 398       }
 399       JRT_END
 400     }
 401     if (TraceDeoptimization && objects != nullptr) {
 402       print_objects(deoptee_thread, objects, realloc_failures);
 403     }
 404   }
 405   if (save_oop_result || vk != nullptr) {
 406     // Restore result.
 407     assert(return_oops.length() == 1, "no inline type");
 408     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 409   }
 410   return realloc_failures;
 411 }
 412 
 413 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 414                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 415   JavaThread* deoptee_thread = chunk->at(0)->thread();
 416   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 417   assert(thread == Thread::current(), "should be");
 418   HandleMark hm(thread);
 419 #ifndef PRODUCT
 420   bool first = true;
 421 #endif // !PRODUCT
 422   // Start locking from outermost/oldest frame
 423   for (int i = (chunk->length() - 1); i >= 0; i--) {
 424     compiledVFrame* cvf = chunk->at(i);
 425     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 426     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 427     if (monitors->is_nonempty()) {
 428       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 724   // its caller's stack by. If the caller is a compiled frame then
 725   // we pretend that the callee has no parameters so that the
 726   // extension counts for the full amount of locals and not just
 727   // locals minus parms. This is because without a c2i adapter the parm
 728   // area as created by the compiled frame will not be usable by
 729   // the interpreter. (Depending on the calling convention there
 730   // may not even be enough space.)
 731 
 732   // QQQ I'd rather see this pushed down into last_frame_adjust
 733   // and have it take the sender (aka caller).
 734 
 735   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 736     caller_adjustment = last_frame_adjust(0, callee_locals);
 737   } else if (callee_locals > callee_parameters) {
 738     // The caller frame may need extending to accommodate
 739     // non-parameter locals of the first unpacked interpreted frame.
 740     // Compute that adjustment.
 741     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 742   }
 743 
 744   // If the sender is deoptimized, we must retrieve the address of the handler
 745   // since the frame will "magically" show the original pc before the deopt
 746   // and we'd undo the deopt.
 747 
 748   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 749   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 750     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 751   }
 752 
 753   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 754 
 755 #if INCLUDE_JVMCI
 756   if (exceptionObject() != nullptr) {
 757     current->set_exception_oop(exceptionObject());
 758     exec_mode = Unpack_exception;
 759   }
 760 #endif
 761 
 762   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 763     assert(current->has_pending_exception(), "should have thrown OOME");
 764     current->set_exception_oop(current->pending_exception());

1224        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1225        default:;
1226      }
1227    }
1228    return nullptr;
1229 }
1230 #endif // INCLUDE_JVMCI
1231 
1232 #if COMPILER2_OR_JVMCI
1233 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1234   Handle pending_exception(THREAD, thread->pending_exception());
1235   const char* exception_file = thread->exception_file();
1236   int exception_line = thread->exception_line();
1237   thread->clear_pending_exception();
1238 
1239   bool failures = false;
1240 
1241   for (int i = 0; i < objects->length(); i++) {
1242     assert(objects->at(i)->is_object(), "invalid debug information");
1243     ObjectValue* sv = (ObjectValue*) objects->at(i);
1244     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1245 
1246     // Check if the object may be null and has an additional is_init input that needs
1247     // to be checked before using the field values. Skip re-allocation if it is null.
1248     if (sv->maybe_null()) {
1249       assert(k->is_inline_klass(), "must be an inline klass");
1250       jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
1251       if (is_init == 0) {
1252         continue;
1253       }
1254     }
1255 
1256     oop obj = nullptr;
1257     bool cache_init_error = false;
1258     if (k->is_instance_klass()) {
1259 #if INCLUDE_JVMCI
1260       nmethod* nm = fr->cb()->as_nmethod_or_null();
1261       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1262         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1263         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1264         if (obj != nullptr) {
1265           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1266           abv->set_cached(true);
1267         } else if (cache_init_error) {
 1268           // Results in an OOME, which is valid (as opposed to a class initialization error)
 1269           // and is acceptable for the rare case of a cache initialization failing.
1270           failures = true;
1271         }
1272       }
1273 #endif // INCLUDE_JVMCI
1274 
1275       InstanceKlass* ik = InstanceKlass::cast(k);
1276       if (obj == nullptr && !cache_init_error) {
1277         InternalOOMEMark iom(THREAD);
1278         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1279           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1280         } else {
1281           obj = ik->allocate_instance(THREAD);
1282         }
1283       }
1284     } else if (k->is_flatArray_klass()) {
1285       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1286       // Inline type array must be zeroed because not all memory is reassigned
1287       obj = ak->allocate(sv->field_size(), THREAD);
1288     } else if (k->is_typeArray_klass()) {
1289       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1290       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1291       int len = sv->field_size() / type2size[ak->element_type()];
1292       InternalOOMEMark iom(THREAD);
1293       obj = ak->allocate(len, THREAD);
1294     } else if (k->is_objArray_klass()) {
1295       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1296       InternalOOMEMark iom(THREAD);
1297       obj = ak->allocate(sv->field_size(), THREAD);
1298     }
1299 
1300     if (obj == nullptr) {
1301       failures = true;
1302     }
1303 
1304     assert(sv->value().is_null(), "redundant reallocation");
1305     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1306     CLEAR_PENDING_EXCEPTION;
1307     sv->set_value(obj);
1308   }
1309 
1310   if (failures) {
1311     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1312   } else if (pending_exception.not_null()) {
1313     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1314   }
1315 
1316   return failures;
1317 }
1318 
1319 // We're deoptimizing at the return of a call, inline type fields are
1320 // in registers. When we go back to the interpreter, it will expect a
1321 // reference to an inline type instance. Allocate and initialize it from
1322 // the register values here.
1323 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1324   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1325   if (new_vt == nullptr) {
1326     CLEAR_PENDING_EXCEPTION;
1327     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1328   }
1329   return_oops.clear();
1330   return_oops.push(Handle(THREAD, new_vt));
1331   return false;
1332 }
1333 
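To make the comment above concrete: a scalarized inline-type return leaves the
individual field values in registers, and a heap instance must be allocated
and filled from them before execution resumes in the interpreter. A toy model
with hypothetical types (the real code reads the registers through the
RegisterMap via vk->realloc_result()):

    #include <memory>

    struct ToyPoint { int x; int y; };  // stands in for a scalarized inline type

    // reg_x/reg_y model the register contents that the calling convention
    // assigns to the scalarized fields.
    static std::unique_ptr<ToyPoint> realloc_result_model(int reg_x, int reg_y) {
      auto p = std::make_unique<ToyPoint>();  // allocation may fail -> the OOME path
      p->x = reg_x;
      p->y = reg_y;
      return p;
    }
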
1334 #if INCLUDE_JVMCI
 1335 /**
 1336  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
 1337  * we need a way to recover the actual kind in order to write the correct
 1338  * number of bytes.
 1339  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1340  * the entries at index i + 1 to i + n - 1 are 'markers'.
 1341  * For example, if we were writing a short at index 4 of a byte array of size 8, the
 1342  * expected form of the array would be:
 1343  *
 1344  * {b0, b1, b2, b3, INT, marker, b6, b7}
 1345  *
 1346  * Thus, in order to get back the size of the entry, we simply need to count the number
 1347  * of marked entries.
 1348  *
 1349  * @param virtualArray the virtualized byte array
 1350  * @param i index of the virtual entry we are recovering
 1351  * @return The number of bytes the entry spans
 1352  */
1353 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1479       default:
1480         ShouldNotReachHere();
1481     }
1482     index++;
1483   }
1484 }
1485 
1486 // restore fields of an eliminated object array
1487 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1488   for (int i = 0; i < sv->field_size(); i++) {
1489     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1490     assert(value->type() == T_OBJECT, "object element expected");
1491     obj->obj_at_put(i, value->get_obj()());
1492   }
1493 }
1494 
1495 class ReassignedField {
1496 public:
1497   int _offset;
1498   BasicType _type;
1499   InstanceKlass* _klass;
1500   bool _is_flat;
1501 public:
1502   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false) { }
1503 };
1504 
1505 static int compare(ReassignedField* left, ReassignedField* right) {
1506   return left->_offset - right->_offset;
1507 }
1508 
1509 // Restore fields of an eliminated instance object using the same field order
1510 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1511 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) {
1512   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1513   InstanceKlass* ik = klass;
1514   while (ik != nullptr) {
1515     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1516       if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1517         ReassignedField field;
1518         field._offset = fs.offset();
1519         field._type = Signature::basic_type(fs.signature());
1520         if (fs.is_null_free_inline_type()) {
1521           if (fs.is_flat()) {
1522             field._is_flat = true;
1523             // Resolve klass of flat inline type field
1524             field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1525           } else {
1526             field._type = T_OBJECT;  // Can be removed once Q-descriptors have been removed.
1527           }
1528         }
1529         fields->append(field);
1530       }
1531     }
1532     ik = ik->superklass();
1533   }
1534   fields->sort(compare);
1535   for (int i = 0; i < fields->length(); i++) {
1536     BasicType type = fields->at(i)._type;
1537     int offset = base_offset + fields->at(i)._offset;
1538     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1539     if (fields->at(i)._is_flat) {
1540       // Recursively re-assign flat inline type fields
1541       InstanceKlass* vk = fields->at(i)._klass;
1542       assert(vk != nullptr, "must be resolved");
1543       offset -= InlineKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header
1544       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0);
1545       continue; // Continue because we don't need to increment svIndex
1546     }
1547     ScopeValue* scope_field = sv->field_at(svIndex);
1548     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1549     switch (type) {
1550       case T_OBJECT:
1551       case T_ARRAY:
1552         assert(value->type() == T_OBJECT, "Agreement.");
1553         obj->obj_field_put(offset, value->get_obj()());
1554         break;
1555 
1556       case T_INT: case T_FLOAT: { // 4 bytes.
1557         assert(value->type() == T_INT, "Agreement.");
1558         bool big_value = false;
1559         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1560           if (scope_field->is_location()) {
1561             Location::Type type = ((LocationValue*) scope_field)->location().type();
1562             if (type == Location::dbl || type == Location::lng) {
1563               big_value = true;
1564             }
1565           }
1566           if (scope_field->is_constant_int()) {
1567             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1568             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1569               big_value = true;
1570             }
1571           }

1605         break;
1606 
1607       case T_BYTE:
1608         assert(value->type() == T_INT, "Agreement.");
1609         obj->byte_field_put(offset, (jbyte)value->get_jint());
1610         break;
1611 
1612       case T_BOOLEAN:
1613         assert(value->type() == T_INT, "Agreement.");
1614         obj->bool_field_put(offset, (jboolean)value->get_jint());
1615         break;
1616 
1617       default:
1618         ShouldNotReachHere();
1619     }
1620     svIndex++;
1621   }
1622   return svIndex;
1623 }
1624 
1625 // restore fields of an eliminated inline type array
1626 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1627   InlineKlass* vk = vak->element_klass();
1628   assert(vk->flat_array(), "should only be used for flat inline type arrays");
1629   // Adjust offset to omit oop header
1630   int base_offset = arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT) - InlineKlass::cast(vk)->first_field_offset();
1631   // Initialize all elements of the flat inline type array
1632   for (int i = 0; i < sv->field_size(); i++) {
1633     ScopeValue* val = sv->field_at(i);
1634     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1635     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, CHECK);
1636   }
1637 }
1638 
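The element addressing in the loop above is worth spelling out: element i of a
flat array starts at the array base plus i shifted by the log2 element size,
and the per-field offsets are then rebased by subtracting first_field_offset()
because a flat element carries no object header. A small sketch of that
arithmetic (simplified from the code above; parameter names are illustrative):

    // Byte offset of one field within element i of a flat array.
    // 'base' is the array's base offset in bytes; 'field_offset' is the
    // field's offset within a standalone instance (past the object header).
    static int flat_element_field_offset(int base, int i, int log2_elem_size,
                                         int field_offset, int first_field_offset) {
      int elem_base = base + (i << log2_elem_size) - first_field_offset;
      return elem_base + field_offset;
    }
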
1639 // restore fields of all eliminated objects and arrays
1640 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1641   for (int i = 0; i < objects->length(); i++) {
1642     assert(objects->at(i)->is_object(), "invalid debug information");
1643     ObjectValue* sv = (ObjectValue*) objects->at(i);
1644     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1645     Handle obj = sv->value();
1646     assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1647 #ifndef PRODUCT
1648     if (PrintDeoptimizationDetails) {
1649       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1650     }
1651 #endif // !PRODUCT
1652 
1653     if (obj.is_null()) {
1654       continue;
1655     }
1656 
1657 #if INCLUDE_JVMCI
1658     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1659     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1660       continue;
1661     }
1662 #endif // INCLUDE_JVMCI
1663     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1664       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1665       ScopeValue* payload = sv->field_at(0);
1666       if (payload->is_location() &&
1667           payload->as_LocationValue()->location().type() == Location::vector) {
1668 #ifndef PRODUCT
1669         if (PrintDeoptimizationDetails) {
1670           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1671           if (Verbose) {
1672             Handle obj = sv->value();
1673             k->oop_print_on(obj(), tty);
1674           }
1675         }
1676 #endif // !PRODUCT
 1677         continue; // Such a vector's value was already restored in VectorSupport::allocate_vector().
1678       }
1679       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1680       // which could be restored after vector object allocation.
1681     }
1682     if (k->is_instance_klass()) {
1683       InstanceKlass* ik = InstanceKlass::cast(k);
1684       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK);
1685     } else if (k->is_flatArray_klass()) {
1686       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1687       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1688     } else if (k->is_typeArray_klass()) {
1689       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1690       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1691     } else if (k->is_objArray_klass()) {
1692       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1693     }
1694   }
 1695   // These objects may escape when we return to the interpreter after deoptimization.
 1696   // We need a barrier so that stores that initialize these objects can't be reordered
 1697   // with subsequent stores that make these objects accessible to other threads.
1698   OrderAccess::storestore();
1699 }
1700 
1701 
1702 // relock objects for which synchronization was eliminated
1703 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1704                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1705   bool relocked_objects = false;
1706   for (int i = 0; i < monitors->length(); i++) {
1707     MonitorInfo* mon_info = monitors->at(i);

1842     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1843     nm->log_identity(xtty);
1844     xtty->end_head();
1845     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1846       xtty->begin_elem("jvms bci='%d'", sd->bci());
1847       xtty->method(sd->method());
1848       xtty->end_elem();
1849       if (sd->is_top())  break;
1850     }
1851     xtty->tail("deoptimized");
1852   }
1853 
1854   Continuation::notify_deopt(thread, fr.sp());
1855 
1856   // Patch the compiled method so that when execution returns to it we will
1857   // deopt the execution state and return to the interpreter.
1858   fr.deoptimize(thread);
1859 }
1860 
1861 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1862   // Deoptimize only if the frame comes from compiled code.
 1863   // Do not deoptimize a frame which has already been patched
1864   // during the execution of the loops below.
1865   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1866     return;
1867   }
1868   ResourceMark rm;
1869   deoptimize_single_frame(thread, fr, reason);
1870 }
1871 
1872 #if INCLUDE_JVMCI
1873 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1874   // there is no exception handler for this pc => deoptimize
1875   nm->make_not_entrant();
1876 
 1877   // Use Deoptimization::deoptimize for all of its side effects:
 1878   // gathering trap statistics, logging, etc.
 1879   // It also patches the return pc, but we do not care about that
 1880   // since we return a continuation to the deopt_blob below.
1881   JavaThread* thread = JavaThread::current();
1882   RegisterMap reg_map(thread,