33 #include "code/scopeDesc.hpp"
34 #include "compiler/compilationPolicy.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "gc/shared/collectedHeap.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/bytecode.inline.hpp"
40 #include "interpreter/bytecodeStream.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/oopMapCache.hpp"
43 #include "jvm.h"
44 #include "logging/log.hpp"
45 #include "logging/logLevel.hpp"
46 #include "logging/logMessage.hpp"
47 #include "logging/logStream.hpp"
48 #include "memory/allocation.inline.hpp"
49 #include "memory/oopFactory.hpp"
50 #include "memory/resourceArea.hpp"
51 #include "memory/universe.hpp"
52 #include "oops/constantPool.hpp"
53 #include "oops/fieldStreams.inline.hpp"
54 #include "oops/method.hpp"
55 #include "oops/objArrayKlass.hpp"
56 #include "oops/objArrayOop.inline.hpp"
57 #include "oops/oop.inline.hpp"
58 #include "oops/typeArrayOop.inline.hpp"
59 #include "oops/verifyOopClosure.hpp"
60 #include "prims/jvmtiDeferredUpdates.hpp"
61 #include "prims/jvmtiExport.hpp"
62 #include "prims/jvmtiThreadState.hpp"
63 #include "prims/methodHandles.hpp"
64 #include "prims/vectorSupport.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/basicLock.inline.hpp"
67 #include "runtime/continuation.hpp"
68 #include "runtime/continuationEntry.inline.hpp"
69 #include "runtime/deoptimization.hpp"
70 #include "runtime/escapeBarrier.hpp"
71 #include "runtime/fieldDescriptor.hpp"
72 #include "runtime/fieldDescriptor.inline.hpp"
73 #include "runtime/frame.inline.hpp"
74 #include "runtime/handles.inline.hpp"
75 #include "runtime/interfaceSupport.inline.hpp"
76 #include "runtime/javaThread.hpp"
77 #include "runtime/jniHandles.inline.hpp"
333 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
334 bool& deoptimized_objects) {
335 bool realloc_failures = false;
336   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
337
338 JavaThread* deoptee_thread = chunk->at(0)->thread();
339 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
340 "a frame can only be deoptimized by the owner thread");
341
342 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
343
344   // The flag return_oop() indicates call sites that return an oop
345   // in compiled code. Such sites include Java method calls,
346   // runtime calls (for example, used to allocate new objects/arrays
347   // on the slow path) and any other calls generated in compiled code.
348   // We cannot reliably recover this information here just by analyzing
349   // the bytecode of the deoptimized frames, which is why the flag
350 // is set during method compilation (see Compile::Process_OopMap_Node()).
351 // If the previous frame was popped or if we are dispatching an exception,
352 // we don't have an oop result.
353 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
354 Handle return_value;
355 if (save_oop_result) {
356     // Reallocation may trigger GC. If deoptimization happened on return from
357     // a call which returns an oop, we need to save it since it is not in the oopmap.
358 oop result = deoptee.saved_oop_result(&map);
359 assert(oopDesc::is_oop_or_null(result), "must be oop");
360 return_value = Handle(thread, result);
361 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
362 if (TraceDeoptimization) {
363 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
364 tty->cr();
365 }
366 }
367 if (objects != nullptr) {
368 if (exec_mode == Deoptimization::Unpack_none) {
369 assert(thread->thread_state() == _thread_in_vm, "assumption");
370 JavaThread* THREAD = thread; // For exception macros.
371 // Clear pending OOM if reallocation fails and return true indicating allocation failure
372 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
373 deoptimized_objects = true;
374 } else {
375 JavaThread* current = thread; // For JRT_BLOCK
376 JRT_BLOCK
377 realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
378 JRT_END
379 }
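    // Injected (internal) fields are only described by JVMCI debug info
    // (see reassign_fields_by_klass), so skip them for non-JVMCI compiled code.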
380 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
381 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
382 if (TraceDeoptimization) {
383 print_objects(deoptee_thread, objects, realloc_failures);
384 }
385 }
386 if (save_oop_result) {
387 // Restore result.
388 deoptee.set_saved_oop_result(&map, return_value());
389 }
390 return realloc_failures;
391 }
392
393 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
394 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
395 JavaThread* deoptee_thread = chunk->at(0)->thread();
396 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
397 assert(thread == Thread::current(), "should be");
398 HandleMark hm(thread);
399 #ifndef PRODUCT
400 bool first = true;
401 #endif // !PRODUCT
402 // Start locking from outermost/oldest frame
403 for (int i = (chunk->length() - 1); i >= 0; i--) {
404 compiledVFrame* cvf = chunk->at(i);
405     assert(cvf->scope() != nullptr, "expect only compiled java frames");
406 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
407 if (monitors->is_nonempty()) {
408 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
702 // its caller's stack by. If the caller is a compiled frame then
703 // we pretend that the callee has no parameters so that the
704 // extension counts for the full amount of locals and not just
705 // locals-parms. This is because without a c2i adapter the parm
706 // area as created by the compiled frame will not be usable by
707 // the interpreter. (Depending on the calling convention there
708 // may not even be enough space).
709
710 // QQQ I'd rather see this pushed down into last_frame_adjust
711 // and have it take the sender (aka caller).
712
713 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
714 caller_adjustment = last_frame_adjust(0, callee_locals);
715 } else if (callee_locals > callee_parameters) {
716 // The caller frame may need extending to accommodate
717 // non-parameter locals of the first unpacked interpreted frame.
718 // Compute that adjustment.
719 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
720 }
721
722   // If the sender is deoptimized we must retrieve the address of the handler
723 // since the frame will "magically" show the original pc before the deopt
724 // and we'd undo the deopt.
725
726 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
727 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
728 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
729 }
730
731 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
732
733 #if INCLUDE_JVMCI
734 if (exceptionObject() != nullptr) {
735 current->set_exception_oop(exceptionObject());
736 exec_mode = Unpack_exception;
737 }
738 #endif
739
740 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
741 assert(current->has_pending_exception(), "should have thrown OOME");
742 current->set_exception_oop(current->pending_exception());
1207 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1208 default:;
1209 }
1210 }
1211 return nullptr;
1212 }
1213 #endif // INCLUDE_JVMCI
1214
1215 #if COMPILER2_OR_JVMCI
1216 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1217 Handle pending_exception(THREAD, thread->pending_exception());
1218 const char* exception_file = thread->exception_file();
1219 int exception_line = thread->exception_line();
1220 thread->clear_pending_exception();
1221
1222 bool failures = false;
1223
1224 for (int i = 0; i < objects->length(); i++) {
1225 assert(objects->at(i)->is_object(), "invalid debug information");
1226 ObjectValue* sv = (ObjectValue*) objects->at(i);
1227
1228 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1229 oop obj = nullptr;
1230
1231 bool cache_init_error = false;
1232 if (k->is_instance_klass()) {
1233 #if INCLUDE_JVMCI
1234 nmethod* nm = fr->cb()->as_nmethod_or_null();
1235 if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1236 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1237 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1238 if (obj != nullptr) {
1239 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1240 abv->set_cached(true);
1241 } else if (cache_init_error) {
1242           // This results in an OOME, which is valid (as opposed to a class initialization error)
1243           // and is fine for the rare case of a cache initialization failing.
1244 failures = true;
1245 }
1246 }
1247 #endif // INCLUDE_JVMCI
1248
1249 InstanceKlass* ik = InstanceKlass::cast(k);
1250 if (obj == nullptr && !cache_init_error) {
1251 InternalOOMEMark iom(THREAD);
1252 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1253 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1254 } else {
1255 obj = ik->allocate_instance(THREAD);
1256 }
1257 }
1258 } else if (k->is_typeArray_klass()) {
1259 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1260 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1261 int len = sv->field_size() / type2size[ak->element_type()];
1262 InternalOOMEMark iom(THREAD);
1263 obj = ak->allocate(len, THREAD);
1264 } else if (k->is_objArray_klass()) {
1265 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1266 InternalOOMEMark iom(THREAD);
1267 obj = ak->allocate(sv->field_size(), THREAD);
1268 }
1269
1270 if (obj == nullptr) {
1271 failures = true;
1272 }
1273
1274 assert(sv->value().is_null(), "redundant reallocation");
1275 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1276 CLEAR_PENDING_EXCEPTION;
1277 sv->set_value(obj);
1278 }
1279
1280 if (failures) {
1281 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1282 } else if (pending_exception.not_null()) {
1283 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1284 }
1285
1286 return failures;
1287 }
1288
1289 #if INCLUDE_JVMCI
1290 /**
1291 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1292  * we need some way to recover the actual kind so that we can write the correct
1293  * number of bytes.
1294  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1295  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1296 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1297 * expected form of the array would be:
1298 *
1299 * {b0, b1, b2, b3, INT, marker, b6, b7}
1300 *
1301 * Thus, in order to get back the size of the entry, we simply need to count the number
1302  * of marked entries.
1303 *
1304 * @param virtualArray the virtualized byte array
1305 * @param i index of the virtual entry we are recovering
1306 * @return The number of bytes the entry spans
1307 */
1308 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1434 default:
1435 ShouldNotReachHere();
1436 }
1437 index++;
1438 }
1439 }
1440
1441 // restore fields of an eliminated object array
1442 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1443 for (int i = 0; i < sv->field_size(); i++) {
1444 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1445 assert(value->type() == T_OBJECT, "object element expected");
1446 obj->obj_at_put(i, value->get_obj()());
1447 }
1448 }
1449
1450 class ReassignedField {
1451 public:
1452 int _offset;
1453 BasicType _type;
1454 public:
1455 ReassignedField() {
1456 _offset = 0;
1457 _type = T_ILLEGAL;
1458 }
1459 };
1460
1461 static int compare(ReassignedField* left, ReassignedField* right) {
1462 return left->_offset - right->_offset;
1463 }
1464
1465 // Restore fields of an eliminated instance object using the same field order
1466 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1467 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1468 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1469 InstanceKlass* ik = klass;
1470 while (ik != nullptr) {
1471 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1472 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1473 ReassignedField field;
1474 field._offset = fs.offset();
1475 field._type = Signature::basic_type(fs.signature());
1476 fields->append(field);
1477 }
1478 }
1479 ik = ik->superklass();
1480 }
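  // Sort by offset so the loop below walks the fields in the same order as the
  // scope values recorded in the debug info (see the comment above this function).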
1481 fields->sort(compare);
1482 for (int i = 0; i < fields->length(); i++) {
1483 ScopeValue* scope_field = sv->field_at(svIndex);
1484 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1485 int offset = fields->at(i)._offset;
1486 BasicType type = fields->at(i)._type;
1487 switch (type) {
1488 case T_OBJECT: case T_ARRAY:
1489 assert(value->type() == T_OBJECT, "Agreement.");
1490 obj->obj_field_put(offset, value->get_obj()());
1491 break;
1492
1493 case T_INT: case T_FLOAT: { // 4 bytes.
1494 assert(value->type() == T_INT, "Agreement.");
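        // A long or double in the debug info may span two adjacent int-typed fields
        // of the object; detect that case so both 32-bit halves are restored.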
1495 bool big_value = false;
1496 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1497 if (scope_field->is_location()) {
1498 Location::Type type = ((LocationValue*) scope_field)->location().type();
1499 if (type == Location::dbl || type == Location::lng) {
1500 big_value = true;
1501 }
1502 }
1503 if (scope_field->is_constant_int()) {
1504 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1505 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1506 big_value = true;
1507 }
1508 }
1539 case T_CHAR:
1540 assert(value->type() == T_INT, "Agreement.");
1541 obj->char_field_put(offset, (jchar)value->get_jint());
1542 break;
1543
1544 case T_BYTE:
1545 assert(value->type() == T_INT, "Agreement.");
1546 obj->byte_field_put(offset, (jbyte)value->get_jint());
1547 break;
1548
1549 case T_BOOLEAN:
1550 assert(value->type() == T_INT, "Agreement.");
1551 obj->bool_field_put(offset, (jboolean)value->get_jint());
1552 break;
1553
1554 default:
1555 ShouldNotReachHere();
1556 }
1557 svIndex++;
1558 }
1559 return svIndex;
1560 }
1561
1562 // restore fields of all eliminated objects and arrays
1563 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1564 for (int i = 0; i < objects->length(); i++) {
1565 assert(objects->at(i)->is_object(), "invalid debug information");
1566 ObjectValue* sv = (ObjectValue*) objects->at(i);
1567 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1568 Handle obj = sv->value();
1569 assert(obj.not_null() || realloc_failures, "reallocation was missed");
1570 #ifndef PRODUCT
1571 if (PrintDeoptimizationDetails) {
1572 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1573 }
1574 #endif // !PRODUCT
1575
1576 if (obj.is_null()) {
1577 continue;
1578 }
1579
1580 #if INCLUDE_JVMCI
1581 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1582 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1583 continue;
1584 }
1585 #endif // INCLUDE_JVMCI
1586 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1587 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1588 ScopeValue* payload = sv->field_at(0);
1589 if (payload->is_location() &&
1590 payload->as_LocationValue()->location().type() == Location::vector) {
1591 #ifndef PRODUCT
1592 if (PrintDeoptimizationDetails) {
1593 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1594 if (Verbose) {
1595 Handle obj = sv->value();
1596 k->oop_print_on(obj(), tty);
1597 }
1598 }
1599 #endif // !PRODUCT
1600 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1601 }
1602 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1603 // which could be restored after vector object allocation.
1604 }
1605 if (k->is_instance_klass()) {
1606 InstanceKlass* ik = InstanceKlass::cast(k);
1607 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1608 } else if (k->is_typeArray_klass()) {
1609 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1610 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1611 } else if (k->is_objArray_klass()) {
1612 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1613 }
1614 }
1615   // These objects may escape when we return to the interpreter after deoptimization.
1616   // We need a barrier so that stores that initialize these objects can't be reordered
1617 // with subsequent stores that make these objects accessible by other threads.
1618 OrderAccess::storestore();
1619 }
1620
1621
1622 // relock objects for which synchronization was eliminated
1623 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1624 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1625 bool relocked_objects = false;
1626 for (int i = 0; i < monitors->length(); i++) {
1627 MonitorInfo* mon_info = monitors->at(i);
1777 xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1778 nm->log_identity(xtty);
1779 xtty->end_head();
1780 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1781 xtty->begin_elem("jvms bci='%d'", sd->bci());
1782 xtty->method(sd->method());
1783 xtty->end_elem();
1784 if (sd->is_top()) break;
1785 }
1786 xtty->tail("deoptimized");
1787 }
1788
1789 Continuation::notify_deopt(thread, fr.sp());
1790
1791 // Patch the compiled method so that when execution returns to it we will
1792 // deopt the execution state and return to the interpreter.
1793 fr.deoptimize(thread);
1794 }
1795
1796 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1797   // Deoptimize only if the frame comes from compiled code.
1798 // Do not deoptimize the frame which is already patched
1799 // during the execution of the loops below.
1800 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1801 return;
1802 }
1803 ResourceMark rm;
1804 deoptimize_single_frame(thread, fr, reason);
1805 }
1806
1807 #if INCLUDE_JVMCI
1808 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1809 // there is no exception handler for this pc => deoptimize
1810 nm->make_not_entrant();
1811
1812 // Use Deoptimization::deoptimize for all of its side-effects:
1813   // gathering trap statistics, logging, etc.
1814   // It also patches the return pc, but we do not care about that
1815 // since we return a continuation to the deopt_blob below.
1816 JavaThread* thread = JavaThread::current();
1817 RegisterMap reg_map(thread,
|
33 #include "code/scopeDesc.hpp"
34 #include "compiler/compilationPolicy.hpp"
35 #include "compiler/compilerDefinitions.inline.hpp"
36 #include "gc/shared/collectedHeap.hpp"
37 #include "gc/shared/memAllocator.hpp"
38 #include "interpreter/bytecode.hpp"
39 #include "interpreter/bytecode.inline.hpp"
40 #include "interpreter/bytecodeStream.hpp"
41 #include "interpreter/interpreter.hpp"
42 #include "interpreter/oopMapCache.hpp"
43 #include "jvm.h"
44 #include "logging/log.hpp"
45 #include "logging/logLevel.hpp"
46 #include "logging/logMessage.hpp"
47 #include "logging/logStream.hpp"
48 #include "memory/allocation.inline.hpp"
49 #include "memory/oopFactory.hpp"
50 #include "memory/resourceArea.hpp"
51 #include "memory/universe.hpp"
52 #include "oops/constantPool.hpp"
53 #include "oops/flatArrayKlass.hpp"
54 #include "oops/flatArrayOop.hpp"
55 #include "oops/fieldStreams.inline.hpp"
56 #include "oops/method.hpp"
57 #include "oops/objArrayKlass.hpp"
58 #include "oops/objArrayOop.inline.hpp"
59 #include "oops/oop.inline.hpp"
60 #include "oops/inlineKlass.inline.hpp"
61 #include "oops/typeArrayOop.inline.hpp"
62 #include "oops/verifyOopClosure.hpp"
63 #include "prims/jvmtiDeferredUpdates.hpp"
64 #include "prims/jvmtiExport.hpp"
65 #include "prims/jvmtiThreadState.hpp"
66 #include "prims/methodHandles.hpp"
67 #include "prims/vectorSupport.hpp"
68 #include "runtime/atomic.hpp"
69 #include "runtime/basicLock.inline.hpp"
70 #include "runtime/continuation.hpp"
71 #include "runtime/continuationEntry.inline.hpp"
72 #include "runtime/deoptimization.hpp"
73 #include "runtime/escapeBarrier.hpp"
74 #include "runtime/fieldDescriptor.hpp"
75 #include "runtime/fieldDescriptor.inline.hpp"
76 #include "runtime/frame.inline.hpp"
77 #include "runtime/handles.inline.hpp"
78 #include "runtime/interfaceSupport.inline.hpp"
79 #include "runtime/javaThread.hpp"
80 #include "runtime/jniHandles.inline.hpp"
336 frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
337 bool& deoptimized_objects) {
338 bool realloc_failures = false;
339   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
340
341 JavaThread* deoptee_thread = chunk->at(0)->thread();
342 assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
343 "a frame can only be deoptimized by the owner thread");
344
345 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
346
347   // The flag return_oop() indicates call sites that return an oop
348   // in compiled code. Such sites include Java method calls,
349   // runtime calls (for example, used to allocate new objects/arrays
350   // on the slow path) and any other calls generated in compiled code.
351   // We cannot reliably recover this information here just by analyzing
352   // the bytecode of the deoptimized frames, which is why the flag
353 // is set during method compilation (see Compile::Process_OopMap_Node()).
354 // If the previous frame was popped or if we are dispatching an exception,
355 // we don't have an oop result.
356 ScopeDesc* scope = chunk->at(0)->scope();
357 bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
358   // If multiple values are returned, we must take care
359 // of all oop return values.
360 GrowableArray<Handle> return_oops;
361 InlineKlass* vk = nullptr;
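  // If the callee returns a scalarized inline type, its field values are still in
  // registers. Save the oop fields in handles so that a GC triggered by reallocation
  // cannot invalidate them; realloc_inline_type_result() buffers the value from them later.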
362 if (save_oop_result && scope->return_scalarized()) {
363 vk = InlineKlass::returned_inline_klass(map);
364 if (vk != nullptr) {
365 vk->save_oop_fields(map, return_oops);
366 save_oop_result = false;
367 }
368 }
369 if (save_oop_result) {
370     // Reallocation may trigger GC. If deoptimization happened on return from
371     // a call which returns an oop, we need to save it since it is not in the oopmap.
372 oop result = deoptee.saved_oop_result(&map);
373 assert(oopDesc::is_oop_or_null(result), "must be oop");
374 return_oops.push(Handle(thread, result));
375 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
376 if (TraceDeoptimization) {
377 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
378 tty->cr();
379 }
380 }
381 if (objects != nullptr || vk != nullptr) {
382 if (exec_mode == Deoptimization::Unpack_none) {
383 assert(thread->thread_state() == _thread_in_vm, "assumption");
384 JavaThread* THREAD = thread; // For exception macros.
385 // Clear pending OOM if reallocation fails and return true indicating allocation failure
386 if (vk != nullptr) {
387 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
388 }
389 if (objects != nullptr) {
390 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
391 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
392 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, CHECK_AND_CLEAR_(true));
393 }
394 deoptimized_objects = true;
395 } else {
396 JavaThread* current = thread; // For JRT_BLOCK
397 JRT_BLOCK
398 if (vk != nullptr) {
399 realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
400 }
401 if (objects != nullptr) {
402 realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
403 bool skip_internal = (compiled_method != nullptr) && !compiled_method->is_compiled_by_jvmci();
404 Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD);
405 }
406 JRT_END
407 }
408 if (TraceDeoptimization && objects != nullptr) {
409 print_objects(deoptee_thread, objects, realloc_failures);
410 }
411 }
412 if (save_oop_result || vk != nullptr) {
413 // Restore result.
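    // return_oops now holds exactly one handle: either the saved oop result or the
    // buffered inline type created by realloc_inline_type_result().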
414 assert(return_oops.length() == 1, "no inline type");
415 deoptee.set_saved_oop_result(&map, return_oops.pop()());
416 }
417 return realloc_failures;
418 }
419
420 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
421 frame& deoptee, int exec_mode, bool& deoptimized_objects) {
422 JavaThread* deoptee_thread = chunk->at(0)->thread();
423 assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
424 assert(thread == Thread::current(), "should be");
425 HandleMark hm(thread);
426 #ifndef PRODUCT
427 bool first = true;
428 #endif // !PRODUCT
429 // Start locking from outermost/oldest frame
430 for (int i = (chunk->length() - 1); i >= 0; i--) {
431 compiledVFrame* cvf = chunk->at(i);
432     assert(cvf->scope() != nullptr, "expect only compiled java frames");
433 GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
434 if (monitors->is_nonempty()) {
435 bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
729 // its caller's stack by. If the caller is a compiled frame then
730 // we pretend that the callee has no parameters so that the
731 // extension counts for the full amount of locals and not just
732 // locals-parms. This is because without a c2i adapter the parm
733 // area as created by the compiled frame will not be usable by
734 // the interpreter. (Depending on the calling convention there
735 // may not even be enough space).
736
737 // QQQ I'd rather see this pushed down into last_frame_adjust
738 // and have it take the sender (aka caller).
739
740 if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
741 caller_adjustment = last_frame_adjust(0, callee_locals);
742 } else if (callee_locals > callee_parameters) {
743 // The caller frame may need extending to accommodate
744 // non-parameter locals of the first unpacked interpreted frame.
745 // Compute that adjustment.
746 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
747 }
748
749 // If the sender is deoptimized we must retrieve the address of the handler
750 // since the frame will "magically" show the original pc before the deopt
751 // and we'd undo the deopt.
752
753 frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
754 if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
755 ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
756 }
757
758 assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
759
760 #if INCLUDE_JVMCI
761 if (exceptionObject() != nullptr) {
762 current->set_exception_oop(exceptionObject());
763 exec_mode = Unpack_exception;
764 }
765 #endif
766
767 if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
768 assert(current->has_pending_exception(), "should have thrown OOME");
769 current->set_exception_oop(current->pending_exception());
1234 case T_LONG: return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1235 default:;
1236 }
1237 }
1238 return nullptr;
1239 }
1240 #endif // INCLUDE_JVMCI
1241
1242 #if COMPILER2_OR_JVMCI
1243 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1244 Handle pending_exception(THREAD, thread->pending_exception());
1245 const char* exception_file = thread->exception_file();
1246 int exception_line = thread->exception_line();
1247 thread->clear_pending_exception();
1248
1249 bool failures = false;
1250
1251 for (int i = 0; i < objects->length(); i++) {
1252 assert(objects->at(i)->is_object(), "invalid debug information");
1253 ObjectValue* sv = (ObjectValue*) objects->at(i);
1254 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1255
1256 // Check if the object may be null and has an additional is_init input that needs
1257 // to be checked before using the field values. Skip re-allocation if it is null.
1258 if (sv->maybe_null()) {
1259 assert(k->is_inline_klass(), "must be an inline klass");
1260 jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
1261 if (is_init == 0) {
1262 continue;
1263 }
1264 }
1265
1266 oop obj = nullptr;
1267 bool cache_init_error = false;
1268 if (k->is_instance_klass()) {
1269 #if INCLUDE_JVMCI
1270 nmethod* nm = fr->cb()->as_nmethod_or_null();
1271 if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1272 AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1273 obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1274 if (obj != nullptr) {
1275 // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1276 abv->set_cached(true);
1277 } else if (cache_init_error) {
1278           // This results in an OOME, which is valid (as opposed to a class initialization error)
1279           // and is fine for the rare case of a cache initialization failing.
1280 failures = true;
1281 }
1282 }
1283 #endif // INCLUDE_JVMCI
1284
1285 InstanceKlass* ik = InstanceKlass::cast(k);
1286 if (obj == nullptr && !cache_init_error) {
1287 InternalOOMEMark iom(THREAD);
1288 if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1289 obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1290 } else {
1291 obj = ik->allocate_instance(THREAD);
1292 }
1293 }
1294 } else if (k->is_flatArray_klass()) {
1295 FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1296 // Inline type array must be zeroed because not all memory is reassigned
1297 obj = ak->allocate(sv->field_size(), ak->layout_kind(), THREAD);
1298 } else if (k->is_typeArray_klass()) {
1299 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1300 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1301 int len = sv->field_size() / type2size[ak->element_type()];
1302 InternalOOMEMark iom(THREAD);
1303 obj = ak->allocate(len, THREAD);
1304 } else if (k->is_objArray_klass()) {
1305 ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1306 InternalOOMEMark iom(THREAD);
1307 obj = ak->allocate(sv->field_size(), THREAD);
1308 }
1309
1310 if (obj == nullptr) {
1311 failures = true;
1312 }
1313
1314 assert(sv->value().is_null(), "redundant reallocation");
1315 assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1316 CLEAR_PENDING_EXCEPTION;
1317 sv->set_value(obj);
1318 }
1319
1320 if (failures) {
1321 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1322 } else if (pending_exception.not_null()) {
1323 thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1324 }
1325
1326 return failures;
1327 }
1328
1329 // We're deoptimizing at the return of a call; the inline type fields are
1330 // in registers. When we go back to the interpreter, it will expect a
1331 // reference to an inline type instance. Allocate and initialize it from
1332 // the register values here.
1333 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1334 oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1335 if (new_vt == nullptr) {
1336 CLEAR_PENDING_EXCEPTION;
1337 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1338 }
1339 return_oops.clear();
1340 return_oops.push(Handle(THREAD, new_vt));
1341 return false;
1342 }
1343
1344 #if INCLUDE_JVMCI
1345 /**
1346 * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1347  * we need some way to recover the actual kind so that we can write the correct
1348  * number of bytes.
1349  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1350  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1351 * For example, if we were writing a short at index 4 of a byte array of size 8, the
1352 * expected form of the array would be:
1353 *
1354 * {b0, b1, b2, b3, INT, marker, b6, b7}
1355 *
1356 * Thus, in order to get back the size of the entry, we simply need to count the number
1357  * of marked entries.
1358 *
1359 * @param virtualArray the virtualized byte array
1360 * @param i index of the virtual entry we are recovering
1361 * @return The number of bytes the entry spans
1362 */
1363 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1489 default:
1490 ShouldNotReachHere();
1491 }
1492 index++;
1493 }
1494 }
1495
1496 // restore fields of an eliminated object array
1497 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1498 for (int i = 0; i < sv->field_size(); i++) {
1499 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1500 assert(value->type() == T_OBJECT, "object element expected");
1501 obj->obj_at_put(i, value->get_obj()());
1502 }
1503 }
1504
1505 class ReassignedField {
1506 public:
1507 int _offset;
1508 BasicType _type;
1509 InstanceKlass* _klass;
1510 bool _is_flat;
1511 bool _is_null_free;
1512 public:
1513 ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }
1514 };
1515
1516 static int compare(ReassignedField* left, ReassignedField* right) {
1517 return left->_offset - right->_offset;
1518 }
1519
1520 // Restore fields of an eliminated instance object using the same field order
1521 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1522 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, GrowableArray<int>* null_marker_offsets, TRAPS) {
1523 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1524 InstanceKlass* ik = klass;
1525 while (ik != nullptr) {
1526 for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1527 if (!fs.access_flags().is_static() && (!skip_internal || !fs.field_flags().is_injected())) {
1528 ReassignedField field;
1529 field._offset = fs.offset();
1530 field._type = Signature::basic_type(fs.signature());
1531 if (fs.is_flat()) {
1532 field._is_flat = true;
1533 field._is_null_free = fs.is_null_free_inline_type();
1534 // Resolve klass of flat inline type field
1535 field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1536 }
1537 fields->append(field);
1538 }
1539 }
1540 ik = ik->superklass();
1541 }
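  // Sort by offset so the loop below walks the fields in the same order as the
  // scope values recorded in the debug info (see the comment above this function).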
1542 fields->sort(compare);
1543   // Keep track of null marker offsets for flat fields
1544 bool set_null_markers = false;
1545 if (null_marker_offsets == nullptr) {
1546 set_null_markers = true;
1547 null_marker_offsets = new GrowableArray<int>();
1548 }
1549 for (int i = 0; i < fields->length(); i++) {
1550 BasicType type = fields->at(i)._type;
1551 int offset = base_offset + fields->at(i)._offset;
1552 // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1553 if (fields->at(i)._is_flat) {
1554 // Recursively re-assign flat inline type fields
1555 InstanceKlass* vk = fields->at(i)._klass;
1556 assert(vk != nullptr, "must be resolved");
1557 offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1558 svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, null_marker_offsets, CHECK_0);
1559 if (!fields->at(i)._is_null_free) {
1560 int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1561 null_marker_offsets->append(nm_offset);
1562 }
1563 continue; // Continue because we don't need to increment svIndex
1564 }
1565 ScopeValue* scope_field = sv->field_at(svIndex);
1566 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1567 switch (type) {
1568 case T_OBJECT:
1569 case T_ARRAY:
1570 assert(value->type() == T_OBJECT, "Agreement.");
1571 obj->obj_field_put(offset, value->get_obj()());
1572 break;
1573
1574 case T_INT: case T_FLOAT: { // 4 bytes.
1575 assert(value->type() == T_INT, "Agreement.");
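        // A long or double in the debug info may span two adjacent int-typed fields
        // of the object; detect that case so both 32-bit halves are restored.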
1576 bool big_value = false;
1577 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1578 if (scope_field->is_location()) {
1579 Location::Type type = ((LocationValue*) scope_field)->location().type();
1580 if (type == Location::dbl || type == Location::lng) {
1581 big_value = true;
1582 }
1583 }
1584 if (scope_field->is_constant_int()) {
1585 ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1586 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1587 big_value = true;
1588 }
1589 }
1620 case T_CHAR:
1621 assert(value->type() == T_INT, "Agreement.");
1622 obj->char_field_put(offset, (jchar)value->get_jint());
1623 break;
1624
1625 case T_BYTE:
1626 assert(value->type() == T_INT, "Agreement.");
1627 obj->byte_field_put(offset, (jbyte)value->get_jint());
1628 break;
1629
1630 case T_BOOLEAN:
1631 assert(value->type() == T_INT, "Agreement.");
1632 obj->bool_field_put(offset, (jboolean)value->get_jint());
1633 break;
1634
1635 default:
1636 ShouldNotReachHere();
1637 }
1638 svIndex++;
1639 }
1640 if (set_null_markers) {
1641 // The null marker values come after all the field values in the debug info
1642 for (int i = 0; i < null_marker_offsets->length(); ++i) {
1643 int offset = null_marker_offsets->at(i);
1644 jbyte is_init = (jbyte)StackValue::create_stack_value(fr, reg_map, sv->field_at(svIndex++))->get_jint();
1645 obj->byte_field_put(offset, is_init);
1646 }
1647 }
1648 return svIndex;
1649 }
1650
1651 // restore fields of an eliminated inline type array
1652 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS) {
1653 InlineKlass* vk = vak->element_klass();
1654 assert(vk->flat_array(), "should only be used for flat inline type arrays");
1655 // Adjust offset to omit oop header
1656 int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - InlineKlass::cast(vk)->payload_offset();
1657 // Initialize all elements of the flat inline type array
1658 for (int i = 0; i < sv->field_size(); i++) {
1659 ScopeValue* val = sv->field_at(i);
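    // Each element occupies 2^log2_element_size bytes; compute where element i's payload starts.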
1660 int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1661 reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, skip_internal, offset, nullptr, CHECK);
1662 }
1663 }
1664
1665 // restore fields of all eliminated objects and arrays
1666 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) {
1667 for (int i = 0; i < objects->length(); i++) {
1668 assert(objects->at(i)->is_object(), "invalid debug information");
1669 ObjectValue* sv = (ObjectValue*) objects->at(i);
1670 Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1671 Handle obj = sv->value();
1672 assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1673 #ifndef PRODUCT
1674 if (PrintDeoptimizationDetails) {
1675 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1676 }
1677 #endif // !PRODUCT
1678
1679 if (obj.is_null()) {
1680 continue;
1681 }
1682
1683 #if INCLUDE_JVMCI
1684 // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1685 if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1686 continue;
1687 }
1688 #endif // INCLUDE_JVMCI
1689 if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1690 assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1691 ScopeValue* payload = sv->field_at(0);
1692 if (payload->is_location() &&
1693 payload->as_LocationValue()->location().type() == Location::vector) {
1694 #ifndef PRODUCT
1695 if (PrintDeoptimizationDetails) {
1696 tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1697 if (Verbose) {
1698 Handle obj = sv->value();
1699 k->oop_print_on(obj(), tty);
1700 }
1701 }
1702 #endif // !PRODUCT
1703 continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1704 }
1705 // Else fall-through to do assignment for scalar-replaced boxed vector representation
1706 // which could be restored after vector object allocation.
1707 }
1708 if (k->is_instance_klass()) {
1709 InstanceKlass* ik = InstanceKlass::cast(k);
1710 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, nullptr, CHECK);
1711 } else if (k->is_flatArray_klass()) {
1712 FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1713 reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, skip_internal, CHECK);
1714 } else if (k->is_typeArray_klass()) {
1715 TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1716 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1717 } else if (k->is_objArray_klass()) {
1718 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1719 }
1720 }
1721   // These objects may escape when we return to the interpreter after deoptimization.
1722   // We need a barrier so that stores that initialize these objects can't be reordered
1723 // with subsequent stores that make these objects accessible by other threads.
1724 OrderAccess::storestore();
1725 }
1726
1727
1728 // relock objects for which synchronization was eliminated
1729 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1730 JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1731 bool relocked_objects = false;
1732 for (int i = 0; i < monitors->length(); i++) {
1733 MonitorInfo* mon_info = monitors->at(i);
1883 xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1884 nm->log_identity(xtty);
1885 xtty->end_head();
1886 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1887 xtty->begin_elem("jvms bci='%d'", sd->bci());
1888 xtty->method(sd->method());
1889 xtty->end_elem();
1890 if (sd->is_top()) break;
1891 }
1892 xtty->tail("deoptimized");
1893 }
1894
1895 Continuation::notify_deopt(thread, fr.sp());
1896
1897 // Patch the compiled method so that when execution returns to it we will
1898 // deopt the execution state and return to the interpreter.
1899 fr.deoptimize(thread);
1900 }
1901
1902 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1903 // Deoptimize only if the frame comes from compiled code.
1904 // Do not deoptimize the frame which is already patched
1905 // during the execution of the loops below.
1906 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1907 return;
1908 }
1909 ResourceMark rm;
1910 deoptimize_single_frame(thread, fr, reason);
1911 }
1912
1913 #if INCLUDE_JVMCI
1914 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1915 // there is no exception handler for this pc => deoptimize
1916 nm->make_not_entrant();
1917
1918 // Use Deoptimization::deoptimize for all of its side-effects:
1919   // gathering trap statistics, logging, etc.
1920   // It also patches the return pc, but we do not care about that
1921 // since we return a continuation to the deopt_blob below.
1922 JavaThread* thread = JavaThread::current();
1923 RegisterMap reg_map(thread,
|