
src/hotspot/share/runtime/deoptimization.cpp (old version)

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/bytecode.inline.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/method.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/typeArrayOop.inline.hpp"
  59 #include "oops/verifyOopClosure.hpp"
  60 #include "prims/jvmtiDeferredUpdates.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "prims/jvmtiThreadState.hpp"
  63 #include "prims/methodHandles.hpp"
  64 #include "prims/vectorSupport.hpp"
  65 #include "runtime/atomic.hpp"
  66 #include "runtime/basicLock.inline.hpp"
  67 #include "runtime/continuation.hpp"
  68 #include "runtime/continuationEntry.inline.hpp"
  69 #include "runtime/deoptimization.hpp"
  70 #include "runtime/escapeBarrier.hpp"
  71 #include "runtime/fieldDescriptor.hpp"
  72 #include "runtime/fieldDescriptor.inline.hpp"
  73 #include "runtime/frame.inline.hpp"
  74 #include "runtime/handles.inline.hpp"
  75 #include "runtime/interfaceSupport.inline.hpp"
  76 #include "runtime/javaThread.hpp"
  77 #include "runtime/jniHandles.inline.hpp"

 334                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 335                                   bool& deoptimized_objects) {
 336   bool realloc_failures = false;
  337   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 338 
 339   JavaThread* deoptee_thread = chunk->at(0)->thread();
 340   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 341          "a frame can only be deoptimized by the owner thread");
 342 
 343   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 344 
  345   // The flag return_oop() indicates call sites which return an oop
  346   // in compiled code. Such sites include Java method calls,
 347   // runtime calls (for example, used to allocate new objects/arrays
 348   // on slow code path) and any other calls generated in compiled code.
 349   // It is not guaranteed that we can get such information here only
 350   // by analyzing bytecode in deoptimized frames. This is why this flag
 351   // is set during method compilation (see Compile::Process_OopMap_Node()).
 352   // If the previous frame was popped or if we are dispatching an exception,
 353   // we don't have an oop result.
 354   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 355   Handle return_value;
 356   if (save_oop_result) {
  357     // Reallocation may trigger GC. If deoptimization happened on return from
  358     // a call which returns an oop, we need to save it since it is not in the oopmap.
 359     oop result = deoptee.saved_oop_result(&map);
 360     assert(oopDesc::is_oop_or_null(result), "must be oop");
 361     return_value = Handle(thread, result);
 362     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 363     if (TraceDeoptimization) {
 364       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 365       tty->cr();
 366     }
 367   }
 368   if (objects != nullptr) {
 369     if (exec_mode == Deoptimization::Unpack_none) {
 370       assert(thread->thread_state() == _thread_in_vm, "assumption");
 371       JavaThread* THREAD = thread; // For exception macros.
 372       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 373       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 374       deoptimized_objects = true;
 375     } else {
 376       JavaThread* current = thread; // For JRT_BLOCK
 377       JRT_BLOCK
 378       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 379       JRT_END
 380     }
 381     guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 382     bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 383     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci);
 384     if (TraceDeoptimization) {
 385       print_objects(deoptee_thread, objects, realloc_failures);
 386     }
 387   }
 388   if (save_oop_result) {
 389     // Restore result.
 390     deoptee.set_saved_oop_result(&map, return_value());
 391   }
 392   return realloc_failures;
 393 }
 394 
 395 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 396                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 397   JavaThread* deoptee_thread = chunk->at(0)->thread();
 398   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 399   assert(thread == Thread::current(), "should be");
 400   HandleMark hm(thread);
 401 #ifndef PRODUCT
 402   bool first = true;
 403 #endif // !PRODUCT
 404   // Start locking from outermost/oldest frame
 405   for (int i = (chunk->length() - 1); i >= 0; i--) {
 406     compiledVFrame* cvf = chunk->at(i);
 407     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 408     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 409     if (monitors->is_nonempty()) {
 410       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 705   // its caller's stack by. If the caller is a compiled frame then
 706   // we pretend that the callee has no parameters so that the
 707   // extension counts for the full amount of locals and not just
 708   // locals-parms. This is because without a c2i adapter the parm
 709   // area as created by the compiled frame will not be usable by
 710   // the interpreter. (Depending on the calling convention there
 711   // may not even be enough space).
 712 
 713   // QQQ I'd rather see this pushed down into last_frame_adjust
 714   // and have it take the sender (aka caller).
 715 
 716   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 717     caller_adjustment = last_frame_adjust(0, callee_locals);
 718   } else if (callee_locals > callee_parameters) {
 719     // The caller frame may need extending to accommodate
 720     // non-parameter locals of the first unpacked interpreted frame.
 721     // Compute that adjustment.
 722     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 723   }
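
// --- Editor's sketch (not part of the webrev): a model of the adjustment
// --- computed above. On several platforms last_frame_adjust() reduces to the
// --- difference between locals and parameter slots scaled to stack words;
// --- treat this as an illustration, not any platform's actual definition.
static int last_frame_adjust_model(int callee_parameters, int callee_locals,
                                   int stack_element_words) {
  // Extra words the caller's frame must grow by so the interpreted callee
  // can address all of its locals, not just the parameter slots it shares.
  return (callee_locals - callee_parameters) * stack_element_words;
}
// A compiled caller takes the first branch above with callee_parameters == 0,
// so the frame is extended by the full amount of locals.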
 724 
 725   // If the sender is deoptimized we must retrieve the address of the handler
 726   // since the frame will "magically" show the original pc before the deopt
 727   // and we'd undo the deopt.
 728 
 729   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 730   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 731     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 732   }
 733 
 734   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 735 
 736 #if INCLUDE_JVMCI
 737   if (exceptionObject() != nullptr) {
 738     current->set_exception_oop(exceptionObject());
 739     exec_mode = Unpack_exception;
 740   }
 741 #endif
 742 
 743   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 744     assert(current->has_pending_exception(), "should have thrown OOME");
 745     current->set_exception_oop(current->pending_exception());

1221        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1222        default:;
1223      }
1224    }
1225    return nullptr;
1226 }
1227 #endif // INCLUDE_JVMCI
1228 
1229 #if COMPILER2_OR_JVMCI
1230 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1231   Handle pending_exception(THREAD, thread->pending_exception());
1232   const char* exception_file = thread->exception_file();
1233   int exception_line = thread->exception_line();
1234   thread->clear_pending_exception();
1235 
1236   bool failures = false;
1237 
1238   for (int i = 0; i < objects->length(); i++) {
1239     assert(objects->at(i)->is_object(), "invalid debug information");
1240     ObjectValue* sv = (ObjectValue*) objects->at(i);
1241 
1242     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1243     oop obj = nullptr;
1244 
1245     bool cache_init_error = false;
1246     if (k->is_instance_klass()) {
1247 #if INCLUDE_JVMCI
1248       nmethod* nm = fr->cb()->as_nmethod_or_null();
1249       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1250         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1251         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1252         if (obj != nullptr) {
1253           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1254           abv->set_cached(true);
1255         } else if (cache_init_error) {
1256           // Results in an OOME which is valid (as opposed to a class initialization error)
1257       // and is fine for the rare case of a cache initialization failing.
1258           failures = true;
1259         }
1260       }
1261 #endif // INCLUDE_JVMCI
1262 
1263       InstanceKlass* ik = InstanceKlass::cast(k);
1264       if (obj == nullptr && !cache_init_error) {
1265         InternalOOMEMark iom(THREAD);
1266         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1267           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1268         } else {
1269           obj = ik->allocate_instance(THREAD);
1270         }
1271       }
1272     } else if (k->is_typeArray_klass()) {
1273       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1274       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1275       int len = sv->field_size() / type2size[ak->element_type()];
1276       InternalOOMEMark iom(THREAD);
1277       obj = ak->allocate(len, THREAD);
1278     } else if (k->is_objArray_klass()) {
1279       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1280       InternalOOMEMark iom(THREAD);
1281       obj = ak->allocate(sv->field_size(), THREAD);
1282     }
1283 
1284     if (obj == nullptr) {
1285       failures = true;
1286     }
1287 
1288     assert(sv->value().is_null(), "redundant reallocation");
1289     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1290     CLEAR_PENDING_EXCEPTION;
1291     sv->set_value(obj);
1292   }
1293 
1294   if (failures) {
1295     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1296   } else if (pending_exception.not_null()) {
1297     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1298   }
1299 
1300   return failures;
1301 }
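
// --- Editor's sketch (not part of the webrev): the save/clear/restore flow of
// --- realloc_objects() above, in a standalone miniature. Types and names here
// --- are illustrative stand-ins for the thread's pending-exception state.
#include <optional>
#include <string>

struct MiniThread { std::optional<std::string> pending_exception; };

static bool realloc_model(MiniThread& t, bool allocation_fails) {
  std::optional<std::string> saved = t.pending_exception; // save pending exception
  t.pending_exception.reset();                            // clear it for the duration

  bool failures = allocation_fails;                       // ... reallocate objects ...

  if (failures) {
    t.pending_exception = "OutOfMemoryError";             // an OOME replaces anything
  } else if (saved.has_value()) {
    t.pending_exception = saved;                          // reinstate the original
  }
  return failures;
}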
1302 
1303 #if INCLUDE_JVMCI
1304 /**
1305  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1306  * we need to somehow be able to recover the actual kind to be able to write the correct
1307  * amount of bytes.
1308  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1309  * the entries at index i + 1 to i + n - 1 are 'markers'.
1310  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1311  * expected form of the array would be:
1312  *
1313  * {b0, b1, b2, b3, INT, marker, b6, b7}
1314  *
1315  * Thus, in order to get back the size of the entry, we simply need to count the
1316  * entry itself plus the marker entries that follow it.
1317  *
1318  * @param virtualArray the virtualized byte array
1319  * @param i index of the virtual entry we are recovering
1320  * @return The number of bytes the entry spans
1321  */
1322 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1448       default:
1449         ShouldNotReachHere();
1450     }
1451     index++;
1452   }
1453 }
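
// --- Editor's sketch (not part of the webrev): decoding the marker scheme
// --- documented for count_number_of_bytes_for_entry() above. A standalone
// --- model assuming a hypothetical Entry with an is_marker flag; the real
// --- code walks the ObjectValue's field list instead.
#include <cassert>
#include <vector>

struct Entry { bool is_marker; };

// Bytes spanned by the entry at index i: the entry itself plus the run of
// marker entries that follow it.
static int bytes_for_entry(const std::vector<Entry>& arr, int i) {
  assert(!arr[i].is_marker && "i must point at a real entry");
  int n = 1;
  while (i + n < (int)arr.size() && arr[i + n].is_marker) {
    n++;
  }
  return n;
}
// For {b0, b1, b2, b3, INT, marker, b6, b7}: bytes_for_entry(arr, 4) == 2,
// i.e. the "INT" at index 4 is really a two-byte short.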
1454 
1455 // restore fields of an eliminated object array
1456 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1457   for (int i = 0; i < sv->field_size(); i++) {
1458     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1459     assert(value->type() == T_OBJECT, "object element expected");
1460     obj->obj_at_put(i, value->get_obj()());
1461   }
1462 }
1463 
1464 class ReassignedField {
1465 public:
1466   int _offset;
1467   BasicType _type;
1468 public:
1469   ReassignedField() {
1470     _offset = 0;
1471     _type = T_ILLEGAL;
1472   }
1473 };
1474 
1475 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1476 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1477   InstanceKlass* super = klass->superklass();
1478   if (super != nullptr) {
1479     get_reassigned_fields(super, fields, is_jvmci);
1480   }
1481   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1482     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1483       ReassignedField field;
1484       field._offset = fs.offset();
1485       field._type = Signature::basic_type(fs.signature());
1486       fields->append(field);
1487     }
1488   }
1489   return fields;
1490 }
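
// --- Editor's sketch (not part of the webrev): why get_reassigned_fields()
// --- above recurses into the superclass first. The compiler records scalarized
// --- fields super-before-sub, so reassignment must enumerate them in the same
// --- order for svIndex to stay in sync. Standalone model with made-up types.
#include <string>
#include <vector>

struct ModelKlass {
  const ModelKlass* super;
  std::vector<std::string> own_fields;
};

static void collect_fields(const ModelKlass* k, std::vector<std::string>& out) {
  if (k->super != nullptr) {
    collect_fields(k->super, out);  // superclass fields come first
  }
  out.insert(out.end(), k->own_fields.begin(), k->own_fields.end());
}
// For B extends A, collect_fields(&B, out) yields A's fields then B's, matching
// the order in which the scope's field_at(svIndex) stream was recorded.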
1491 
1492 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1493 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci) {
1494   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1495   for (int i = 0; i < fields->length(); i++) {
1496     ScopeValue* scope_field = sv->field_at(svIndex);
1497     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1498     int offset = fields->at(i)._offset;
1499     BasicType type = fields->at(i)._type;
1500     switch (type) {
1501       case T_OBJECT: case T_ARRAY:
1502         assert(value->type() == T_OBJECT, "Agreement.");
1503         obj->obj_field_put(offset, value->get_obj()());
1504         break;
1505 
1506       case T_INT: case T_FLOAT: { // 4 bytes.
1507         assert(value->type() == T_INT, "Agreement.");
1508         bool big_value = false;
1509         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1510           if (scope_field->is_location()) {
1511             Location::Type type = ((LocationValue*) scope_field)->location().type();
1512             if (type == Location::dbl || type == Location::lng) {
1513               big_value = true;
1514             }
1515           }
1516           if (scope_field->is_constant_int()) {
1517             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1518             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1519               big_value = true;
1520             }
1521           }

1552       case T_CHAR:
1553         assert(value->type() == T_INT, "Agreement.");
1554         obj->char_field_put(offset, (jchar)value->get_jint());
1555         break;
1556 
1557       case T_BYTE:
1558         assert(value->type() == T_INT, "Agreement.");
1559         obj->byte_field_put(offset, (jbyte)value->get_jint());
1560         break;
1561 
1562       case T_BOOLEAN:
1563         assert(value->type() == T_INT, "Agreement.");
1564         obj->bool_field_put(offset, (jboolean)value->get_jint());
1565         break;
1566 
1567       default:
1568         ShouldNotReachHere();
1569     }
1570     svIndex++;
1571   }
1572   return svIndex;
1573 }
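
// --- Editor's sketch (not part of the webrev): the "big value" probe in the
// --- T_INT/T_FLOAT case above. A long or double occupies two T_INT-typed
// --- slots in the debug info, so the code checks either the location type
// --- (Location::lng / Location::dbl) or a constant int followed by a
// --- constant long/double. Boolean model of that decision:
static bool is_big_value_model(bool next_field_is_int,          // fields->at(i+1)._type == T_INT
                               bool location_is_long_or_double, // Location::lng or Location::dbl
                               bool const_int_then_const_wide) {
  return next_field_is_int &&
         (location_is_long_or_double || const_int_then_const_wide);
}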
1574 
1575 // restore fields of all eliminated objects and arrays
1576 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci) {
1577   for (int i = 0; i < objects->length(); i++) {
1578     assert(objects->at(i)->is_object(), "invalid debug information");
1579     ObjectValue* sv = (ObjectValue*) objects->at(i);
1580     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1581     Handle obj = sv->value();
1582     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1583 #ifndef PRODUCT
1584     if (PrintDeoptimizationDetails) {
1585       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1586     }
1587 #endif // !PRODUCT
1588 
1589     if (obj.is_null()) {
1590       continue;
1591     }
1592 
1593 #if INCLUDE_JVMCI
1594     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1595     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1596       continue;
1597     }
1598 #endif // INCLUDE_JVMCI
1599     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1600       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1601       ScopeValue* payload = sv->field_at(0);
1602       if (payload->is_location() &&
1603           payload->as_LocationValue()->location().type() == Location::vector) {
1604 #ifndef PRODUCT
1605         if (PrintDeoptimizationDetails) {
1606           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1607           if (Verbose) {
1608             Handle obj = sv->value();
1609             k->oop_print_on(obj(), tty);
1610           }
1611         }
1612 #endif // !PRODUCT
1613         continue; // Such a vector's value was already restored in VectorSupport::allocate_vector().
1614       }
1615       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1616       // which could be restored after vector object allocation.
1617     }
1618     if (k->is_instance_klass()) {
1619       InstanceKlass* ik = InstanceKlass::cast(k);
1620       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci);
1621     } else if (k->is_typeArray_klass()) {
1622       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1623       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1624     } else if (k->is_objArray_klass()) {
1625       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1626     }
1627   }
1628   // These objects may escape when we return to the interpreter after deoptimization.
1629   // We need a barrier so that stores that initialize these objects can't be reordered
1630   // with subsequent stores that make these objects accessible to other threads.
1631   OrderAccess::storestore();
1632 }
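
// --- Editor's sketch (not part of the webrev): the ordering guarantee the
// --- storestore barrier above provides, expressed with a standard C++ fence.
// --- A release fence keeps the initializing stores from being reordered past
// --- the later store that makes the object reachable. Names are illustrative.
#include <atomic>

struct Rematerialized { int field; };
static std::atomic<Rematerialized*> g_published{nullptr};

static void publish(Rematerialized* obj) {
  obj->field = 42;                                      // initializing store
  std::atomic_thread_fence(std::memory_order_release);  // ~ OrderAccess::storestore()
  g_published.store(obj, std::memory_order_relaxed);    // store that publishes obj
}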
1633 
1634 
1635 // relock objects for which synchronization was eliminated
1636 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1637                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1638   bool relocked_objects = false;
1639   for (int i = 0; i < monitors->length(); i++) {
1640     MonitorInfo* mon_info = monitors->at(i);

1796     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1797     nm->log_identity(xtty);
1798     xtty->end_head();
1799     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1800       xtty->begin_elem("jvms bci='%d'", sd->bci());
1801       xtty->method(sd->method());
1802       xtty->end_elem();
1803       if (sd->is_top())  break;
1804     }
1805     xtty->tail("deoptimized");
1806   }
1807 
1808   Continuation::notify_deopt(thread, fr.sp());
1809 
1810   // Patch the compiled method so that when execution returns to it we will
1811   // deopt the execution state and return to the interpreter.
1812   fr.deoptimize(thread);
1813 }
1814 
1815 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1816   // Deoptimize only if the frame comes from compiled code.
1817   // Do not deoptimize the frame which is already patched
1818   // during the execution of the loops below.
1819   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1820     return;
1821   }
1822   ResourceMark rm;
1823   deoptimize_single_frame(thread, fr, reason);
1824 }
1825 
1826 #if INCLUDE_JVMCI
1827 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1828   // there is no exception handler for this pc => deoptimize
1829   nm->make_not_entrant("missing exception handler");
1830 
1831   // Use Deoptimization::deoptimize for all of its side-effects:
1832   // gathering trap statistics, logging...
1833   // it also patches the return pc but we do not care about that
1834   // since we return a continuation to the deopt_blob below.
1835   JavaThread* thread = JavaThread::current();
1836   RegisterMap reg_map(thread,

src/hotspot/share/runtime/deoptimization.cpp (new version)

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/bytecode.inline.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/fieldStreams.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/inlineKlass.inline.hpp"
  61 #include "oops/typeArrayOop.inline.hpp"
  62 #include "oops/verifyOopClosure.hpp"
  63 #include "prims/jvmtiDeferredUpdates.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "prims/jvmtiThreadState.hpp"
  66 #include "prims/methodHandles.hpp"
  67 #include "prims/vectorSupport.hpp"
  68 #include "runtime/atomic.hpp"
  69 #include "runtime/basicLock.inline.hpp"
  70 #include "runtime/continuation.hpp"
  71 #include "runtime/continuationEntry.inline.hpp"
  72 #include "runtime/deoptimization.hpp"
  73 #include "runtime/escapeBarrier.hpp"
  74 #include "runtime/fieldDescriptor.hpp"
  75 #include "runtime/fieldDescriptor.inline.hpp"
  76 #include "runtime/frame.inline.hpp"
  77 #include "runtime/handles.inline.hpp"
  78 #include "runtime/interfaceSupport.inline.hpp"
  79 #include "runtime/javaThread.hpp"
  80 #include "runtime/jniHandles.inline.hpp"

 337                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 338                                   bool& deoptimized_objects) {
 339   bool realloc_failures = false;
 340   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 341 
 342   JavaThread* deoptee_thread = chunk->at(0)->thread();
 343   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 344          "a frame can only be deoptimized by the owner thread");
 345 
 346   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 347 
 348   // The flag return_oop() indicates call sites which return an oop
 349   // in compiled code. Such sites include Java method calls,
 350   // runtime calls (for example, used to allocate new objects/arrays
 351   // on slow code path) and any other calls generated in compiled code.
 352   // It is not guaranteed that we can get such information here only
 353   // by analyzing bytecode in deoptimized frames. This is why this flag
 354   // is set during method compilation (see Compile::Process_OopMap_Node()).
 355   // If the previous frame was popped or if we are dispatching an exception,
 356   // we don't have an oop result.
 357   ScopeDesc* scope = chunk->at(0)->scope();
 358   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 359   // In case of the return of multiple values, we must take care
 360   // of all oop return values.
 361   GrowableArray<Handle> return_oops;
 362   InlineKlass* vk = nullptr;
 363   if (save_oop_result && scope->return_scalarized()) {
 364     vk = InlineKlass::returned_inline_klass(map);
 365     if (vk != nullptr) {
 366       vk->save_oop_fields(map, return_oops);
 367       save_oop_result = false;
 368     }
 369   }
 370   if (save_oop_result) {
 371     // Reallocation may trigger GC. If deoptimization happened on return from
 372     // a call which returns an oop, we need to save it since it is not in the oopmap.
 373     oop result = deoptee.saved_oop_result(&map);
 374     assert(oopDesc::is_oop_or_null(result), "must be oop");
 375     return_oops.push(Handle(thread, result));
 376     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 377     if (TraceDeoptimization) {
 378       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 379       tty->cr();
 380     }
 381   }
 382   if (objects != nullptr || vk != nullptr) {
 383     if (exec_mode == Deoptimization::Unpack_none) {
 384       assert(thread->thread_state() == _thread_in_vm, "assumption");
 385       JavaThread* THREAD = thread; // For exception macros.
 386       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 387       if (vk != nullptr) {
 388         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 389       }
 390       if (objects != nullptr) {
 391         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 392         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 393         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 394         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 395       }
 396       deoptimized_objects = true;
 397     } else {
 398       JavaThread* current = thread; // For JRT_BLOCK
 399       JRT_BLOCK
 400       if (vk != nullptr) {
 401         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 402       }
 403       if (objects != nullptr) {
 404         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 405         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 406         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 407         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 408       }
 409       JRT_END
 410     }
 411     if (TraceDeoptimization && objects != nullptr) {
 412       print_objects(deoptee_thread, objects, realloc_failures);
 413     }
 414   }
 415   if (save_oop_result || vk != nullptr) {
 416     // Restore result.
 417     assert(return_oops.length() == 1, "no inline type");
 418     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 419   }
 420   return realloc_failures;
 421 }
 422 
 423 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 424                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 425   JavaThread* deoptee_thread = chunk->at(0)->thread();
 426   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 427   assert(thread == Thread::current(), "should be");
 428   HandleMark hm(thread);
 429 #ifndef PRODUCT
 430   bool first = true;
 431 #endif // !PRODUCT
 432   // Start locking from outermost/oldest frame
 433   for (int i = (chunk->length() - 1); i >= 0; i--) {
 434     compiledVFrame* cvf = chunk->at(i);
 435     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 436     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 437     if (monitors->is_nonempty()) {
 438       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 733   // its caller's stack by. If the caller is a compiled frame then
 734   // we pretend that the callee has no parameters so that the
 735   // extension counts for the full amount of locals and not just
 736   // locals-parms. This is because without a c2i adapter the parm
 737   // area as created by the compiled frame will not be usable by
 738   // the interpreter. (Depending on the calling convention there
 739   // may not even be enough space).
 740 
 741   // QQQ I'd rather see this pushed down into last_frame_adjust
 742   // and have it take the sender (aka caller).
 743 
 744   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 745     caller_adjustment = last_frame_adjust(0, callee_locals);
 746   } else if (callee_locals > callee_parameters) {
 747     // The caller frame may need extending to accommodate
 748     // non-parameter locals of the first unpacked interpreted frame.
 749     // Compute that adjustment.
 750     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 751   }
 752 
 753   // If the sender is deoptimized we must retrieve the address of the handler
 754   // since the frame will "magically" show the original pc before the deopt
 755   // and we'd undo the deopt.
 756 
 757   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 758   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 759     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 760   }
 761 
 762   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 763 
 764 #if INCLUDE_JVMCI
 765   if (exceptionObject() != nullptr) {
 766     current->set_exception_oop(exceptionObject());
 767     exec_mode = Unpack_exception;
 768   }
 769 #endif
 770 
 771   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 772     assert(current->has_pending_exception(), "should have thrown OOME");
 773     current->set_exception_oop(current->pending_exception());

1249        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1250        default:;
1251      }
1252    }
1253    return nullptr;
1254 }
1255 #endif // INCLUDE_JVMCI
1256 
1257 #if COMPILER2_OR_JVMCI
1258 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1259   Handle pending_exception(THREAD, thread->pending_exception());
1260   const char* exception_file = thread->exception_file();
1261   int exception_line = thread->exception_line();
1262   thread->clear_pending_exception();
1263 
1264   bool failures = false;
1265 
1266   for (int i = 0; i < objects->length(); i++) {
1267     assert(objects->at(i)->is_object(), "invalid debug information");
1268     ObjectValue* sv = (ObjectValue*) objects->at(i);
1269     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1270 
1271     // Check if the object may be null and has an additional null_marker input that needs
1272     // to be checked before using the field values. Skip re-allocation if it is null.
1273     if (sv->maybe_null()) {
1274       assert(k->is_inline_klass(), "must be an inline klass");
1275       jint null_marker = StackValue::create_stack_value(fr, reg_map, sv->null_marker())->get_jint();
1276       if (null_marker == 0) {
1277         continue;
1278       }
1279     }
1280 
1281     oop obj = nullptr;
1282     bool cache_init_error = false;
1283     if (k->is_instance_klass()) {
1284 #if INCLUDE_JVMCI
1285       nmethod* nm = fr->cb()->as_nmethod_or_null();
1286       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1287         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1288         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1289         if (obj != nullptr) {
1290           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1291           abv->set_cached(true);
1292         } else if (cache_init_error) {
1293           // Results in an OOME which is valid (as opposed to a class initialization error)
1294           // and is fine for the rare case of a cache initialization failing.
1295           failures = true;
1296         }
1297       }
1298 #endif // INCLUDE_JVMCI
1299 
1300       InstanceKlass* ik = InstanceKlass::cast(k);
1301       if (obj == nullptr && !cache_init_error) {
1302         InternalOOMEMark iom(THREAD);
1303         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1304           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1305         } else {
1306           obj = ik->allocate_instance(THREAD);
1307         }
1308       }
1309     } else if (k->is_flatArray_klass()) {
1310       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1311       // The inline type array must be zeroed because not all of its memory is reassigned
1312       obj = ak->allocate(sv->field_size(), ak->layout_kind(), THREAD);
1313     } else if (k->is_typeArray_klass()) {
1314       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1315       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1316       int len = sv->field_size() / type2size[ak->element_type()];
1317       InternalOOMEMark iom(THREAD);
1318       obj = ak->allocate(len, THREAD);
1319     } else if (k->is_objArray_klass()) {
1320       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1321       InternalOOMEMark iom(THREAD);
1322       obj = ak->allocate(sv->field_size(), THREAD);
1323     }
1324 
1325     if (obj == nullptr) {
1326       failures = true;
1327     }
1328 
1329     assert(sv->value().is_null(), "redundant reallocation");
1330     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1331     CLEAR_PENDING_EXCEPTION;
1332     sv->set_value(obj);
1333   }
1334 
1335   if (failures) {
1336     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1337   } else if (pending_exception.not_null()) {
1338     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1339   }
1340 
1341   return failures;
1342 }
1343 
1344 // We're deoptimizing at the return of a call; the inline type's fields are
1345 // in registers. When we go back to the interpreter, it will expect a
1346 // reference to an inline type instance. Allocate and initialize it from
1347 // the register values here.
1348 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1349   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1350   if (new_vt == nullptr) {
1351     CLEAR_PENDING_EXCEPTION;
1352     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1353   }
1354   return_oops.clear();
1355   return_oops.push(Handle(THREAD, new_vt));
1356   return false;
1357 }
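
// --- Editor's sketch (not part of the webrev): the contract of
// --- realloc_inline_type_result() above in miniature. On success the saved
// --- field oops are replaced by one handle to the freshly allocated instance;
// --- on allocation failure an OOME is installed and failure is reported.
// --- Types are illustrative stand-ins for Handle and the TRAPS machinery.
#include <optional>
#include <string>
#include <vector>

struct MiniHandle { void* oop; };

static bool realloc_result_model(std::vector<MiniHandle>& return_oops,
                                 MiniHandle allocated,   // {nullptr} if allocation failed
                                 std::optional<std::string>& pending_exception) {
  if (allocated.oop == nullptr) {
    pending_exception = "OutOfMemoryError";  // reallocation during deopt failed
    return true;                             // report realloc failure
  }
  return_oops.clear();        // field oops are now reachable from the instance
  return_oops.push_back(allocated);
  return false;
}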
1358 
1359 #if INCLUDE_JVMCI
1360 /**
1361  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1362  * we need to somehow be able to recover the actual kind to be able to write the correct
1363  * amount of bytes.
1364  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1365  * the entries at index i + 1 to i + n - 1 are 'markers'.
1366  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1367  * expected form of the array would be:
1368  *
1369  * {b0, b1, b2, b3, INT, marker, b6, b7}
1370  *
1371  * Thus, in order to get back the size of the entry, we simply need to count the number
1372  * of marked entries
1373  *
1374  * @param virtualArray the virtualized byte array
1375  * @param i index of the virtual entry we are recovering
1376  * @return The number of bytes the entry spans
1377  */
1378 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1504       default:
1505         ShouldNotReachHere();
1506     }
1507     index++;
1508   }
1509 }
1510 
1511 // restore fields of an eliminated object array
1512 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1513   for (int i = 0; i < sv->field_size(); i++) {
1514     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1515     assert(value->type() == T_OBJECT, "object element expected");
1516     obj->obj_at_put(i, value->get_obj()());
1517   }
1518 }
1519 
1520 class ReassignedField {
1521 public:
1522   int _offset;
1523   BasicType _type;
1524   InstanceKlass* _klass;
1525   bool _is_flat;
1526   bool _is_null_free;
1527 public:
1528   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }
1529 };
1530 
1531 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1532 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1533   InstanceKlass* super = klass->superklass();
1534   if (super != nullptr) {
1535     get_reassigned_fields(super, fields, is_jvmci);
1536   }
1537   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1538     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1539       ReassignedField field;
1540       field._offset = fs.offset();
1541       field._type = Signature::basic_type(fs.signature());
1542       if (fs.is_flat()) {
1543         field._is_flat = true;
1544         field._is_null_free = fs.is_null_free_inline_type();
1545         // Resolve klass of flat inline type field
1546         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1547       }
1548       fields->append(field);
1549     }
1550   }
1551   return fields;
1552 }
1553 
1554 // Restore fields of an eliminated instance object employing the same field order used by the
1555 // compiler when it scalarizes an object at safepoints.
1556 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, TRAPS) {
1557   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1558   for (int i = 0; i < fields->length(); i++) {
1559     BasicType type = fields->at(i)._type;
1560     int offset = base_offset + fields->at(i)._offset;
1561     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1562     if (fields->at(i)._is_flat) {
1563       // Recursively re-assign flat inline type fields
1564       InstanceKlass* vk = fields->at(i)._klass;
1565       assert(vk != nullptr, "must be resolved");
1566       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1567       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, CHECK_0);
1568       if (!fields->at(i)._is_null_free) {
1569         ScopeValue* scope_field = sv->field_at(svIndex);
1570         StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1571         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1572         obj->bool_field_put(nm_offset, value->get_jint() & 1);
1573         svIndex++;
1574       }
1575       continue; // svIndex was already advanced above; skip the shared increment below
1576     }
1577 
1578     ScopeValue* scope_field = sv->field_at(svIndex);
1579     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1580     switch (type) {
1581       case T_OBJECT:
1582       case T_ARRAY:
1583         assert(value->type() == T_OBJECT, "Agreement.");
1584         obj->obj_field_put(offset, value->get_obj()());
1585         break;
1586 
1587       case T_INT: case T_FLOAT: { // 4 bytes.
1588         assert(value->type() == T_INT, "Agreement.");
1589         bool big_value = false;
1590         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1591           if (scope_field->is_location()) {
1592             Location::Type type = ((LocationValue*) scope_field)->location().type();
1593             if (type == Location::dbl || type == Location::lng) {
1594               big_value = true;
1595             }
1596           }
1597           if (scope_field->is_constant_int()) {
1598             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1599             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1600               big_value = true;
1601             }
1602           }

1633       case T_CHAR:
1634         assert(value->type() == T_INT, "Agreement.");
1635         obj->char_field_put(offset, (jchar)value->get_jint());
1636         break;
1637 
1638       case T_BYTE:
1639         assert(value->type() == T_INT, "Agreement.");
1640         obj->byte_field_put(offset, (jbyte)value->get_jint());
1641         break;
1642 
1643       case T_BOOLEAN:
1644         assert(value->type() == T_INT, "Agreement.");
1645         obj->bool_field_put(offset, (jboolean)value->get_jint());
1646         break;
1647 
1648       default:
1649         ShouldNotReachHere();
1650     }
1651     svIndex++;
1652   }
1653 
1654   return svIndex;
1655 }
1656 
1657 // restore fields of an eliminated inline type array
1658 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1659   InlineKlass* vk = vak->element_klass();
1660   assert(vk->maybe_flat_in_array(), "should only be used for flat inline type arrays");
1661   // Adjust offset to omit oop header
1662   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - InlineKlass::cast(vk)->payload_offset();
1663   // Initialize all elements of the flat inline type array
1664   for (int i = 0; i < sv->field_size(); i++) {
1665     ScopeValue* val = sv->field_at(i);
1666     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1667     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, is_jvmci, offset, CHECK);
1668   }
1669 }
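
// --- Editor's sketch (not part of the webrev): the element-offset arithmetic
// --- used by reassign_flat_array_elements() above. The constants are
// --- illustrative; the real values come from arrayOopDesc and the klass
// --- layout helper.
static int flat_element_offset(int array_base_offset,  // first element, in bytes
                               int payload_offset,     // header skipped per value
                               int log2_element_size,  // from the layout helper
                               int index) {
  int base = array_base_offset - payload_offset;  // omit the per-value oop header
  return base + (index << log2_element_size);     // scale index to element size
}
// Example: with a 16-byte array base, an 8-byte payload offset and 8-byte
// elements (log2 == 3), element 2 starts at 16 - 8 + (2 << 3) = 24 bytes in.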
1670 
1671 // restore fields of all eliminated objects and arrays
1672 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
1673   for (int i = 0; i < objects->length(); i++) {
1674     assert(objects->at(i)->is_object(), "invalid debug information");
1675     ObjectValue* sv = (ObjectValue*) objects->at(i);
1676     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1677     Handle obj = sv->value();
1678     assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1679 #ifndef PRODUCT
1680     if (PrintDeoptimizationDetails) {
1681       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1682     }
1683 #endif // !PRODUCT
1684 
1685     if (obj.is_null()) {
1686       continue;
1687     }
1688 
1689 #if INCLUDE_JVMCI
1690     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1691     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1692       continue;
1693     }
1694 #endif // INCLUDE_JVMCI
1695     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1696       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1697       ScopeValue* payload = sv->field_at(0);
1698       if (payload->is_location() &&
1699           payload->as_LocationValue()->location().type() == Location::vector) {
1700 #ifndef PRODUCT
1701         if (PrintDeoptimizationDetails) {
1702           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1703           if (Verbose) {
1704             Handle obj = sv->value();
1705             k->oop_print_on(obj(), tty);
1706           }
1707         }
1708 #endif // !PRODUCT
1709         continue; // Such a vector's value was already restored in VectorSupport::allocate_vector().
1710       }
1711       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1712       // which could be restored after vector object allocation.
1713     }
1714     if (k->is_instance_klass()) {
1715       InstanceKlass* ik = InstanceKlass::cast(k);
1716       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, CHECK);
1717     } else if (k->is_flatArray_klass()) {
1718       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1719       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
1720     } else if (k->is_typeArray_klass()) {
1721       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1722       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1723     } else if (k->is_objArray_klass()) {
1724       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1725     }
1726   }
1727   // These objects may escape when we return to the interpreter after deoptimization.
1728   // We need a barrier so that stores that initialize these objects can't be reordered
1729   // with subsequent stores that make these objects accessible to other threads.
1730   OrderAccess::storestore();
1731 }
1732 
1733 
1734 // relock objects for which synchronization was eliminated
1735 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1736                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1737   bool relocked_objects = false;
1738   for (int i = 0; i < monitors->length(); i++) {
1739     MonitorInfo* mon_info = monitors->at(i);

1895     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1896     nm->log_identity(xtty);
1897     xtty->end_head();
1898     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1899       xtty->begin_elem("jvms bci='%d'", sd->bci());
1900       xtty->method(sd->method());
1901       xtty->end_elem();
1902       if (sd->is_top())  break;
1903     }
1904     xtty->tail("deoptimized");
1905   }
1906 
1907   Continuation::notify_deopt(thread, fr.sp());
1908 
1909   // Patch the compiled method so that when execution returns to it we will
1910   // deopt the execution state and return to the interpreter.
1911   fr.deoptimize(thread);
1912 }
1913 
1914 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1915   // Deoptimize only if the frame comes from compiled code.
1916   // Do not deoptimize the frame which is already patched
1917   // during the execution of the loops below.
1918   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1919     return;
1920   }
1921   ResourceMark rm;
1922   deoptimize_single_frame(thread, fr, reason);
1923 }
1924 
1925 #if INCLUDE_JVMCI
1926 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1927   // there is no exception handler for this pc => deoptimize
1928   nm->make_not_entrant("missing exception handler");
1929 
1930   // Use Deoptimization::deoptimize for all of its side-effects:
1931   // gathering trap statistics, logging...
1932   // it also patches the return pc but we do not care about that
1933   // since we return a continuation to the deopt_blob below.
1934   JavaThread* thread = JavaThread::current();
1935   RegisterMap reg_map(thread,