< prev index next >

src/hotspot/share/runtime/deoptimization.cpp

Print this page

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"



  53 #include "oops/method.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.inline.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/typeArrayOop.inline.hpp"
  58 #include "oops/verifyOopClosure.hpp"
  59 #include "prims/jvmtiDeferredUpdates.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "prims/jvmtiThreadState.hpp"
  62 #include "prims/methodHandles.hpp"
  63 #include "prims/vectorSupport.hpp"
  64 #include "runtime/atomicAccess.hpp"
  65 #include "runtime/basicLock.inline.hpp"
  66 #include "runtime/continuation.hpp"
  67 #include "runtime/continuationEntry.inline.hpp"
  68 #include "runtime/deoptimization.hpp"
  69 #include "runtime/escapeBarrier.hpp"
  70 #include "runtime/fieldDescriptor.inline.hpp"
  71 #include "runtime/frame.inline.hpp"
  72 #include "runtime/handles.inline.hpp"

 281 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 282 // which is called from the method fetch_unroll_info_helper below.
      // Runtime entry for the deopt blob. Returns the UnrollBlock describing how the
      // compiled frame is to be unpacked; the actual work (including reallocation of
      // previously eliminated objects) happens in fetch_unroll_info_helper().
 283 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 284   // fetch_unroll_info() is called at the beginning of the deoptimization
 285   // handler. Note this fact before we start generating temporary frames
 286   // that can confuse an asynchronous stack walker. This counter is
 287   // decremented at the end of unpack_frames().
 288   current->inc_in_deopt_handler();
 289 
 290   if (exec_mode == Unpack_exception) {
 291     // When we get here, a callee has thrown an exception into a deoptimized
 292     // frame. That throw might have deferred stack watermark checking until
 293     // after unwinding. So we deal with such deferred requests here.
 294     StackWatermarkSet::after_unwind(current);
 295   }
 296 
 297   return fetch_unroll_info_helper(current, exec_mode);
 298 JRT_END
 299 
 300 #if COMPILER2_OR_JVMCI


















 301 // print information about reallocated objects
      // Dumps each rematerialized object's address, type and size for -XX:+TraceDeoptimization.
      // Output is accumulated in a stringStream and emitted with a single print_raw at the
      // end — presumably so the whole dump appears contiguously on tty; confirm intent.
      // NOTE(review): 'fd' and the 'realloc_failures' parameter appear unused in this version.
 302 static void print_objects(JavaThread* deoptee_thread,
 303                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 304   ResourceMark rm;
 305   stringStream st;  // change to logStream with logging
 306   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 307   fieldDescriptor fd;
 308 
 309   for (int i = 0; i < objects->length(); i++) {
 310     ObjectValue* sv = (ObjectValue*) objects->at(i);
 311     Handle obj = sv->value();
 312 
 313     if (obj.is_null()) {
      // A null handle here means reallocation of this object did not produce an oop.
 314       st.print_cr("     nullptr");
 315       continue;
 316     }
 317 
 318     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());

 319 
 320     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 321     k->print_value_on(&st);
 322     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 323 
 324     if (Verbose && k != nullptr) {
 325       k->oop_print_on(obj(), &st);
 326     }
 327   }
 328   tty->print_raw(st.freeze());
 329 }
 330 
      // Reallocates the scalar-replaced (eliminated) objects of the youngest compiled
      // frame in `chunk` and reassigns their fields, so the interpreter can observe
      // them after deoptimization. Returns true if at least one reallocation failed.
      // `deoptimized_objects` is set when the Unpack_none path actually rematerialized.
 331 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 332                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 333                                   bool& deoptimized_objects) {
 334   bool realloc_failures = false;
 335   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 336 
 337   JavaThread* deoptee_thread = chunk->at(0)->thread();
 338   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 339          "a frame can only be deoptimized by the owner thread");
 340 
 341   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 342 
 343   // The flag return_oop() indicates call sites which return oop
 344   // in compiled code. Such sites include java method calls,
 345   // runtime calls (for example, used to allocate new objects/arrays
 346   // on slow code path) and any other calls generated in compiled code.
 347   // It is not guaranteed that we can get such information here only
 348   // by analyzing bytecode in deoptimized frames. This is why this flag
 349   // is set during method compilation (see Compile::Process_OopMap_Node()).
 350   // If the previous frame was popped or if we are dispatching an exception,
 351   // we don't have an oop result.
 352   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 353   Handle return_value;











 354   if (save_oop_result) {
 355     // Reallocation may trigger GC. If deoptimization happened on return from
 356     // call which returns oop we need to save it since it is not in oopmap.
 357     oop result = deoptee.saved_oop_result(&map);
 358     assert(oopDesc::is_oop_or_null(result), "must be oop");
 359     return_value = Handle(thread, result);
 360     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 361     if (TraceDeoptimization) {
 362       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 363       tty->cr();
 364     }
 365   }
 366   if (objects != nullptr) {
 367     if (exec_mode == Deoptimization::Unpack_none) {
      // Unpack_none: the caller is already _thread_in_vm (see assert below), so we use
      // the exception macros directly; a pending OOM is cleared and reported by
      // returning true via CHECK_AND_CLEAR_.
 368       assert(thread->thread_state() == _thread_in_vm, "assumption");
 369       JavaThread* THREAD = thread; // For exception macros.
 370       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 371       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));








 372       deoptimized_objects = true;
 373     } else {
      // Other modes arrive from a runtime stub; JRT_BLOCK performs the state
      // transition needed before we can allocate.
 374       JavaThread* current = thread; // For JRT_BLOCK
 375       JRT_BLOCK
 376       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);








 377       JRT_END
 378     }
 379     guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 380     bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 381     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci);
 382     if (TraceDeoptimization) {
 383       print_objects(deoptee_thread, objects, realloc_failures);
 384     }
 385   }
 386   if (save_oop_result) {
 387     // Restore result.
      // Reallocation above may have triggered GC, so reinstall the (possibly moved)
      // saved oop result from its handle.
 388     deoptee.set_saved_oop_result(&map, return_value());

 389   }
 390   return realloc_failures;
 391 }
 392 
 393 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 394                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 395   JavaThread* deoptee_thread = chunk->at(0)->thread();
 396   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 397   assert(thread == Thread::current(), "should be");
 398   HandleMark hm(thread);
 399 #ifndef PRODUCT
 400   bool first = true;
 401 #endif // !PRODUCT
 402   // Start locking from outermost/oldest frame
 403   for (int i = (chunk->length() - 1); i >= 0; i--) {
 404     compiledVFrame* cvf = chunk->at(i);
 405     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 406     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 407     if (monitors->is_nonempty()) {
 408       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 699   // its caller's stack by. If the caller is a compiled frame then
 700   // we pretend that the callee has no parameters so that the
 701   // extension counts for the full amount of locals and not just
 702   // locals-parms. This is because without a c2i adapter the parm
 703   // area as created by the compiled frame will not be usable by
 704   // the interpreter. (Depending on the calling convention there
 705   // may not even be enough space).
 706 
 707   // QQQ I'd rather see this pushed down into last_frame_adjust
 708   // and have it take the sender (aka caller).
 709 
 710   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 711     caller_adjustment = last_frame_adjust(0, callee_locals);
 712   } else if (callee_locals > callee_parameters) {
 713     // The caller frame may need extending to accommodate
 714     // non-parameter locals of the first unpacked interpreted frame.
 715     // Compute that adjustment.
 716     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 717   }
 718 
 719   // If the sender is deoptimized the we must retrieve the address of the handler
 720   // since the frame will "magically" show the original pc before the deopt
 721   // and we'd undo the deopt.
 722 
 723   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 724   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 725     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 726   }
 727 
 728   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 729 
 730 #if INCLUDE_JVMCI
 731   if (exceptionObject() != nullptr) {
 732     current->set_exception_oop(exceptionObject());
 733     exec_mode = Unpack_exception;
 734     assert(array->element(0)->rethrow_exception(), "must be");
 735   }
 736 #endif
 737 
 738   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 739     assert(current->has_pending_exception(), "should have thrown OOME");

1216        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1217        default:;
1218      }
1219    }
1220    return nullptr;
1221 }
1222 #endif // INCLUDE_JVMCI
1223 
1224 #if COMPILER2_OR_JVMCI
      // Reallocates every scalar-replaced object described in `objects`, storing the new
      // oop into each ObjectValue via sv->set_value(). Any exception already pending on
      // entry is saved and re-installed on exit so that per-object allocation failures
      // can be detected in between. Returns true (and throws a shared OOME) if any
      // allocation failed.
1225 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1226   Handle pending_exception(THREAD, thread->pending_exception());
1227   const char* exception_file = thread->exception_file();
1228   int exception_line = thread->exception_line();
1229   thread->clear_pending_exception();
1230 
1231   bool failures = false;
1232 
1233   for (int i = 0; i < objects->length(); i++) {
1234     assert(objects->at(i)->is_object(), "invalid debug information");
1235     ObjectValue* sv = (ObjectValue*) objects->at(i);
1236 
1237     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1238     oop obj = nullptr;









1239 

1240     bool cache_init_error = false;
1241     if (k->is_instance_klass()) {
1242 #if INCLUDE_JVMCI
      // NOTE(review): nm comes from as_nmethod_or_null() but is dereferenced without a
      // null check on the next line — confirm fr->cb() is always an nmethod here.
1243       nmethod* nm = fr->cb()->as_nmethod_or_null();
1244       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1245         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1246         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1247         if (obj != nullptr) {
1248           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1249           abv->set_cached(true);
1250         } else if (cache_init_error) {
1251           // Results in an OOME which is valid (as opposed to a class initialization error)
1252           // and is fine for the rare case a cache initialization failing.
1253           failures = true;
1254         }
1255       }
1256 #endif // INCLUDE_JVMCI
1257 
1258       InstanceKlass* ik = InstanceKlass::cast(k);
1259       if (obj == nullptr && !cache_init_error) {
1260         InternalOOMEMark iom(THREAD);
1261         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1262           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1263         } else {
1264           obj = ik->allocate_instance(THREAD);
1265         }
1266       }




1267     } else if (k->is_typeArray_klass()) {
      // Primitive array: field_size() counts stack slots, so divide by the
      // per-element slot size to recover the element count.
1268       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1269       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1270       int len = sv->field_size() / type2size[ak->element_type()];
1271       InternalOOMEMark iom(THREAD);
1272       obj = ak->allocate_instance(len, THREAD);
1273     } else if (k->is_objArray_klass()) {
1274       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1275       InternalOOMEMark iom(THREAD);
1276       obj = ak->allocate_instance(sv->field_size(), THREAD);
1277     }
1278 
1279     if (obj == nullptr) {
1280       failures = true;
1281     }
1282 
1283     assert(sv->value().is_null(), "redundant reallocation");
1284     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
      // Clear per-object allocation exceptions; a single shared OOME is thrown below.
1285     CLEAR_PENDING_EXCEPTION;
1286     sv->set_value(obj);
1287   }
1288 
1289   if (failures) {
1290     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1291   } else if (pending_exception.not_null()) {
      // No failures: re-install whatever exception was pending before we started.
1292     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1293   }
1294 
1295   return failures;
1296 }
1297 















1298 #if INCLUDE_JVMCI
1299 /**
1300  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1301  * we need to somehow be able to recover the actual kind to be able to write the correct
1302  * amount of bytes.
1303  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1304  * the entries at index n + 1 to n + i are 'markers'.
1305  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1306  * expected form of the array would be:
1307  *
1308  * {b0, b1, b2, b3, INT, marker, b6, b7}
1309  *
1310  * Thus, in order to get back the size of the entry, we simply need to count the number
1311  * of marked entries
1312  *
1313  * @param virtualArray the virtualized byte array
1314  * @param i index of the virtual entry we are recovering
1315  * @return The number of bytes the entry spans
1316  */
1317 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1443       default:
1444         ShouldNotReachHere();
1445     }
1446     index++;
1447   }
1448 }
1449 
1450 // restore fields of an eliminated object array
      // For each debug-info element of the scalar-replaced array `sv`, materialize its
      // StackValue from the frame/register map and store it into the freshly
      // reallocated array `obj` (obj_at_put applies the required store barriers).
1451 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1452   for (int i = 0; i < sv->field_size(); i++) {
1453     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1454     assert(value->type() == T_OBJECT, "object element expected");
1455     obj->obj_at_put(i, value->get_obj()());
1456   }
1457 }
1458 
      // Plain (offset, type) record describing one instance field whose value must be
      // written back into a reallocated object during deoptimization.
1459 class ReassignedField {
1460 public:
1461   int _offset;
1462   BasicType _type;



      // NOTE(review): this second 'public:' is redundant — the section above is already public.
1463 public:
1464   ReassignedField() {
1465     _offset = 0;
1466     _type = T_ILLEGAL;
1467   }
1468 };
1469 
1470 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
      // Recurses into the superclass first so superclass fields are appended before the
      // subclass's own, then appends every non-static declared field. Injected fields
      // are included only for JVMCI-compiled code (see the filter below). Returns the
      // same `fields` array that was passed in, to allow call chaining.
1471 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1472   InstanceKlass* super = klass->super();
1473   if (super != nullptr) {
1474     get_reassigned_fields(super, fields, is_jvmci);
1475   }
1476   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1477     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1478       ReassignedField field;
1479       field._offset = fs.offset();
1480       field._type = Signature::basic_type(fs.signature());






1481       fields->append(field);
1482     }
1483   }
1484   return fields;
1485 }
1486 
1487 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1488 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci) {

1489   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1490   for (int i = 0; i < fields->length(); i++) {



















1491     ScopeValue* scope_field = sv->field_at(svIndex);
1492     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1493     int offset = fields->at(i)._offset;
1494     BasicType type = fields->at(i)._type;
1495     switch (type) {
1496       case T_OBJECT: case T_ARRAY:

1497         assert(value->type() == T_OBJECT, "Agreement.");
1498         obj->obj_field_put(offset, value->get_obj()());
1499         break;
1500 
1501       case T_INT: case T_FLOAT: { // 4 bytes.
1502         assert(value->type() == T_INT, "Agreement.");
1503         bool big_value = false;
1504         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1505           if (scope_field->is_location()) {
1506             Location::Type type = ((LocationValue*) scope_field)->location().type();
1507             if (type == Location::dbl || type == Location::lng) {
1508               big_value = true;
1509             }
1510           }
1511           if (scope_field->is_constant_int()) {
1512             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1513             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1514               big_value = true;
1515             }
1516           }

1547       case T_CHAR:
1548         assert(value->type() == T_INT, "Agreement.");
1549         obj->char_field_put(offset, (jchar)value->get_jint());
1550         break;
1551 
1552       case T_BYTE:
1553         assert(value->type() == T_INT, "Agreement.");
1554         obj->byte_field_put(offset, (jbyte)value->get_jint());
1555         break;
1556 
1557       case T_BOOLEAN:
1558         assert(value->type() == T_INT, "Agreement.");
1559         obj->bool_field_put(offset, (jboolean)value->get_jint());
1560         break;
1561 
1562       default:
1563         ShouldNotReachHere();
1564     }
1565     svIndex++;
1566   }

1567   return svIndex;
1568 }
1569 























1570 // restore fields of all eliminated objects and arrays
      // Walks every rematerialized ObjectValue and writes its saved field/element values
      // into the reallocated oop. Objects whose reallocation failed (null handle) and
      // JVMCI autobox-cache hits are skipped. Ends with a storestore barrier so the
      // initializing stores cannot be reordered with the stores that publish the objects.
1571 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci) {
1572   for (int i = 0; i < objects->length(); i++) {
1573     assert(objects->at(i)->is_object(), "invalid debug information");
1574     ObjectValue* sv = (ObjectValue*) objects->at(i);
1575     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());


1576     Handle obj = sv->value();
1577     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1578 #ifndef PRODUCT
1579     if (PrintDeoptimizationDetails) {
1580       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1581     }
1582 #endif // !PRODUCT
1583 
1584     if (obj.is_null()) {
1585       continue;
1586     }
1587 
1588 #if INCLUDE_JVMCI
1589     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1590     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1591       continue;
1592     }
1593 #endif // INCLUDE_JVMCI
1594     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1595       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1596       ScopeValue* payload = sv->field_at(0);
1597       if (payload->is_location() &&
1598           payload->as_LocationValue()->location().type() == Location::vector) {
1599 #ifndef PRODUCT
1600         if (PrintDeoptimizationDetails) {
1601           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1602           if (Verbose) {
1603             Handle obj = sv->value();
1604             k->oop_print_on(obj(), tty);
1605           }
1606         }
1607 #endif // !PRODUCT
1608         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1609       }
1610       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1611       // which could be restored after vector object allocation.
1612     }
      // Dispatch on the klass kind: instances, primitive arrays, object arrays.
1613     if (k->is_instance_klass()) {
1614       InstanceKlass* ik = InstanceKlass::cast(k);
1615       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci);



1616     } else if (k->is_typeArray_klass()) {
1617       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1618       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1619     } else if (k->is_objArray_klass()) {
1620       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1621     }
1622   }
1623   // These objects may escape when we return to Interpreter after deoptimization.
1624   // We need barrier so that stores that initialize these objects can't be reordered
1625   // with subsequent stores that make these objects accessible by other threads.
1626   OrderAccess::storestore();
1627 }
1628 
1629 
1630 // relock objects for which synchronization was eliminated
1631 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1632                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1633   bool relocked_objects = false;
1634   for (int i = 0; i < monitors->length(); i++) {
1635     MonitorInfo* mon_info = monitors->at(i);
1636     if (mon_info->eliminated()) {
1637       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1638       relocked_objects = true;
1639       if (!mon_info->owner_is_scalar_replaced()) {

1777     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1778     nm->log_identity(xtty);
1779     xtty->end_head();
1780     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1781       xtty->begin_elem("jvms bci='%d'", sd->bci());
1782       xtty->method(sd->method());
1783       xtty->end_elem();
1784       if (sd->is_top())  break;
1785     }
1786     xtty->tail("deoptimized");
1787   }
1788 
1789   Continuation::notify_deopt(thread, fr.sp());
1790 
1791   // Patch the compiled method so that when execution returns to it we will
1792   // deopt the execution state and return to the interpreter.
1793   fr.deoptimize(thread);
1794 }
1795 
      // Public entry: deoptimize a single frame of `thread`, guarding against frames
      // that are not compiled or were already patched.
1796 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1797   // Deoptimize only if the frame comes from compiled code.
1798   // Do not deoptimize the frame which is already patched
1799   // during the execution of the loops below.
1800   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1801     return;
1802   }
1803   ResourceMark rm;
1804   deoptimize_single_frame(thread, fr, reason);
1805 }
1806 
1807 #if INCLUDE_JVMCI
1808 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1809   // there is no exception handler for this pc => deoptimize
1810   nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1811 
1812   // Use Deoptimization::deoptimize for all of its side-effects:
1813   // gathering traps statistics, logging...
1814   // it also patches the return pc but we do not care about that
1815   // since we return a continuation to the deopt_blob below.
1816   JavaThread* thread = JavaThread::current();
1817   RegisterMap reg_map(thread,

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/inlineKlass.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/typeArrayOop.inline.hpp"
  61 #include "oops/verifyOopClosure.hpp"
  62 #include "prims/jvmtiDeferredUpdates.hpp"
  63 #include "prims/jvmtiExport.hpp"
  64 #include "prims/jvmtiThreadState.hpp"
  65 #include "prims/methodHandles.hpp"
  66 #include "prims/vectorSupport.hpp"
  67 #include "runtime/atomicAccess.hpp"
  68 #include "runtime/basicLock.inline.hpp"
  69 #include "runtime/continuation.hpp"
  70 #include "runtime/continuationEntry.inline.hpp"
  71 #include "runtime/deoptimization.hpp"
  72 #include "runtime/escapeBarrier.hpp"
  73 #include "runtime/fieldDescriptor.inline.hpp"
  74 #include "runtime/frame.inline.hpp"
  75 #include "runtime/handles.inline.hpp"

 284 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 285 // which is called from the method fetch_unroll_info_helper below.
      // Runtime entry for the deopt blob. Returns the UnrollBlock describing how the
      // compiled frame is to be unpacked; the actual work (including reallocation of
      // previously eliminated objects) happens in fetch_unroll_info_helper().
 286 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 287   // fetch_unroll_info() is called at the beginning of the deoptimization
 288   // handler. Note this fact before we start generating temporary frames
 289   // that can confuse an asynchronous stack walker. This counter is
 290   // decremented at the end of unpack_frames().
 291   current->inc_in_deopt_handler();
 292 
 293   if (exec_mode == Unpack_exception) {
 294     // When we get here, a callee has thrown an exception into a deoptimized
 295     // frame. That throw might have deferred stack watermark checking until
 296     // after unwinding. So we deal with such deferred requests here.
 297     StackWatermarkSet::after_unwind(current);
 298   }
 299 
 300   return fetch_unroll_info_helper(current, exec_mode);
 301 JRT_END
 302 
 303 #if COMPILER2_OR_JVMCI
 304 
      // Refines a plain object-array klass with the array properties recorded in the
      // debug info (C2 only); Graal/JVMCI falls back to DEFAULT properties (see TODO).
      // Non-array and primitive-array klasses pass through unchanged.
 305 static Klass* get_refined_array_klass(Klass* k, frame* fr, RegisterMap* map, ObjectValue* sv, TRAPS) {
 306   // If it's an array, get the properties
 307   if (k->is_array_klass() && !k->is_typeArray_klass()) {
 308     assert(!k->is_refArray_klass() && !k->is_flatArray_klass(), "Unexpected refined klass");
      // NOTE(review): nm comes from as_nmethod_or_null() but is dereferenced without a
      // null check on the next line — confirm fr->cb() is always an nmethod here.
 309     nmethod* nm = fr->cb()->as_nmethod_or_null();
 310     if (nm->is_compiled_by_c2()) {
 311       assert(sv->has_properties(), "Property information is missing");
 312       ArrayKlass::ArrayProperties props = static_cast<ArrayKlass::ArrayProperties>(StackValue::create_stack_value(fr, map, sv->properties())->get_jint());
 313       k = ObjArrayKlass::cast(k)->klass_with_properties(props, THREAD);
 314     } else {
 315       // TODO Graal needs to be fixed. Just go with the default properties for now
 316       k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayKlass::ArrayProperties::DEFAULT, THREAD);
 317     }
 318   }
 319   return k;
 320 }
 321 
 322 // print information about reallocated objects
      // Valhalla variant: also takes the frame/register map so the printed klass can be
      // refined with array properties via get_refined_array_klass(). Output is buffered
      // in a stringStream and emitted with one print_raw at the end.
      // NOTE(review): 'fd' and the 'realloc_failures' parameter appear unused here.
 323 static void print_objects(JavaThread* deoptee_thread, frame* deoptee, RegisterMap* map,
 324                           GrowableArray<ScopeValue*>* objects, bool realloc_failures, TRAPS) {
 325   ResourceMark rm;
 326   stringStream st;  // change to logStream with logging
 327   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 328   fieldDescriptor fd;
 329 
 330   for (int i = 0; i < objects->length(); i++) {
 331     ObjectValue* sv = (ObjectValue*) objects->at(i);
 332     Handle obj = sv->value();
 333 
 334     if (obj.is_null()) {
      // A null handle here means reallocation of this object did not produce an oop.
 335       st.print_cr("     nullptr");
 336       continue;
 337     }
 338 
 339     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 340     k = get_refined_array_klass(k, deoptee, map, sv, THREAD);
 341 
 342     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 343     k->print_value_on(&st);
 344     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 345 
 346     if (Verbose && k != nullptr) {
 347       k->oop_print_on(obj(), &st);
 348     }
 349   }
 350   tty->print_raw(st.freeze());
 351 }
 352 
 353 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 354                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 355                                   bool& deoptimized_objects) {
 356   bool realloc_failures = false;
 357   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 358 
 359   JavaThread* deoptee_thread = chunk->at(0)->thread();
 360   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 361          "a frame can only be deoptimized by the owner thread");
 362 
 363   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 364 
 365   // The flag return_oop() indicates call sites which return oop
 366   // in compiled code. Such sites include java method calls,
 367   // runtime calls (for example, used to allocate new objects/arrays
 368   // on slow code path) and any other calls generated in compiled code.
 369   // It is not guaranteed that we can get such information here only
 370   // by analyzing bytecode in deoptimized frames. This is why this flag
 371   // is set during method compilation (see Compile::Process_OopMap_Node()).
 372   // If the previous frame was popped or if we are dispatching an exception,
 373   // we don't have an oop result.
 374   ScopeDesc* scope = chunk->at(0)->scope();
 375   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 376   // In case of the return of multiple values, we must take care
 377   // of all oop return values.
 378   GrowableArray<Handle> return_oops;
 379   InlineKlass* vk = nullptr;
 380   if (save_oop_result && scope->return_scalarized()) {
 381     vk = InlineKlass::returned_inline_klass(map);
 382     if (vk != nullptr) {
 383       vk->save_oop_fields(map, return_oops);
 384       save_oop_result = false;
 385     }
 386   }
 387   if (save_oop_result) {
 388     // Reallocation may trigger GC. If deoptimization happened on return from
 389     // call which returns oop we need to save it since it is not in oopmap.
 390     oop result = deoptee.saved_oop_result(&map);
 391     assert(oopDesc::is_oop_or_null(result), "must be oop");
 392     return_oops.push(Handle(thread, result));
 393     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 394     if (TraceDeoptimization) {
 395       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 396       tty->cr();
 397     }
 398   }
 399   if (objects != nullptr || vk != nullptr) {
 400     if (exec_mode == Deoptimization::Unpack_none) {
 401       assert(thread->thread_state() == _thread_in_vm, "assumption");
 402       JavaThread* THREAD = thread; // For exception macros.
 403       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 404       if (vk != nullptr) {
 405         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 406       }
 407       if (objects != nullptr) {
 408         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 409         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 410         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 411         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 412       }
 413       deoptimized_objects = true;
 414     } else {
 415       JavaThread* current = thread; // For JRT_BLOCK
 416       JRT_BLOCK
 417       if (vk != nullptr) {
 418         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 419       }
 420       if (objects != nullptr) {
 421         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 422         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 423         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 424         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 425       }
 426       JRT_END
 427     }
 428     if (TraceDeoptimization && objects != nullptr) {
 429       print_objects(deoptee_thread, &deoptee, &map, objects, realloc_failures, thread);



 430     }
 431   }
 432   if (save_oop_result || vk != nullptr) {
 433     // Restore result.
 434     assert(return_oops.length() == 1, "no inline type");
 435     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 436   }
 437   return realloc_failures;
 438 }
 439 
 440 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 441                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 442   JavaThread* deoptee_thread = chunk->at(0)->thread();
 443   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 444   assert(thread == Thread::current(), "should be");
 445   HandleMark hm(thread);
 446 #ifndef PRODUCT
 447   bool first = true;
 448 #endif // !PRODUCT
 449   // Start locking from outermost/oldest frame
 450   for (int i = (chunk->length() - 1); i >= 0; i--) {
 451     compiledVFrame* cvf = chunk->at(i);
 452     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 453     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 454     if (monitors->is_nonempty()) {
 455       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 746   // its caller's stack by. If the caller is a compiled frame then
 747   // we pretend that the callee has no parameters so that the
 748   // extension counts for the full amount of locals and not just
 749   // locals-parms. This is because without a c2i adapter the parm
 750   // area as created by the compiled frame will not be usable by
 751   // the interpreter. (Depending on the calling convention there
 752   // may not even be enough space).
 753 
 754   // QQQ I'd rather see this pushed down into last_frame_adjust
 755   // and have it take the sender (aka caller).
 756 
 757   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 758     caller_adjustment = last_frame_adjust(0, callee_locals);
 759   } else if (callee_locals > callee_parameters) {
 760     // The caller frame may need extending to accommodate
 761     // non-parameter locals of the first unpacked interpreted frame.
 762     // Compute that adjustment.
 763     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 764   }
 765 
 766   // If the sender is deoptimized we must retrieve the address of the handler
 767   // since the frame will "magically" show the original pc before the deopt
 768   // and we'd undo the deopt.
 769 
 770   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 771   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 772     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 773   }
 774 
 775   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 776 
 777 #if INCLUDE_JVMCI
 778   if (exceptionObject() != nullptr) {
 779     current->set_exception_oop(exceptionObject());
 780     exec_mode = Unpack_exception;
 781     assert(array->element(0)->rethrow_exception(), "must be");
 782   }
 783 #endif
 784 
 785   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 786     assert(current->has_pending_exception(), "should have thrown OOME");

1263        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1264        default:;
1265      }
1266    }
1267    return nullptr;
1268 }
1269 #endif // INCLUDE_JVMCI
1270 
1271 #if COMPILER2_OR_JVMCI
1272 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1273   Handle pending_exception(THREAD, thread->pending_exception());
1274   const char* exception_file = thread->exception_file();
1275   int exception_line = thread->exception_line();
1276   thread->clear_pending_exception();
1277 
1278   bool failures = false;
1279 
1280   for (int i = 0; i < objects->length(); i++) {
1281     assert(objects->at(i)->is_object(), "invalid debug information");
1282     ObjectValue* sv = (ObjectValue*) objects->at(i);

1283     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1284     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1285 
1286     // Check if the object may be null and has an additional null_marker input that needs
1287     // to be checked before using the field values. Skip re-allocation if it is null.
1288     if (k->is_inline_klass() && sv->has_properties()) {
1289       jint null_marker = StackValue::create_stack_value(fr, reg_map, sv->properties())->get_jint();
1290       if (null_marker == 0) {
1291         continue;
1292       }
1293     }
1294 
1295     oop obj = nullptr;
1296     bool cache_init_error = false;
1297     if (k->is_instance_klass()) {
1298 #if INCLUDE_JVMCI
1299       nmethod* nm = fr->cb()->as_nmethod_or_null();
1300       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1301         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1302         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1303         if (obj != nullptr) {
1304           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1305           abv->set_cached(true);
1306         } else if (cache_init_error) {
1307           // Results in an OOME which is valid (as opposed to a class initialization error)
1308           // and is fine for the rare case a cache initialization failing.
1309           failures = true;
1310         }
1311       }
1312 #endif // INCLUDE_JVMCI
1313 
1314       InstanceKlass* ik = InstanceKlass::cast(k);
1315       if (obj == nullptr && !cache_init_error) {
1316         InternalOOMEMark iom(THREAD);
1317         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1318           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1319         } else {
1320           obj = ik->allocate_instance(THREAD);
1321         }
1322       }
1323     } else if (k->is_flatArray_klass()) {
1324       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1325       // Inline type array must be zeroed because not all memory is reassigned
1326       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1327     } else if (k->is_typeArray_klass()) {
1328       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1329       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1330       int len = sv->field_size() / type2size[ak->element_type()];
1331       InternalOOMEMark iom(THREAD);
1332       obj = ak->allocate_instance(len, THREAD);
1333     } else if (k->is_refArray_klass()) {
1334       RefArrayKlass* ak = RefArrayKlass::cast(k);
1335       InternalOOMEMark iom(THREAD);
1336       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1337     }
1338 
1339     if (obj == nullptr) {
1340       failures = true;
1341     }
1342 
1343     assert(sv->value().is_null(), "redundant reallocation");
1344     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1345     CLEAR_PENDING_EXCEPTION;
1346     sv->set_value(obj);
1347   }
1348 
1349   if (failures) {
1350     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1351   } else if (pending_exception.not_null()) {
1352     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1353   }
1354 
1355   return failures;
1356 }
1357 
1358 // We're deoptimizing at the return of a call, inline type fields are
1359 // in registers. When we go back to the interpreter, it will expect a
1360 // reference to an inline type instance. Allocate and initialize it from
1361 // the register values here.
1362 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1363   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1364   if (new_vt == nullptr) {
1365     CLEAR_PENDING_EXCEPTION;
1366     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1367   }
1368   return_oops.clear();
1369   return_oops.push(Handle(THREAD, new_vt));
1370   return false;
1371 }
1372 
1373 #if INCLUDE_JVMCI
1374 /**
1375  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1376  * we need to somehow be able to recover the actual kind to be able to write the correct
1377  * amount of bytes.
 1378  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1379  * the entries at index i + 1 to i + n - 1 are 'markers'.
1380  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1381  * expected form of the array would be:
1382  *
1383  * {b0, b1, b2, b3, INT, marker, b6, b7}
1384  *
1385  * Thus, in order to get back the size of the entry, we simply need to count the number
1386  * of marked entries
1387  *
1388  * @param virtualArray the virtualized byte array
1389  * @param i index of the virtual entry we are recovering
1390  * @return The number of bytes the entry spans
1391  */
1392 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1518       default:
1519         ShouldNotReachHere();
1520     }
1521     index++;
1522   }
1523 }
1524 
1525 // restore fields of an eliminated object array
1526 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1527   for (int i = 0; i < sv->field_size(); i++) {
1528     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1529     assert(value->type() == T_OBJECT, "object element expected");
1530     obj->obj_at_put(i, value->get_obj()());
1531   }
1532 }
1533 
1534 class ReassignedField {
1535 public:
1536   int _offset;
1537   BasicType _type;
1538   InstanceKlass* _klass;
1539   bool _is_flat;
1540   bool _is_null_free;
1541 public:
1542   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }



1543 };
1544 
1545 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1546 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1547   InstanceKlass* super = klass->super();
1548   if (super != nullptr) {
1549     get_reassigned_fields(super, fields, is_jvmci);
1550   }
1551   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1552     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1553       ReassignedField field;
1554       field._offset = fs.offset();
1555       field._type = Signature::basic_type(fs.signature());
1556       if (fs.is_flat()) {
1557         field._is_flat = true;
1558         field._is_null_free = fs.is_null_free_inline_type();
1559         // Resolve klass of flat inline type field
1560         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1561       }
1562       fields->append(field);
1563     }
1564   }
1565   return fields;
1566 }
1567 
1568 // Restore fields of an eliminated instance object employing the same field order used by the
1569 // compiler when it scalarizes an object at safepoints.
1570 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, TRAPS) {
1571   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1572   for (int i = 0; i < fields->length(); i++) {
1573     BasicType type = fields->at(i)._type;
1574     int offset = base_offset + fields->at(i)._offset;
1575     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1576     if (fields->at(i)._is_flat) {
1577       // Recursively re-assign flat inline type fields
1578       InstanceKlass* vk = fields->at(i)._klass;
1579       assert(vk != nullptr, "must be resolved");
1580       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1581       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, CHECK_0);
1582       if (!fields->at(i)._is_null_free) {
1583         ScopeValue* scope_field = sv->field_at(svIndex);
1584         StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1585         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1586         obj->bool_field_put(nm_offset, value->get_jint() & 1);
1587         svIndex++;
1588       }
1589       continue; // Continue because we don't need to increment svIndex
1590     }
1591 
1592     ScopeValue* scope_field = sv->field_at(svIndex);
1593     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);


1594     switch (type) {
1595       case T_OBJECT:
1596       case T_ARRAY:
1597         assert(value->type() == T_OBJECT, "Agreement.");
1598         obj->obj_field_put(offset, value->get_obj()());
1599         break;
1600 
1601       case T_INT: case T_FLOAT: { // 4 bytes.
1602         assert(value->type() == T_INT, "Agreement.");
1603         bool big_value = false;
1604         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1605           if (scope_field->is_location()) {
1606             Location::Type type = ((LocationValue*) scope_field)->location().type();
1607             if (type == Location::dbl || type == Location::lng) {
1608               big_value = true;
1609             }
1610           }
1611           if (scope_field->is_constant_int()) {
1612             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1613             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1614               big_value = true;
1615             }
1616           }

1647       case T_CHAR:
1648         assert(value->type() == T_INT, "Agreement.");
1649         obj->char_field_put(offset, (jchar)value->get_jint());
1650         break;
1651 
1652       case T_BYTE:
1653         assert(value->type() == T_INT, "Agreement.");
1654         obj->byte_field_put(offset, (jbyte)value->get_jint());
1655         break;
1656 
1657       case T_BOOLEAN:
1658         assert(value->type() == T_INT, "Agreement.");
1659         obj->bool_field_put(offset, (jboolean)value->get_jint());
1660         break;
1661 
1662       default:
1663         ShouldNotReachHere();
1664     }
1665     svIndex++;
1666   }
1667 
1668   return svIndex;
1669 }
1670 
1671 // restore fields of an eliminated inline type array
1672 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1673   InlineKlass* vk = vak->element_klass();
1674   assert(vk->maybe_flat_in_array(), "should only be used for flat inline type arrays");
1675   // Adjust offset to omit oop header
1676   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - vk->payload_offset();
1677   // Initialize all elements of the flat inline type array
1678   for (int i = 0; i < sv->field_size(); i++) {
1679     ObjectValue* val = sv->field_at(i)->as_ObjectValue();
1680     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1681     reassign_fields_by_klass(vk, fr, reg_map, val, 0, (oop)obj, is_jvmci, offset, CHECK);
1682     if (!obj->is_null_free_array()) {
1683       jboolean null_marker_value;
1684       if (val->has_properties()) {
1685         null_marker_value = StackValue::create_stack_value(fr, reg_map, val->properties())->get_jint() & 1;
1686       } else {
1687         null_marker_value = 1;
1688       }
1689       obj->bool_field_put(offset + vk->null_marker_offset(), null_marker_value);
1690     }
1691   }
1692 }
1693 
// Restore the fields of all eliminated (scalar-replaced) objects and arrays
// that were reallocated by realloc_objects(): walk the debug-info object list
// and copy each saved field/element value into the new heap instance.
void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
  for (int i = 0; i < objects->length(); i++) {
    assert(objects->at(i)->is_object(), "invalid debug information");
    ObjectValue* sv = (ObjectValue*) objects->at(i);
    Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
    k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);

    Handle obj = sv->value();
    // obj may only be null when reallocation failed, or when this is a
    // scalar-replaced inline object whose null marker said it was null
    // (has_properties()) and realloc_objects() therefore skipped it.
    assert(obj.not_null() || realloc_failures || sv->has_properties(), "reallocation was missed");
#ifndef PRODUCT
    if (PrintDeoptimizationDetails) {
      tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
    }
#endif // !PRODUCT

    // Nothing to reassign when no instance was (or needed to be) reallocated.
    if (obj.is_null()) {
      continue;
    }

#if INCLUDE_JVMCI
    // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
    if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
      continue;
    }
#endif // INCLUDE_JVMCI
    if (EnableVectorSupport && VectorSupport::is_vector(k)) {
      // A vector is described by a single payload scope value.
      assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
      ScopeValue* payload = sv->field_at(0);
      if (payload->is_location() &&
          payload->as_LocationValue()->location().type() == Location::vector) {
#ifndef PRODUCT
        if (PrintDeoptimizationDetails) {
          tty->print_cr("skip field reassignment for this vector - it should be assigned already");
          if (Verbose) {
            Handle obj = sv->value();
            k->oop_print_on(obj(), tty);
          }
        }
#endif // !PRODUCT
        continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
      }
      // Else fall-through to do assignment for scalar-replaced boxed vector representation
      // which could be restored after vector object allocation.
    }
    // Dispatch on the runtime kind of the reallocated object.
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, CHECK);
    } else if (k->is_flatArray_klass()) {
      FlatArrayKlass* vak = FlatArrayKlass::cast(k);
      reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
    } else if (k->is_typeArray_klass()) {
      TypeArrayKlass* ak = TypeArrayKlass::cast(k);
      reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
    } else if (k->is_refArray_klass()) {
      reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
    }
  }
  // These objects may escape when we return to Interpreter after deoptimization.
  // We need barrier so that stores that initialize these objects can't be reordered
  // with subsequent stores that make these objects accessible by other threads.
  OrderAccess::storestore();
}
1757 
1758 
1759 // relock objects for which synchronization was eliminated
1760 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1761                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1762   bool relocked_objects = false;
1763   for (int i = 0; i < monitors->length(); i++) {
1764     MonitorInfo* mon_info = monitors->at(i);
1765     if (mon_info->eliminated()) {
1766       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1767       relocked_objects = true;
1768       if (!mon_info->owner_is_scalar_replaced()) {

1906     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1907     nm->log_identity(xtty);
1908     xtty->end_head();
1909     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1910       xtty->begin_elem("jvms bci='%d'", sd->bci());
1911       xtty->method(sd->method());
1912       xtty->end_elem();
1913       if (sd->is_top())  break;
1914     }
1915     xtty->tail("deoptimized");
1916   }
1917 
1918   Continuation::notify_deopt(thread, fr.sp());
1919 
1920   // Patch the compiled method so that when execution returns to it we will
1921   // deopt the execution state and return to the interpreter.
1922   fr.deoptimize(thread);
1923 }
1924 
1925 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1926   // Deoptimize only if the frame comes from compiled code.
1927   // Do not deoptimize the frame which is already patched
1928   // during the execution of the loops below.
1929   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1930     return;
1931   }
1932   ResourceMark rm;
1933   deoptimize_single_frame(thread, fr, reason);
1934 }
1935 
1936 #if INCLUDE_JVMCI
1937 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1938   // there is no exception handler for this pc => deoptimize
1939   nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1940 
1941   // Use Deoptimization::deoptimize for all of its side-effects:
1942   // gathering traps statistics, logging...
1943   // it also patches the return pc but we do not care about that
1944   // since we return a continuation to the deopt_blob below.
1945   JavaThread* thread = JavaThread::current();
1946   RegisterMap reg_map(thread,
< prev index next >