src/hotspot/share/runtime/deoptimization.cpp

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/bytecode.inline.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/method.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"
  58 #include "oops/typeArrayOop.inline.hpp"
  59 #include "oops/verifyOopClosure.hpp"
  60 #include "prims/jvmtiDeferredUpdates.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "prims/jvmtiThreadState.hpp"
  63 #include "prims/methodHandles.hpp"
  64 #include "prims/vectorSupport.hpp"
  65 #include "runtime/atomic.hpp"
  66 #include "runtime/basicLock.inline.hpp"
  67 #include "runtime/continuation.hpp"
  68 #include "runtime/continuationEntry.inline.hpp"
  69 #include "runtime/deoptimization.hpp"
  70 #include "runtime/escapeBarrier.hpp"
  71 #include "runtime/fieldDescriptor.hpp"
  72 #include "runtime/fieldDescriptor.inline.hpp"
  73 #include "runtime/frame.inline.hpp"
  74 #include "runtime/handles.inline.hpp"
  75 #include "runtime/interfaceSupport.inline.hpp"
  76 #include "runtime/javaThread.hpp"
  77 #include "runtime/jniHandles.inline.hpp"

 283 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 284 // which is called from the method fetch_unroll_info_helper below.
 285 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 286   // fetch_unroll_info() is called at the beginning of the deoptimization
 287   // handler. Note this fact before we start generating temporary frames
 288   // that can confuse an asynchronous stack walker. This counter is
 289   // decremented at the end of unpack_frames().
 290   current->inc_in_deopt_handler();
 291 
 292   if (exec_mode == Unpack_exception) {
 293     // When we get here, a callee has thrown an exception into a deoptimized
 294     // frame. That throw might have deferred stack watermark checking until
 295     // after unwinding. So we deal with such deferred requests here.
 296     StackWatermarkSet::after_unwind(current);
 297   }
 298 
 299   return fetch_unroll_info_helper(current, exec_mode);
 300 JRT_END
 301 
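The inc_in_deopt_handler() / unpack_frames() pairing described above brackets the
window in which temporary frames exist. A minimal standalone sketch of that
invariant (hypothetical names; in HotSpot the counter is a per-JavaThread field,
not a global):

    #include <atomic>

    // Counter bracketing the deopt-handler window (sketch).
    static std::atomic<int> g_in_deopt_handler{0};

    void enter_deopt_handler() { g_in_deopt_handler.fetch_add(1); } // fetch_unroll_info()
    void leave_deopt_handler() { g_in_deopt_handler.fetch_sub(1); } // end of unpack_frames()

    // An asynchronous stack walker can consult the counter and defer
    // walking while temporary frames may be present.
    bool stack_walk_may_see_temp_frames() { return g_in_deopt_handler.load() > 0; }
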
 302 #if COMPILER2_OR_JVMCI
 303 // print information about reallocated objects
 304 static void print_objects(JavaThread* deoptee_thread,
 305                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 306   ResourceMark rm;
 307   stringStream st;  // change to logStream with logging
 308   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 309   fieldDescriptor fd;
 310 
 311   for (int i = 0; i < objects->length(); i++) {
 312     ObjectValue* sv = (ObjectValue*) objects->at(i);
 313     Handle obj = sv->value();
 314 
 315     if (obj.is_null()) {
 316       st.print_cr("     nullptr");
 317       continue;
 318     }
 319 
 320     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 321 
 322     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 323     k->print_value_on(&st);
 324     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 325 
 326     if (Verbose && k != nullptr) {
 327       k->oop_print_on(obj(), &st);
 328     }
 329   }
 330   tty->print_raw(st.freeze());
 331 }
 332 
 333 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 334                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 335                                   bool& deoptimized_objects) {
 336   bool realloc_failures = false;
 337   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 338 
 339   JavaThread* deoptee_thread = chunk->at(0)->thread();
 340   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 341          "a frame can only be deoptimized by the owner thread");
 342 
 343   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 344 
 345   // The flag return_oop() indicates call sites which return an oop
 346   // in compiled code. Such sites include Java method calls,
 347   // runtime calls (for example, used to allocate new objects/arrays
 348   // on the slow code path) and any other calls generated in compiled code.
 349   // It is not guaranteed that we can get such information here only
 350   // by analyzing bytecode in deoptimized frames. This is why this flag
 351   // is set during method compilation (see Compile::Process_OopMap_Node()).
 352   // If the previous frame was popped or if we are dispatching an exception,
 353   // we don't have an oop result.
 354   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 355   Handle return_value;
 356   if (save_oop_result) {
 357     // Reallocation may trigger GC. If deoptimization happened on return from
 358     // a call which returns an oop, we need to save it since it is not in the oopmap.
 359     oop result = deoptee.saved_oop_result(&map);
 360     assert(oopDesc::is_oop_or_null(result), "must be oop");
 361     return_value = Handle(thread, result);
 362     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 363     if (TraceDeoptimization) {
 364       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 365       tty->cr();
 366     }
 367   }
 368   if (objects != nullptr) {
 369     if (exec_mode == Deoptimization::Unpack_none) {
 370       assert(thread->thread_state() == _thread_in_vm, "assumption");
 371       JavaThread* THREAD = thread; // For exception macros.
 372       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 373       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 374       deoptimized_objects = true;
 375     } else {
 376       JavaThread* current = thread; // For JRT_BLOCK
 377       JRT_BLOCK
 378       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 379       JRT_END
 380     }
 381     guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 382     bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 383     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci);
 384     if (TraceDeoptimization) {
 385       print_objects(deoptee_thread, objects, realloc_failures);
 386     }
 387   }
 388   if (save_oop_result) {
 389     // Restore result.
 390     deoptee.set_saved_oop_result(&map, return_value());
 391   }
 392   return realloc_failures;
 393 }
 394 
 395 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 396                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 397   JavaThread* deoptee_thread = chunk->at(0)->thread();
 398   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 399   assert(thread == Thread::current(), "should be");
 400   HandleMark hm(thread);
 401 #ifndef PRODUCT
 402   bool first = true;
 403 #endif // !PRODUCT
 404   // Start locking from outermost/oldest frame
 405   for (int i = (chunk->length() - 1); i >= 0; i--) {
 406     compiledVFrame* cvf = chunk->at(i);
 407     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 408     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 409     if (monitors->is_nonempty()) {
 410       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 706   // its caller's stack by. If the caller is a compiled frame then
 707   // we pretend that the callee has no parameters so that the
 708   // extension counts for the full amount of locals and not just
 709   // locals-parms. This is because without a c2i adapter the parm
 710   // area as created by the compiled frame will not be usable by
 711   // the interpreter. (Depending on the calling convention there
 712   // may not even be enough space).
 713 
 714   // QQQ I'd rather see this pushed down into last_frame_adjust
 715   // and have it take the sender (aka caller).
 716 
 717   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 718     caller_adjustment = last_frame_adjust(0, callee_locals);
 719   } else if (callee_locals > callee_parameters) {
 720     // The caller frame may need extending to accommodate
 721     // non-parameter locals of the first unpacked interpreted frame.
 722     // Compute that adjustment.
 723     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 724   }
 725 
 726   // If the sender is deoptimized the we must retrieve the address of the handler
 727   // since the frame will "magically" show the original pc before the deopt
 728   // and we'd undo the deopt.
 729 
 730   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 731   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 732     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 733   }
 734 
 735   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 736 
 737 #if INCLUDE_JVMCI
 738   if (exceptionObject() != nullptr) {
 739     current->set_exception_oop(exceptionObject());
 740     exec_mode = Unpack_exception;
 741     assert(array->element(0)->rethrow_exception(), "must be");
 742   }
 743 #endif
 744 
 745   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 746     assert(current->has_pending_exception(), "should have thrown OOME");

1223        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1224        default:;
1225      }
1226    }
1227    return nullptr;
1228 }
1229 #endif // INCLUDE_JVMCI
1230 
1231 #if COMPILER2_OR_JVMCI
1232 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1233   Handle pending_exception(THREAD, thread->pending_exception());
1234   const char* exception_file = thread->exception_file();
1235   int exception_line = thread->exception_line();
1236   thread->clear_pending_exception();
1237 
1238   bool failures = false;
1239 
1240   for (int i = 0; i < objects->length(); i++) {
1241     assert(objects->at(i)->is_object(), "invalid debug information");
1242     ObjectValue* sv = (ObjectValue*) objects->at(i);
1243 
1244     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1245     oop obj = nullptr;
1246 
1247     bool cache_init_error = false;
1248     if (k->is_instance_klass()) {
1249 #if INCLUDE_JVMCI
1250       nmethod* nm = fr->cb()->as_nmethod_or_null();
1251       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1252         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1253         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1254         if (obj != nullptr) {
1255           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1256           abv->set_cached(true);
1257         } else if (cache_init_error) {
1258           // Results in an OOME which is valid (as opposed to a class initialization error)
 1259           // and is fine for the rare case of a cache initialization failing.
1260           failures = true;
1261         }
1262       }
1263 #endif // INCLUDE_JVMCI
1264 
1265       InstanceKlass* ik = InstanceKlass::cast(k);
1266       if (obj == nullptr && !cache_init_error) {
1267         InternalOOMEMark iom(THREAD);
1268         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1269           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1270         } else {
1271           obj = ik->allocate_instance(THREAD);
1272         }
1273       }
1274     } else if (k->is_typeArray_klass()) {
1275       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1276       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1277       int len = sv->field_size() / type2size[ak->element_type()];
1278       InternalOOMEMark iom(THREAD);
1279       obj = ak->allocate_instance(len, THREAD);
1280     } else if (k->is_objArray_klass()) {
1281       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1282       InternalOOMEMark iom(THREAD);
1283       obj = ak->allocate_instance(sv->field_size(), THREAD);
1284     }
1285 
1286     if (obj == nullptr) {
1287       failures = true;
1288     }
1289 
1290     assert(sv->value().is_null(), "redundant reallocation");
1291     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1292     CLEAR_PENDING_EXCEPTION;
1293     sv->set_value(obj);
1294   }
1295 
1296   if (failures) {
1297     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1298   } else if (pending_exception.not_null()) {
1299     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1300   }
1301 
1302   return failures;
1303 }
1304 
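realloc_objects above saves and clears any exception pending on entry, attempts
the allocations, and on exit either throws one summary OOME or re-installs the
saved exception. A standalone C++ model of that protocol (hypothetical types;
HotSpot uses pending-exception slots and the TRAPS/CHECK macros, not
std::optional):

    #include <optional>
    #include <string>
    #include <vector>

    struct ModelThread {
      std::optional<std::string> pending;  // models thread->pending_exception()
    };

    // Attempt a batch of allocations; returns true if any failed.
    bool realloc_all(ModelThread& t, const std::vector<bool>& alloc_ok) {
      std::optional<std::string> saved = t.pending;  // save exception pending on entry
      t.pending.reset();                             // clear it for the duration
      bool failures = false;
      for (bool ok : alloc_ok) {
        if (!ok) {
          failures = true;   // remember the failure...
          t.pending.reset(); // ...but clear the per-allocation OOME
        }
      }
      if (failures) {
        t.pending = "OutOfMemoryError(realloc_objects)"; // one summary OOME
      } else {
        t.pending = saved;   // re-install the originally pending exception
      }
      return failures;
    }
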
1305 #if INCLUDE_JVMCI
1306 /**
1307  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1308  * we need to somehow be able to recover the actual kind to be able to write the correct
1309  * amount of bytes.
1310  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1311  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1312  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1313  * expected form of the array would be:
1314  *
1315  * {b0, b1, b2, b3, INT, marker, b6, b7}
1316  *
1317  * Thus, in order to get back the size of the entry, we simply need to count the number
 1318  * of marker entries that follow it; the span is that count plus one.
1319  *
1320  * @param virtualArray the virtualized byte array
1321  * @param i index of the virtual entry we are recovering
1322  * @return The number of bytes the entry spans
1323  */
1324 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
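
The function body is elided in this hunk; a standalone sketch of the
marker-counting scheme described in the comment above (the virtualized array is
modeled as a vector of ints, with a sentinel standing in for 'marker'):

    #include <cassert>
    #include <vector>

    enum : int { MARKER = -1 };  // sentinel standing in for a 'marker' entry

    // Count how many bytes the entry at index i spans: one for the entry
    // itself plus one per marker entry immediately following it.
    static int count_bytes_for_entry(const std::vector<int>& entries, int i) {
      assert(entries[i] != MARKER && "i must point at a real entry");
      int n = 1;
      while (i + n < (int)entries.size() && entries[i + n] == MARKER) {
        n++;
      }
      return n;
    }

For the example above, {b0, b1, b2, b3, INT, marker, b6, b7} yields 2 at i == 4.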

1450       default:
1451         ShouldNotReachHere();
1452     }
1453     index++;
1454   }
1455 }
1456 
1457 // restore fields of an eliminated object array
1458 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1459   for (int i = 0; i < sv->field_size(); i++) {
1460     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1461     assert(value->type() == T_OBJECT, "object element expected");
1462     obj->obj_at_put(i, value->get_obj()());
1463   }
1464 }
1465 
1466 class ReassignedField {
1467 public:
1468   int _offset;
1469   BasicType _type;
1470 public:
1471   ReassignedField() {
1472     _offset = 0;
1473     _type = T_ILLEGAL;
1474   }
1475 };
1476 
1477 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1478 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1479   InstanceKlass* super = klass->superklass();
1480   if (super != nullptr) {
1481     get_reassigned_fields(super, fields, is_jvmci);
1482   }
1483   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1484     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1485       ReassignedField field;
1486       field._offset = fs.offset();
1487       field._type = Signature::basic_type(fs.signature());
1488       fields->append(field);
1489     }
1490   }
1491   return fields;
1492 }
1493 
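get_reassigned_fields appends superclass fields before the class's own, so the
resulting list is ordered oldest ancestor first, which is the order the compiler
uses when scalarizing. A tiny standalone analogue of that recursion (hypothetical
model types, not the HotSpot Klass hierarchy):

    #include <string>
    #include <vector>

    struct KlassModel {
      const KlassModel* super;
      std::vector<std::string> own_fields;
    };

    // Superclass fields first, then the class's own fields.
    static void collect(const KlassModel* k, std::vector<std::string>& out) {
      if (k->super != nullptr) {
        collect(k->super, out);
      }
      out.insert(out.end(), k->own_fields.begin(), k->own_fields.end());
    }
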
1494 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1495 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci) {
1496   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1497   for (int i = 0; i < fields->length(); i++) {
1498     ScopeValue* scope_field = sv->field_at(svIndex);
1499     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1500     int offset = fields->at(i)._offset;
1501     BasicType type = fields->at(i)._type;
1502     switch (type) {
1503       case T_OBJECT: case T_ARRAY:
1504         assert(value->type() == T_OBJECT, "Agreement.");
1505         obj->obj_field_put(offset, value->get_obj()());
1506         break;
1507 
1508       case T_INT: case T_FLOAT: { // 4 bytes.
1509         assert(value->type() == T_INT, "Agreement.");
1510         bool big_value = false;
1511         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1512           if (scope_field->is_location()) {
1513             Location::Type type = ((LocationValue*) scope_field)->location().type();
1514             if (type == Location::dbl || type == Location::lng) {
1515               big_value = true;
1516             }
1517           }
1518           if (scope_field->is_constant_int()) {
1519             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1520             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1521               big_value = true;
1522             }
1523           }

1554       case T_CHAR:
1555         assert(value->type() == T_INT, "Agreement.");
1556         obj->char_field_put(offset, (jchar)value->get_jint());
1557         break;
1558 
1559       case T_BYTE:
1560         assert(value->type() == T_INT, "Agreement.");
1561         obj->byte_field_put(offset, (jbyte)value->get_jint());
1562         break;
1563 
1564       case T_BOOLEAN:
1565         assert(value->type() == T_INT, "Agreement.");
1566         obj->bool_field_put(offset, (jboolean)value->get_jint());
1567         break;
1568 
1569       default:
1570         ShouldNotReachHere();
1571     }
1572     svIndex++;
1573   }
1574   return svIndex;
1575 }
1576 
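When big_value is detected above, two adjacent 4-byte fields actually hold one
8-byte long/double read from the stack. A hedged sketch of joining two 32-bit
halves into a 64-bit value (hypothetical helper; the real code reads the pair via
StackValue, and which slot holds the high half is ABI- and endianness-dependent):

    #include <cstdint>

    // Join two 32-bit stack slots into one 64-bit value (sketch).
    static int64_t join_halves(uint32_t hi, uint32_t lo) {
      return (int64_t)(((uint64_t)hi << 32) | (uint64_t)lo);
    }
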
1577 // restore fields of all eliminated objects and arrays
1578 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci) {
1579   for (int i = 0; i < objects->length(); i++) {
1580     assert(objects->at(i)->is_object(), "invalid debug information");
1581     ObjectValue* sv = (ObjectValue*) objects->at(i);
1582     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1583     Handle obj = sv->value();
1584     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1585 #ifndef PRODUCT
1586     if (PrintDeoptimizationDetails) {
1587       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1588     }
1589 #endif // !PRODUCT
1590 
1591     if (obj.is_null()) {
1592       continue;
1593     }
1594 
1595 #if INCLUDE_JVMCI
1596     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1597     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1598       continue;
1599     }
1600 #endif // INCLUDE_JVMCI
1601     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1602       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1603       ScopeValue* payload = sv->field_at(0);
1604       if (payload->is_location() &&
1605           payload->as_LocationValue()->location().type() == Location::vector) {
1606 #ifndef PRODUCT
1607         if (PrintDeoptimizationDetails) {
1608           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1609           if (Verbose) {
1610             Handle obj = sv->value();
1611             k->oop_print_on(obj(), tty);
1612           }
1613         }
1614 #endif // !PRODUCT
1615         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1616       }
1617       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1618       // which could be restored after vector object allocation.
1619     }
1620     if (k->is_instance_klass()) {
1621       InstanceKlass* ik = InstanceKlass::cast(k);
1622       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci);
1623     } else if (k->is_typeArray_klass()) {
1624       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1625       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1626     } else if (k->is_objArray_klass()) {
1627       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1628     }
1629   }
1630   // These objects may escape when we return to Interpreter after deoptimization.
 1631   // We need a barrier so that stores that initialize these objects can't be reordered
1632   // with subsequent stores that make these objects accessible by other threads.
1633   OrderAccess::storestore();
1634 }
1635 
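The storestore barrier above is the classic safe-publication constraint:
initializing stores must not be reordered after the store that makes the object
reachable by other threads. In portable C++ the same ordering can be expressed
with a release fence (illustrative sketch, not HotSpot code):

    #include <atomic>

    struct Node { int field; };
    std::atomic<Node*> g_shared{nullptr};

    void publish(Node* n) {
      n->field = 42;                                        // initializing store
      std::atomic_thread_fence(std::memory_order_release);  // ~ storestore barrier
      g_shared.store(n, std::memory_order_relaxed);         // publication store
    }
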
1636 
1637 // relock objects for which synchronization was eliminated
1638 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1639                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1640   bool relocked_objects = false;
1641   for (int i = 0; i < monitors->length(); i++) {
1642     MonitorInfo* mon_info = monitors->at(i);
1643     if (mon_info->eliminated()) {
1644       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1645       relocked_objects = true;
1646       if (!mon_info->owner_is_scalar_replaced()) {

1798     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1799     nm->log_identity(xtty);
1800     xtty->end_head();
1801     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1802       xtty->begin_elem("jvms bci='%d'", sd->bci());
1803       xtty->method(sd->method());
1804       xtty->end_elem();
1805       if (sd->is_top())  break;
1806     }
1807     xtty->tail("deoptimized");
1808   }
1809 
1810   Continuation::notify_deopt(thread, fr.sp());
1811 
1812   // Patch the compiled method so that when execution returns to it we will
1813   // deopt the execution state and return to the interpreter.
1814   fr.deoptimize(thread);
1815 }
1816 
1817 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1818   // Deoptimize only if the frame comes from compile code.
1819   // Do not deoptimize the frame which is already patched
1820   // during the execution of the loops below.
1821   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1822     return;
1823   }
1824   ResourceMark rm;
1825   deoptimize_single_frame(thread, fr, reason);
1826 }
1827 
1828 #if INCLUDE_JVMCI
1829 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1830   // there is no exception handler for this pc => deoptimize
1831   nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1832 
1833   // Use Deoptimization::deoptimize for all of its side-effects:
1834   // gathering traps statistics, logging...
1835   // it also patches the return pc but we do not care about that
1836   // since we return a continuation to the deopt_blob below.
1837   JavaThread* thread = JavaThread::current();
1838   RegisterMap reg_map(thread,

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/bytecode.inline.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/fieldStreams.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/inlineKlass.inline.hpp"
  61 #include "oops/typeArrayOop.inline.hpp"
  62 #include "oops/verifyOopClosure.hpp"
  63 #include "prims/jvmtiDeferredUpdates.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "prims/jvmtiThreadState.hpp"
  66 #include "prims/methodHandles.hpp"
  67 #include "prims/vectorSupport.hpp"
  68 #include "runtime/atomic.hpp"
  69 #include "runtime/basicLock.inline.hpp"
  70 #include "runtime/continuation.hpp"
  71 #include "runtime/continuationEntry.inline.hpp"
  72 #include "runtime/deoptimization.hpp"
  73 #include "runtime/escapeBarrier.hpp"
  74 #include "runtime/fieldDescriptor.hpp"
  75 #include "runtime/fieldDescriptor.inline.hpp"
  76 #include "runtime/frame.inline.hpp"
  77 #include "runtime/handles.inline.hpp"
  78 #include "runtime/interfaceSupport.inline.hpp"
  79 #include "runtime/javaThread.hpp"
  80 #include "runtime/jniHandles.inline.hpp"

 286 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 287 // which is called from the method fetch_unroll_info_helper below.
 288 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 289   // fetch_unroll_info() is called at the beginning of the deoptimization
 290   // handler. Note this fact before we start generating temporary frames
 291   // that can confuse an asynchronous stack walker. This counter is
 292   // decremented at the end of unpack_frames().
 293   current->inc_in_deopt_handler();
 294 
 295   if (exec_mode == Unpack_exception) {
 296     // When we get here, a callee has thrown an exception into a deoptimized
 297     // frame. That throw might have deferred stack watermark checking until
 298     // after unwinding. So we deal with such deferred requests here.
 299     StackWatermarkSet::after_unwind(current);
 300   }
 301 
 302   return fetch_unroll_info_helper(current, exec_mode);
 303 JRT_END
 304 
 305 #if COMPILER2_OR_JVMCI
 306 
 307 static Klass* get_refined_array_klass(Klass* k, frame* fr, RegisterMap* map, ObjectValue* sv, TRAPS) {
 308   // If it's an array, get the properties
 309   if (k->is_array_klass() && !k->is_typeArray_klass()) {
 310     assert(!k->is_refArray_klass() && !k->is_flatArray_klass(), "Unexpected refined klass");
 311     nmethod* nm = fr->cb()->as_nmethod_or_null();
 312     if (nm->is_compiled_by_c2()) {
 313       assert(sv->has_properties(), "Property information is missing");
 314       ArrayKlass::ArrayProperties props = static_cast<ArrayKlass::ArrayProperties>(StackValue::create_stack_value(fr, map, sv->properties())->get_jint());
 315       k = ObjArrayKlass::cast(k)->klass_with_properties(props, THREAD);
 316     } else {
 317       // TODO Graal needs to be fixed. Just go with the default properties for now
 318       k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayKlass::ArrayProperties::DEFAULT, THREAD);
 319     }
 320   }
 321   return k;
 322 }
 323 
 324 // print information about reallocated objects
 325 static void print_objects(JavaThread* deoptee_thread, frame* deoptee, RegisterMap* map,
 326                           GrowableArray<ScopeValue*>* objects, bool realloc_failures, TRAPS) {
 327   ResourceMark rm;
 328   stringStream st;  // change to logStream with logging
 329   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 330   fieldDescriptor fd;
 331 
 332   for (int i = 0; i < objects->length(); i++) {
 333     ObjectValue* sv = (ObjectValue*) objects->at(i);
 334     Handle obj = sv->value();
 335 
 336     if (obj.is_null()) {
 337       st.print_cr("     nullptr");
 338       continue;
 339     }
 340 
 341     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 342     k = get_refined_array_klass(k, deoptee, map, sv, THREAD);
 343 
 344     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 345     k->print_value_on(&st);
 346     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 347 
 348     if (Verbose && k != nullptr) {
 349       k->oop_print_on(obj(), &st);
 350     }
 351   }
 352   tty->print_raw(st.freeze());
 353 }
 354 
 355 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 356                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 357                                   bool& deoptimized_objects) {
 358   bool realloc_failures = false;
 359   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 360 
 361   JavaThread* deoptee_thread = chunk->at(0)->thread();
 362   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 363          "a frame can only be deoptimized by the owner thread");
 364 
 365   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 366 
 367   // The flag return_oop() indicates call sites which return an oop
 368   // in compiled code. Such sites include Java method calls,
 369   // runtime calls (for example, used to allocate new objects/arrays
 370   // on the slow code path) and any other calls generated in compiled code.
 371   // It is not guaranteed that we can get such information here only
 372   // by analyzing bytecode in deoptimized frames. This is why this flag
 373   // is set during method compilation (see Compile::Process_OopMap_Node()).
 374   // If the previous frame was popped or if we are dispatching an exception,
 375   // we don't have an oop result.
 376   ScopeDesc* scope = chunk->at(0)->scope();
 377   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 378   // In case of the return of multiple values, we must take care
 379   // of all oop return values.
 380   GrowableArray<Handle> return_oops;
 381   InlineKlass* vk = nullptr;
 382   if (save_oop_result && scope->return_scalarized()) {
 383     vk = InlineKlass::returned_inline_klass(map);
 384     if (vk != nullptr) {
 385       vk->save_oop_fields(map, return_oops);
 386       save_oop_result = false;
 387     }
 388   }
 389   if (save_oop_result) {
 390     // Reallocation may trigger GC. If deoptimization happened on return from
 391     // a call which returns an oop, we need to save it since it is not in the oopmap.
 392     oop result = deoptee.saved_oop_result(&map);
 393     assert(oopDesc::is_oop_or_null(result), "must be oop");
 394     return_oops.push(Handle(thread, result));
 395     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 396     if (TraceDeoptimization) {
 397       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 398       tty->cr();
 399     }
 400   }
 401   if (objects != nullptr || vk != nullptr) {
 402     if (exec_mode == Deoptimization::Unpack_none) {
 403       assert(thread->thread_state() == _thread_in_vm, "assumption");
 404       JavaThread* THREAD = thread; // For exception macros.
 405       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 406       if (vk != nullptr) {
 407         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 408       }
 409       if (objects != nullptr) {
 410         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 411         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 412         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 413         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 414       }
 415       deoptimized_objects = true;
 416     } else {
 417       JavaThread* current = thread; // For JRT_BLOCK
 418       JRT_BLOCK
 419       if (vk != nullptr) {
 420         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 421       }
 422       if (objects != nullptr) {
 423         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 424         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 425         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 426         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 427       }
 428       JRT_END
 429     }
 430     if (TraceDeoptimization && objects != nullptr) {
 431       print_objects(deoptee_thread, &deoptee, &map, objects, realloc_failures, thread);
 432     }
 433   }
 434   if (save_oop_result || vk != nullptr) {
 435     // Restore result.
 436     assert(return_oops.length() == 1, "no inline type");
 437     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 438   }
 439   return realloc_failures;
 440 }
 441 
 442 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 443                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 444   JavaThread* deoptee_thread = chunk->at(0)->thread();
 445   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 446   assert(thread == Thread::current(), "should be");
 447   HandleMark hm(thread);
 448 #ifndef PRODUCT
 449   bool first = true;
 450 #endif // !PRODUCT
 451   // Start locking from outermost/oldest frame
 452   for (int i = (chunk->length() - 1); i >= 0; i--) {
 453     compiledVFrame* cvf = chunk->at(i);
 454     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 455     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 456     if (monitors->is_nonempty()) {
 457       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 753   // its caller's stack by. If the caller is a compiled frame then
 754   // we pretend that the callee has no parameters so that the
 755   // extension counts for the full amount of locals and not just
 756   // locals-parms. This is because without a c2i adapter the parm
 757   // area as created by the compiled frame will not be usable by
 758   // the interpreter. (Depending on the calling convention there
 759   // may not even be enough space).
 760 
 761   // QQQ I'd rather see this pushed down into last_frame_adjust
 762   // and have it take the sender (aka caller).
 763 
 764   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 765     caller_adjustment = last_frame_adjust(0, callee_locals);
 766   } else if (callee_locals > callee_parameters) {
 767     // The caller frame may need extending to accommodate
 768     // non-parameter locals of the first unpacked interpreted frame.
 769     // Compute that adjustment.
 770     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 771   }
 772 
 773   // If the sender is deoptimized we must retrieve the address of the handler
 774   // since the frame will "magically" show the original pc before the deopt
 775   // and we'd undo the deopt.
 776 
 777   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 778   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 779     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 780   }
 781 
 782   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 783 
 784 #if INCLUDE_JVMCI
 785   if (exceptionObject() != nullptr) {
 786     current->set_exception_oop(exceptionObject());
 787     exec_mode = Unpack_exception;
 788     assert(array->element(0)->rethrow_exception(), "must be");
 789   }
 790 #endif
 791 
 792   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 793     assert(current->has_pending_exception(), "should have thrown OOME");

1270        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1271        default:;
1272      }
1273    }
1274    return nullptr;
1275 }
1276 #endif // INCLUDE_JVMCI
1277 
1278 #if COMPILER2_OR_JVMCI
1279 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1280   Handle pending_exception(THREAD, thread->pending_exception());
1281   const char* exception_file = thread->exception_file();
1282   int exception_line = thread->exception_line();
1283   thread->clear_pending_exception();
1284 
1285   bool failures = false;
1286 
1287   for (int i = 0; i < objects->length(); i++) {
1288     assert(objects->at(i)->is_object(), "invalid debug information");
1289     ObjectValue* sv = (ObjectValue*) objects->at(i);
1290     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1291     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1292 
1293     // Check if the object may be null and has an additional null_marker input that needs
1294     // to be checked before using the field values. Skip re-allocation if it is null.
1295     if (k->is_inline_klass() && sv->has_properties()) {
1296       jint null_marker = StackValue::create_stack_value(fr, reg_map, sv->properties())->get_jint();
1297       if (null_marker == 0) {
1298         continue;
1299       }
1300     }
1301 
1302     oop obj = nullptr;
1303     bool cache_init_error = false;
1304     if (k->is_instance_klass()) {
1305 #if INCLUDE_JVMCI
1306       nmethod* nm = fr->cb()->as_nmethod_or_null();
1307       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1308         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1309         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1310         if (obj != nullptr) {
1311           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1312           abv->set_cached(true);
1313         } else if (cache_init_error) {
1314           // Results in an OOME which is valid (as opposed to a class initialization error)
 1315           // and is fine for the rare case of a cache initialization failing.
1316           failures = true;
1317         }
1318       }
1319 #endif // INCLUDE_JVMCI
1320 
1321       InstanceKlass* ik = InstanceKlass::cast(k);
1322       if (obj == nullptr && !cache_init_error) {
1323         InternalOOMEMark iom(THREAD);
1324         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1325           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1326         } else {
1327           obj = ik->allocate_instance(THREAD);
1328         }
1329       }
1330     } else if (k->is_flatArray_klass()) {
1331       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1332       // Inline type array must be zeroed because not all memory is reassigned
1333       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1334     } else if (k->is_typeArray_klass()) {
1335       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1336       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1337       int len = sv->field_size() / type2size[ak->element_type()];
1338       InternalOOMEMark iom(THREAD);
1339       obj = ak->allocate_instance(len, THREAD);
1340     } else if (k->is_refArray_klass()) {
1341       RefArrayKlass* ak = RefArrayKlass::cast(k);
1342       InternalOOMEMark iom(THREAD);
1343       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1344     }
1345 
1346     if (obj == nullptr) {
1347       failures = true;
1348     }
1349 
1350     assert(sv->value().is_null(), "redundant reallocation");
1351     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1352     CLEAR_PENDING_EXCEPTION;
1353     sv->set_value(obj);
1354   }
1355 
1356   if (failures) {
1357     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1358   } else if (pending_exception.not_null()) {
1359     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1360   }
1361 
1362   return failures;
1363 }
1364 
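The null_marker handling earlier in this function (a possibly-null scalarized
value carries an extra properties input; zero means null, so re-allocation is
skipped) can be modeled standalone as follows (hypothetical types, illustrative
only):

    #include <optional>
    #include <utility>

    struct ScalarizedPoint { int x; int y; int null_marker; };  // 0 == null

    // Materialize only when the marker is non-zero.
    std::optional<std::pair<int, int>> materialize(const ScalarizedPoint& s) {
      if (s.null_marker == 0) {
        return std::nullopt;  // value is null: skip re-allocation entirely
      }
      return std::make_pair(s.x, s.y);
    }
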
 1365 // We're deoptimizing at the return of a call; inline type fields are
1366 // in registers. When we go back to the interpreter, it will expect a
1367 // reference to an inline type instance. Allocate and initialize it from
1368 // the register values here.
1369 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1370   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1371   if (new_vt == nullptr) {
1372     CLEAR_PENDING_EXCEPTION;
1373     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1374   }
1375   return_oops.clear();
1376   return_oops.push(Handle(THREAD, new_vt));
1377   return false;
1378 }
1379 
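A sketch of what "reallocating the result" means here: field values left in
registers are buffered into one fresh heap object, and that single reference
replaces the saved oops (hypothetical standalone model of
InlineKlass::realloc_result):

    #include <memory>

    struct ReturnedFields { int x; int y; };  // field values as left in registers
    struct BufferedValue  { int x; int y; };  // heap-allocated buffered form

    std::unique_ptr<BufferedValue> buffer_result(const ReturnedFields& regs) {
      return std::make_unique<BufferedValue>(BufferedValue{regs.x, regs.y});
    }
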
1380 #if INCLUDE_JVMCI
1381 /**
1382  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1383  * we need to somehow be able to recover the actual kind to be able to write the correct
1384  * amount of bytes.
1385  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1386  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1387  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1388  * expected form of the array would be:
1389  *
1390  * {b0, b1, b2, b3, INT, marker, b6, b7}
1391  *
1392  * Thus, in order to get back the size of the entry, we simply need to count the number
 1393  * of marker entries that follow it; the span is that count plus one.
1394  *
1395  * @param virtualArray the virtualized byte array
1396  * @param i index of the virtual entry we are recovering
1397  * @return The number of bytes the entry spans
1398  */
1399 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1525       default:
1526         ShouldNotReachHere();
1527     }
1528     index++;
1529   }
1530 }
1531 
1532 // restore fields of an eliminated object array
1533 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1534   for (int i = 0; i < sv->field_size(); i++) {
1535     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1536     assert(value->type() == T_OBJECT, "object element expected");
1537     obj->obj_at_put(i, value->get_obj()());
1538   }
1539 }
1540 
1541 class ReassignedField {
1542 public:
1543   int _offset;
1544   BasicType _type;
1545   InstanceKlass* _klass;
1546   bool _is_flat;
1547   bool _is_null_free;
1548 public:
1549   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }
1550 };
1551 
1552 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1553 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1554   InstanceKlass* super = klass->superklass();
1555   if (super != nullptr) {
1556     get_reassigned_fields(super, fields, is_jvmci);
1557   }
1558   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1559     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1560       ReassignedField field;
1561       field._offset = fs.offset();
1562       field._type = Signature::basic_type(fs.signature());
1563       if (fs.is_flat()) {
1564         field._is_flat = true;
1565         field._is_null_free = fs.is_null_free_inline_type();
1566         // Resolve klass of flat inline type field
1567         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1568       }
1569       fields->append(field);
1570     }
1571   }
1572   return fields;
1573 }
1574 
1575 // Restore fields of an eliminated instance object employing the same field order used by the
1576 // compiler when it scalarizes an object at safepoints.
1577 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, TRAPS) {
1578   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1579   for (int i = 0; i < fields->length(); i++) {
1580     BasicType type = fields->at(i)._type;
1581     int offset = base_offset + fields->at(i)._offset;
1582     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1583     if (fields->at(i)._is_flat) {
1584       // Recursively re-assign flat inline type fields
1585       InstanceKlass* vk = fields->at(i)._klass;
1586       assert(vk != nullptr, "must be resolved");
1587       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1588       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, CHECK_0);
1589       if (!fields->at(i)._is_null_free) {
1590         ScopeValue* scope_field = sv->field_at(svIndex);
1591         StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1592         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1593         obj->bool_field_put(nm_offset, value->get_jint() & 1);
1594         svIndex++;
1595       }
1596       continue; // Continue because we don't need to increment svIndex
1597     }
1598 
1599     ScopeValue* scope_field = sv->field_at(svIndex);
1600     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1601     switch (type) {
1602       case T_OBJECT:
1603       case T_ARRAY:
1604         assert(value->type() == T_OBJECT, "Agreement.");
1605         obj->obj_field_put(offset, value->get_obj()());
1606         break;
1607 
1608       case T_INT: case T_FLOAT: { // 4 bytes.
1609         assert(value->type() == T_INT, "Agreement.");
1610         bool big_value = false;
1611         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1612           if (scope_field->is_location()) {
1613             Location::Type type = ((LocationValue*) scope_field)->location().type();
1614             if (type == Location::dbl || type == Location::lng) {
1615               big_value = true;
1616             }
1617           }
1618           if (scope_field->is_constant_int()) {
1619             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1620             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1621               big_value = true;
1622             }
1623           }

1654       case T_CHAR:
1655         assert(value->type() == T_INT, "Agreement.");
1656         obj->char_field_put(offset, (jchar)value->get_jint());
1657         break;
1658 
1659       case T_BYTE:
1660         assert(value->type() == T_INT, "Agreement.");
1661         obj->byte_field_put(offset, (jbyte)value->get_jint());
1662         break;
1663 
1664       case T_BOOLEAN:
1665         assert(value->type() == T_INT, "Agreement.");
1666         obj->bool_field_put(offset, (jboolean)value->get_jint());
1667         break;
1668 
1669       default:
1670         ShouldNotReachHere();
1671     }
1672     svIndex++;
1673   }
1674 
1675   return svIndex;
1676 }
1677 
1678 // restore fields of an eliminated inline type array
1679 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1680   InlineKlass* vk = vak->element_klass();
1681   assert(vk->maybe_flat_in_array(), "should only be used for flat inline type arrays");
1682   // Adjust offset to omit oop header
1683   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - InlineKlass::cast(vk)->payload_offset();
1684   // Initialize all elements of the flat inline type array
1685   for (int i = 0; i < sv->field_size(); i++) {
1686     ScopeValue* val = sv->field_at(i);
1687     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1688     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, is_jvmci, offset, CHECK);
1689   }
1690 }
1691 
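A worked example of the element-offset arithmetic above, under assumed layout
constants (all numbers hypothetical; the real values come from arrayOopDesc and
InlineKlass at runtime):

    // Hypothetical stand-ins for the runtime queries used above.
    constexpr int kArrayBaseBytes = 16; // arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT)
    constexpr int kPayloadOffset  = 8;  // InlineKlass payload_offset()
    constexpr int kLog2ElemSize   = 3;  // 8-byte flat elements

    // Offset at which payload-relative field offsets apply for element i.
    constexpr int element_base(int i) {
      return (kArrayBaseBytes - kPayloadOffset) + (i << kLog2ElemSize);
    }

    static_assert(element_base(0) == 8,  "element 0 starts at byte 8");
    static_assert(element_base(2) == 24, "element 2: 8 + 2 * 8");

reassign_fields_by_klass then adds each field's payload-relative offset on top of
element_base(i).
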
1692 // restore fields of all eliminated objects and arrays
1693 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
1694   for (int i = 0; i < objects->length(); i++) {
1695     assert(objects->at(i)->is_object(), "invalid debug information");
1696     ObjectValue* sv = (ObjectValue*) objects->at(i);
1697     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1698     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1699 
1700     Handle obj = sv->value();
1701     assert(obj.not_null() || realloc_failures || sv->has_properties(), "reallocation was missed");
1702 #ifndef PRODUCT
1703     if (PrintDeoptimizationDetails) {
1704       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1705     }
1706 #endif // !PRODUCT
1707 
1708     if (obj.is_null()) {
1709       continue;
1710     }
1711 
1712 #if INCLUDE_JVMCI
1713     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1714     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1715       continue;
1716     }
1717 #endif // INCLUDE_JVMCI
1718     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1719       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1720       ScopeValue* payload = sv->field_at(0);
1721       if (payload->is_location() &&
1722           payload->as_LocationValue()->location().type() == Location::vector) {
1723 #ifndef PRODUCT
1724         if (PrintDeoptimizationDetails) {
1725           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1726           if (Verbose) {
1727             Handle obj = sv->value();
1728             k->oop_print_on(obj(), tty);
1729           }
1730         }
1731 #endif // !PRODUCT
1732         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1733       }
1734       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1735       // which could be restored after vector object allocation.
1736     }
1737     if (k->is_instance_klass()) {
1738       InstanceKlass* ik = InstanceKlass::cast(k);
1739       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, CHECK);
1740     } else if (k->is_flatArray_klass()) {
1741       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1742       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
1743     } else if (k->is_typeArray_klass()) {
1744       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1745       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1746     } else if (k->is_refArray_klass()) {
1747       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1748     }
1749   }
1750   // These objects may escape when we return to Interpreter after deoptimization.
 1751   // We need a barrier so that stores that initialize these objects can't be reordered
1752   // with subsequent stores that make these objects accessible by other threads.
1753   OrderAccess::storestore();
1754 }
1755 
1756 
1757 // relock objects for which synchronization was eliminated
1758 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1759                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1760   bool relocked_objects = false;
1761   for (int i = 0; i < monitors->length(); i++) {
1762     MonitorInfo* mon_info = monitors->at(i);
1763     if (mon_info->eliminated()) {
1764       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1765       relocked_objects = true;
1766       if (!mon_info->owner_is_scalar_replaced()) {

1918     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1919     nm->log_identity(xtty);
1920     xtty->end_head();
1921     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1922       xtty->begin_elem("jvms bci='%d'", sd->bci());
1923       xtty->method(sd->method());
1924       xtty->end_elem();
1925       if (sd->is_top())  break;
1926     }
1927     xtty->tail("deoptimized");
1928   }
1929 
1930   Continuation::notify_deopt(thread, fr.sp());
1931 
1932   // Patch the compiled method so that when execution returns to it we will
1933   // deopt the execution state and return to the interpreter.
1934   fr.deoptimize(thread);
1935 }
1936 
1937 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1938   // Deoptimize only if the frame comes from compiled code.
1939   // Do not deoptimize the frame which is already patched
1940   // during the execution of the loops below.
1941   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1942     return;
1943   }
1944   ResourceMark rm;
1945   deoptimize_single_frame(thread, fr, reason);
1946 }
1947 
1948 #if INCLUDE_JVMCI
1949 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1950   // there is no exception handler for this pc => deoptimize
1951   nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1952 
1953   // Use Deoptimization::deoptimize for all of its side-effects:
1954   // gathering traps statistics, logging...
1955   // it also patches the return pc but we do not care about that
1956   // since we return a continuation to the deopt_blob below.
1957   JavaThread* thread = JavaThread::current();
1958   RegisterMap reg_map(thread,