src/hotspot/share/runtime/deoptimization.cpp

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/method.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.inline.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/typeArrayOop.inline.hpp"
  58 #include "oops/verifyOopClosure.hpp"
  59 #include "prims/jvmtiDeferredUpdates.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "prims/jvmtiThreadState.hpp"
  62 #include "prims/methodHandles.hpp"
  63 #include "prims/vectorSupport.hpp"
  64 #include "runtime/atomicAccess.hpp"
  65 #include "runtime/basicLock.inline.hpp"
  66 #include "runtime/continuation.hpp"
  67 #include "runtime/continuationEntry.inline.hpp"
  68 #include "runtime/deoptimization.hpp"
  69 #include "runtime/escapeBarrier.hpp"
  70 #include "runtime/fieldDescriptor.inline.hpp"
  71 #include "runtime/frame.inline.hpp"
  72 #include "runtime/handles.inline.hpp"

 280 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 281 // which is called from the method fetch_unroll_info_helper below.
 282 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 283   // fetch_unroll_info() is called at the beginning of the deoptimization
 284   // handler. Note this fact before we start generating temporary frames
 285   // that can confuse an asynchronous stack walker. This counter is
 286   // decremented at the end of unpack_frames().
 287   current->inc_in_deopt_handler();
 288 
 289   if (exec_mode == Unpack_exception) {
 290     // When we get here, a callee has thrown an exception into a deoptimized
 291     // frame. That throw might have deferred stack watermark checking until
 292     // after unwinding. So we deal with such deferred requests here.
 293     StackWatermarkSet::after_unwind(current);
 294   }
 295 
 296   return fetch_unroll_info_helper(current, exec_mode);
 297 JRT_END
 298 
 299 #if COMPILER2_OR_JVMCI
 300 // print information about reallocated objects
 301 static void print_objects(JavaThread* deoptee_thread,
 302                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 303   ResourceMark rm;
 304   stringStream st;  // change to logStream with logging
 305   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 306   fieldDescriptor fd;
 307 
 308   for (int i = 0; i < objects->length(); i++) {
 309     ObjectValue* sv = (ObjectValue*) objects->at(i);
 310     Handle obj = sv->value();
 311 
 312     if (obj.is_null()) {
 313       st.print_cr("     nullptr");
 314       continue;
 315     }
 316 
 317     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 318 
 319     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 320     k->print_value_on(&st);
 321     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 322 
 323     if (Verbose && k != nullptr) {
 324       k->oop_print_on(obj(), &st);
 325     }
 326   }
 327   tty->print_raw(st.freeze());
 328 }
 329 
 330 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 331                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 332                                   bool& deoptimized_objects) {
 333   bool realloc_failures = false;
 334   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 335 
 336   JavaThread* deoptee_thread = chunk->at(0)->thread();
 337   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 338          "a frame can only be deoptimized by the owner thread");
 339 
 340   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 341 
 342   // The flag return_oop() indicates call sites which return an oop
 343   // in compiled code. Such sites include Java method calls,
 344   // runtime calls (for example, used to allocate new objects/arrays
 345   // on slow code path) and any other calls generated in compiled code.
 346   // It is not guaranteed that we can get such information here only
 347   // by analyzing bytecode in deoptimized frames. This is why this flag
 348   // is set during method compilation (see Compile::Process_OopMap_Node()).
 349   // If the previous frame was popped or if we are dispatching an exception,
 350   // we don't have an oop result.
 351   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 352   Handle return_value;
 353   if (save_oop_result) {
 354     // Reallocation may trigger GC. If deoptimization happened on return from
 355     // a call which returns an oop, we need to save it since it is not in the oopmap.
 356     oop result = deoptee.saved_oop_result(&map);
 357     assert(oopDesc::is_oop_or_null(result), "must be oop");
 358     return_value = Handle(thread, result);
 359     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 360     if (TraceDeoptimization) {
 361       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 362       tty->cr();
 363     }
 364   }
 365   if (objects != nullptr) {
 366     if (exec_mode == Deoptimization::Unpack_none) {
 367       assert(thread->thread_state() == _thread_in_vm, "assumption");
 368       JavaThread* THREAD = thread; // For exception macros.
 369       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 370       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 371       deoptimized_objects = true;
 372     } else {
 373       JavaThread* current = thread; // For JRT_BLOCK
 374       JRT_BLOCK
 375       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 376       JRT_END
 377     }
 378     guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 379     bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 380     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci);
 381     if (TraceDeoptimization) {
 382       print_objects(deoptee_thread, objects, realloc_failures);
 383     }
 384   }
 385   if (save_oop_result) {
 386     // Restore result.
 387     deoptee.set_saved_oop_result(&map, return_value());
 388   }
 389   return realloc_failures;
 390 }
 391 
 392 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 393                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 394   JavaThread* deoptee_thread = chunk->at(0)->thread();
 395   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 396   assert(thread == Thread::current(), "should be");
 397   HandleMark hm(thread);
 398 #ifndef PRODUCT
 399   bool first = true;
 400 #endif // !PRODUCT
 401   // Start locking from outermost/oldest frame
 402   for (int i = (chunk->length() - 1); i >= 0; i--) {
 403     compiledVFrame* cvf = chunk->at(i);
 404     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 405     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 406     if (monitors->is_nonempty()) {
 407       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 701   // its caller's stack by. If the caller is a compiled frame then
 702   // we pretend that the callee has no parameters so that the
 703   // extension counts for the full amount of locals and not just
 704   // locals-parms. This is because without a c2i adapter the parm
 705   // area as created by the compiled frame will not be usable by
 706   // the interpreter. (Depending on the calling convention there
 707   // may not even be enough space).
 708 
 709   // QQQ I'd rather see this pushed down into last_frame_adjust
 710   // and have it take the sender (aka caller).
 711 
 712   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 713     caller_adjustment = last_frame_adjust(0, callee_locals);
 714   } else if (callee_locals > callee_parameters) {
 715     // The caller frame may need extending to accommodate
 716     // non-parameter locals of the first unpacked interpreted frame.
 717     // Compute that adjustment.
 718     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 719   }
 720 
 721   // If the sender is deoptimized we must retrieve the address of the handler
 722   // since the frame will "magically" show the original pc before the deopt
 723   // and we'd undo the deopt.
 724 
 725   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 726   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 727     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 728   }
 729 
 730   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 731 
 732 #if INCLUDE_JVMCI
 733   if (exceptionObject() != nullptr) {
 734     current->set_exception_oop(exceptionObject());
 735     exec_mode = Unpack_exception;
 736     assert(array->element(0)->rethrow_exception(), "must be");
 737   }
 738 #endif
 739 
 740   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 741     assert(current->has_pending_exception(), "should have thrown OOME");

1218        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1219        default:;
1220      }
1221    }
1222    return nullptr;
1223 }
1224 #endif // INCLUDE_JVMCI
1225 
1226 #if COMPILER2_OR_JVMCI
1227 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1228   Handle pending_exception(THREAD, thread->pending_exception());
1229   const char* exception_file = thread->exception_file();
1230   int exception_line = thread->exception_line();
1231   thread->clear_pending_exception();
1232 
1233   bool failures = false;
1234 
1235   for (int i = 0; i < objects->length(); i++) {
1236     assert(objects->at(i)->is_object(), "invalid debug information");
1237     ObjectValue* sv = (ObjectValue*) objects->at(i);
1238 
1239     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1240     oop obj = nullptr;
1241 
1242     bool cache_init_error = false;
1243     if (k->is_instance_klass()) {
1244 #if INCLUDE_JVMCI
1245       nmethod* nm = fr->cb()->as_nmethod_or_null();
1246       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1247         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1248         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1249         if (obj != nullptr) {
1250           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1251           abv->set_cached(true);
1252         } else if (cache_init_error) {
1253           // Results in an OOME which is valid (as opposed to a class initialization error)
1254       // and is fine for the rare case of a cache initialization failing.
1255           failures = true;
1256         }
1257       }
1258 #endif // INCLUDE_JVMCI
1259 
1260       InstanceKlass* ik = InstanceKlass::cast(k);
1261       if (obj == nullptr && !cache_init_error) {
1262         InternalOOMEMark iom(THREAD);
1263         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1264           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1265         } else {
1266           obj = ik->allocate_instance(THREAD);
1267         }
1268       }
1269     } else if (k->is_typeArray_klass()) {
1270       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1271       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1272       int len = sv->field_size() / type2size[ak->element_type()];
1273       InternalOOMEMark iom(THREAD);
1274       obj = ak->allocate_instance(len, THREAD);
1275     } else if (k->is_objArray_klass()) {
1276       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1277       InternalOOMEMark iom(THREAD);
1278       obj = ak->allocate_instance(sv->field_size(), THREAD);
1279     }
1280 
1281     if (obj == nullptr) {
1282       failures = true;
1283     }
1284 
1285     assert(sv->value().is_null(), "redundant reallocation");
1286     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1287     CLEAR_PENDING_EXCEPTION;
1288     sv->set_value(obj);
1289   }
1290 
1291   if (failures) {
1292     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1293   } else if (pending_exception.not_null()) {
1294     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1295   }
1296 
1297   return failures;
1298 }
1299 
1300 #if INCLUDE_JVMCI
1301 /**
1302  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1303  * we need to somehow be able to recover the actual kind to be able to write the correct
1304  * amount of bytes.
1305  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1306  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1307  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1308  * expected form of the array would be:
1309  *
1310  * {b0, b1, b2, b3, INT, marker, b6, b7}
1311  *
1312  * Thus, in order to get back the size of the entry, we simply need to count the number
1313  * of marked entries.
1314  *
1315  * @param virtualArray the virtualized byte array
1316  * @param i index of the virtual entry we are recovering
1317  * @return The number of bytes the entry spans
1318  */
1319 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1451       default:
1452         ShouldNotReachHere();
1453     }
1454     index++;
1455   }
1456 }
1457 
1458 // restore fields of an eliminated object array
1459 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1460   for (int i = 0; i < sv->field_size(); i++) {
1461     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1462     assert(value->type() == T_OBJECT, "object element expected");
1463     obj->obj_at_put(i, value->get_obj()());
1464   }
1465 }
1466 
1467 class ReassignedField {
1468 public:
1469   int _offset;
1470   BasicType _type;
1471 public:
1472   ReassignedField() {
1473     _offset = 0;
1474     _type = T_ILLEGAL;
1475   }
1476 };
1477 
1478 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1479 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1480   InstanceKlass* super = klass->super();
1481   if (super != nullptr) {
1482     get_reassigned_fields(super, fields, is_jvmci);
1483   }
1484   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1485     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1486       ReassignedField field;
1487       field._offset = fs.offset();
1488       field._type = Signature::basic_type(fs.signature());
1489       fields->append(field);
1490     }
1491   }
1492   return fields;
1493 }
1494 
1495 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1496 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci) {
1497   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1498   for (int i = 0; i < fields->length(); i++) {
1499     ScopeValue* scope_field = sv->field_at(svIndex);
1500     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1501     int offset = fields->at(i)._offset;
1502     BasicType type = fields->at(i)._type;
1503     switch (type) {
1504       case T_OBJECT: case T_ARRAY:
1505         assert(value->type() == T_OBJECT, "Agreement.");
1506         obj->obj_field_put(offset, value->get_obj()());
1507         break;
1508 
1509       case T_INT: case T_FLOAT: { // 4 bytes.
1510         assert(value->type() == T_INT, "Agreement.");
1511         bool big_value = false;
1512         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1513           if (scope_field->is_location()) {
1514             Location::Type type = ((LocationValue*) scope_field)->location().type();
1515             if (type == Location::dbl || type == Location::lng) {
1516               big_value = true;
1517             }
1518           }
1519           if (scope_field->is_constant_int()) {
1520             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1521             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1522               big_value = true;
1523             }
1524           }

1555       case T_CHAR:
1556         assert(value->type() == T_INT, "Agreement.");
1557         obj->char_field_put(offset, (jchar)value->get_jint());
1558         break;
1559 
1560       case T_BYTE:
1561         assert(value->type() == T_INT, "Agreement.");
1562         obj->byte_field_put(offset, (jbyte)value->get_jint());
1563         break;
1564 
1565       case T_BOOLEAN:
1566         assert(value->type() == T_INT, "Agreement.");
1567         obj->bool_field_put(offset, (jboolean)value->get_jint());
1568         break;
1569 
1570       default:
1571         ShouldNotReachHere();
1572     }
1573     svIndex++;
1574   }
1575   return svIndex;
1576 }
1577 
1578 // restore fields of all eliminated objects and arrays
1579 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci) {
1580   for (int i = 0; i < objects->length(); i++) {
1581     assert(objects->at(i)->is_object(), "invalid debug information");
1582     ObjectValue* sv = (ObjectValue*) objects->at(i);
1583     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1584     Handle obj = sv->value();
1585     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1586 #ifndef PRODUCT
1587     if (PrintDeoptimizationDetails) {
1588       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1589     }
1590 #endif // !PRODUCT
1591 
1592     if (obj.is_null()) {
1593       continue;
1594     }
1595 
1596 #if INCLUDE_JVMCI
1597     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1598     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1599       continue;
1600     }
1601 #endif // INCLUDE_JVMCI
1602     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1603       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1604       ScopeValue* payload = sv->field_at(0);
1605       if (payload->is_location() &&
1606           payload->as_LocationValue()->location().type() == Location::vector) {
1607 #ifndef PRODUCT
1608         if (PrintDeoptimizationDetails) {
1609           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1610           if (Verbose) {
1611             Handle obj = sv->value();
1612             k->oop_print_on(obj(), tty);
1613           }
1614         }
1615 #endif // !PRODUCT
1616         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1617       }
1618       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1619       // which could be restored after vector object allocation.
1620     }
1621     if (k->is_instance_klass()) {
1622       InstanceKlass* ik = InstanceKlass::cast(k);
1623       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci);
1624     } else if (k->is_typeArray_klass()) {
1625       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1626       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1627     } else if (k->is_objArray_klass()) {
1628       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1629     }
1630   }
1631   // These objects may escape when we return to the interpreter after deoptimization.
1632   // We need a barrier so that stores that initialize these objects can't be reordered
1633   // with subsequent stores that make these objects accessible by other threads.
1634   OrderAccess::storestore();
1635 }
1636 
1637 
1638 // relock objects for which synchronization was eliminated
1639 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1640                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1641   bool relocked_objects = false;
1642   for (int i = 0; i < monitors->length(); i++) {
1643     MonitorInfo* mon_info = monitors->at(i);
1644     if (mon_info->eliminated()) {
1645       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1646       relocked_objects = true;
1647       if (!mon_info->owner_is_scalar_replaced()) {

1785     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1786     nm->log_identity(xtty);
1787     xtty->end_head();
1788     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1789       xtty->begin_elem("jvms bci='%d'", sd->bci());
1790       xtty->method(sd->method());
1791       xtty->end_elem();
1792       if (sd->is_top())  break;
1793     }
1794     xtty->tail("deoptimized");
1795   }
1796 
1797   Continuation::notify_deopt(thread, fr.sp());
1798 
1799   // Patch the compiled method so that when execution returns to it we will
1800   // deopt the execution state and return to the interpreter.
1801   fr.deoptimize(thread);
1802 }
1803 
1804 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1805   // Deoptimize only if the frame comes from compiled code.
1806   // Do not deoptimize the frame which is already patched
1807   // during the execution of the loops below.
1808   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1809     return;
1810   }
1811   ResourceMark rm;
1812   deoptimize_single_frame(thread, fr, reason);
1813 }
1814 
1815 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant) {
1816   // there is no exception handler for this pc => deoptimize
1817   if (make_not_entrant) {
1818     nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1819   }
1820 
1821   // Use Deoptimization::deoptimize for all of its side-effects:
1822   // gathering traps statistics, logging...
1823   // it also patches the return pc but we do not care about that
1824   // since we return a continuation to the deopt_blob below.
1825   JavaThread* thread = JavaThread::current();

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/inlineKlass.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/typeArrayOop.inline.hpp"
  61 #include "oops/verifyOopClosure.hpp"
  62 #include "prims/jvmtiDeferredUpdates.hpp"
  63 #include "prims/jvmtiExport.hpp"
  64 #include "prims/jvmtiThreadState.hpp"
  65 #include "prims/methodHandles.hpp"
  66 #include "prims/vectorSupport.hpp"
  67 #include "runtime/atomicAccess.hpp"
  68 #include "runtime/basicLock.inline.hpp"
  69 #include "runtime/continuation.hpp"
  70 #include "runtime/continuationEntry.inline.hpp"
  71 #include "runtime/deoptimization.hpp"
  72 #include "runtime/escapeBarrier.hpp"
  73 #include "runtime/fieldDescriptor.inline.hpp"
  74 #include "runtime/frame.inline.hpp"
  75 #include "runtime/handles.inline.hpp"

 283 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 284 // which is called from the method fetch_unroll_info_helper below.
 285 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 286   // fetch_unroll_info() is called at the beginning of the deoptimization
 287   // handler. Note this fact before we start generating temporary frames
 288   // that can confuse an asynchronous stack walker. This counter is
 289   // decremented at the end of unpack_frames().
 290   current->inc_in_deopt_handler();
 291 
 292   if (exec_mode == Unpack_exception) {
 293     // When we get here, a callee has thrown an exception into a deoptimized
 294     // frame. That throw might have deferred stack watermark checking until
 295     // after unwinding. So we deal with such deferred requests here.
 296     StackWatermarkSet::after_unwind(current);
 297   }
 298 
 299   return fetch_unroll_info_helper(current, exec_mode);
 300 JRT_END
 301 
 302 #if COMPILER2_OR_JVMCI
 303 
 304 static Klass* get_refined_array_klass(Klass* k, frame* fr, RegisterMap* map, ObjectValue* sv, TRAPS) {
 305   // If it's an array, get the properties
 306   if (k->is_array_klass() && !k->is_typeArray_klass()) {
 307     assert(!k->is_refArray_klass() && !k->is_flatArray_klass(), "Unexpected refined klass");
 308     nmethod* nm = fr->cb()->as_nmethod_or_null();
 309     if (nm->is_compiled_by_c2()) {
 310       assert(sv->has_properties(), "Property information is missing");
 311       ArrayKlass::ArrayProperties props = static_cast<ArrayKlass::ArrayProperties>(StackValue::create_stack_value(fr, map, sv->properties())->get_jint());
 312       k = ObjArrayKlass::cast(k)->klass_with_properties(props, THREAD);
 313     } else {
 314       // TODO Graal needs to be fixed. Just go with the default properties for now
 315       k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayKlass::ArrayProperties::DEFAULT, THREAD);
 316     }
 317   }
 318   return k;
 319 }
 320 
 321 // print information about reallocated objects
 322 static void print_objects(JavaThread* deoptee_thread, frame* deoptee, RegisterMap* map,
 323                           GrowableArray<ScopeValue*>* objects, bool realloc_failures, TRAPS) {
 324   ResourceMark rm;
 325   stringStream st;  // change to logStream with logging
 326   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 327   fieldDescriptor fd;
 328 
 329   for (int i = 0; i < objects->length(); i++) {
 330     ObjectValue* sv = (ObjectValue*) objects->at(i);
 331     Handle obj = sv->value();
 332 
 333     if (obj.is_null()) {
 334       st.print_cr("     nullptr");
 335       continue;
 336     }
 337 
 338     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 339     k = get_refined_array_klass(k, deoptee, map, sv, THREAD);
 340 
 341     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 342     k->print_value_on(&st);
 343     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 344 
 345     if (Verbose && k != nullptr) {
 346       k->oop_print_on(obj(), &st);
 347     }
 348   }
 349   tty->print_raw(st.freeze());
 350 }
 351 
 352 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 353                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 354                                   bool& deoptimized_objects) {
 355   bool realloc_failures = false;
 356   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 357 
 358   JavaThread* deoptee_thread = chunk->at(0)->thread();
 359   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 360          "a frame can only be deoptimized by the owner thread");
 361 
 362   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 363 
 364   // The flag return_oop() indicates call sites which return an oop
 365   // in compiled code. Such sites include Java method calls,
 366   // runtime calls (for example, used to allocate new objects/arrays
 367   // on slow code path) and any other calls generated in compiled code.
 368   // It is not guaranteed that we can get such information here only
 369   // by analyzing bytecode in deoptimized frames. This is why this flag
 370   // is set during method compilation (see Compile::Process_OopMap_Node()).
 371   // If the previous frame was popped or if we are dispatching an exception,
 372   // we don't have an oop result.
 373   ScopeDesc* scope = chunk->at(0)->scope();
 374   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 375   // When multiple values are returned, we must take care
 376   // of all oop return values.
 377   GrowableArray<Handle> return_oops;
 378   InlineKlass* vk = nullptr;
 379   if (save_oop_result && scope->return_scalarized()) {
 380     vk = InlineKlass::returned_inline_klass(map);
 381     if (vk != nullptr) {
 382       vk->save_oop_fields(map, return_oops);
 383       save_oop_result = false;
 384     }
 385   }
 386   if (save_oop_result) {
 387     // Reallocation may trigger GC. If deoptimization happened on return from
 388     // a call which returns an oop, we need to save it since it is not in the oopmap.
 389     oop result = deoptee.saved_oop_result(&map);
 390     assert(oopDesc::is_oop_or_null(result), "must be oop");
 391     return_oops.push(Handle(thread, result));
 392     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 393     if (TraceDeoptimization) {
 394       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 395       tty->cr();
 396     }
 397   }
 398   if (objects != nullptr || vk != nullptr) {
 399     if (exec_mode == Deoptimization::Unpack_none) {
 400       assert(thread->thread_state() == _thread_in_vm, "assumption");
 401       JavaThread* THREAD = thread; // For exception macros.
 402       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 403       if (vk != nullptr) {
 404         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 405       }
 406       if (objects != nullptr) {
 407         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 408         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 409         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 410         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 411       }
 412       deoptimized_objects = true;
 413     } else {
 414       JavaThread* current = thread; // For JRT_BLOCK
 415       JRT_BLOCK
 416       if (vk != nullptr) {
 417         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 418       }
 419       if (objects != nullptr) {
 420         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 421         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 422         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 423         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 424       }
 425       JRT_END
 426     }
 427     if (TraceDeoptimization && objects != nullptr) {
 428       print_objects(deoptee_thread, &deoptee, &map, objects, realloc_failures, thread);
 429     }
 430   }
 431   if (save_oop_result || vk != nullptr) {
 432     // Restore result.
 433     assert(return_oops.length() == 1, "no inline type");
 434     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 435   }
 436   return realloc_failures;
 437 }
 438 
 439 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 440                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 441   JavaThread* deoptee_thread = chunk->at(0)->thread();
 442   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 443   assert(thread == Thread::current(), "should be");
 444   HandleMark hm(thread);
 445 #ifndef PRODUCT
 446   bool first = true;
 447 #endif // !PRODUCT
 448   // Start locking from outermost/oldest frame
 449   for (int i = (chunk->length() - 1); i >= 0; i--) {
 450     compiledVFrame* cvf = chunk->at(i);
 451     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 452     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 453     if (monitors->is_nonempty()) {
 454       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 748   // its caller's stack by. If the caller is a compiled frame then
 749   // we pretend that the callee has no parameters so that the
 750   // extension counts for the full amount of locals and not just
 751   // locals-parms. This is because without a c2i adapter the parm
 752   // area as created by the compiled frame will not be usable by
 753   // the interpreter. (Depending on the calling convention there
 754   // may not even be enough space).
 755 
 756   // QQQ I'd rather see this pushed down into last_frame_adjust
 757   // and have it take the sender (aka caller).
 758 
 759   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 760     caller_adjustment = last_frame_adjust(0, callee_locals);
 761   } else if (callee_locals > callee_parameters) {
 762     // The caller frame may need extending to accommodate
 763     // non-parameter locals of the first unpacked interpreted frame.
 764     // Compute that adjustment.
 765     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 766   }
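
A worked example of the adjustment above, with invented numbers (callee_parameters, callee_locals, and last_frame_adjust are the names used by the surrounding code; the values are hypothetical):

    // Hypothetical: the first unpacked interpreted frame has 2 parameters and 5 locals.
    // Interpreted sender: the arguments already sit on the caller's expression
    // stack, so only the 3 non-parameter locals need room:
    //   caller_adjustment = last_frame_adjust(2, 5);
    // Compiled sender (no c2i adapter): its outgoing parameter area is unusable
    // by the interpreter, so we pretend there are no parameters and make room
    // for all 5 locals:
    //   caller_adjustment = last_frame_adjust(0, 5);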
 767 
 768   // If the sender is deoptimized we must retrieve the address of the handler
 769   // since the frame will "magically" show the original pc before the deopt
 770   // and we'd undo the deopt.
 771 
 772   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 773   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 774     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 775   }
 776 
 777   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 778 
 779 #if INCLUDE_JVMCI
 780   if (exceptionObject() != nullptr) {
 781     current->set_exception_oop(exceptionObject());
 782     exec_mode = Unpack_exception;
 783     assert(array->element(0)->rethrow_exception(), "must be");
 784   }
 785 #endif
 786 
 787   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 788     assert(current->has_pending_exception(), "should have thrown OOME");

1265        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1266        default:;
1267      }
1268    }
1269    return nullptr;
1270 }
1271 #endif // INCLUDE_JVMCI
1272 
1273 #if COMPILER2_OR_JVMCI
1274 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1275   Handle pending_exception(THREAD, thread->pending_exception());
1276   const char* exception_file = thread->exception_file();
1277   int exception_line = thread->exception_line();
1278   thread->clear_pending_exception();
1279 
1280   bool failures = false;
1281 
1282   for (int i = 0; i < objects->length(); i++) {
1283     assert(objects->at(i)->is_object(), "invalid debug information");
1284     ObjectValue* sv = (ObjectValue*) objects->at(i);
1285     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1286     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1287 
1288     // Check if the object may be null and has an additional null_marker input that needs
1289     // to be checked before using the field values. Skip re-allocation if it is null.
1290     if (k->is_inline_klass() && sv->has_properties()) {
1291       jint null_marker = StackValue::create_stack_value(fr, reg_map, sv->properties())->get_jint();
1292       if (null_marker == 0) {
1293         continue;
1294       }
1295     }
1296 
1297     oop obj = nullptr;
1298     bool cache_init_error = false;
1299     if (k->is_instance_klass()) {
1300 #if INCLUDE_JVMCI
1301       nmethod* nm = fr->cb()->as_nmethod_or_null();
1302       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1303         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1304         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1305         if (obj != nullptr) {
1306           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1307           abv->set_cached(true);
1308         } else if (cache_init_error) {
1309           // Results in an OOME which is valid (as opposed to a class initialization error)
1310       // and is fine for the rare case of a cache initialization failing.
1311           failures = true;
1312         }
1313       }
1314 #endif // INCLUDE_JVMCI
1315 
1316       InstanceKlass* ik = InstanceKlass::cast(k);
1317       if (obj == nullptr && !cache_init_error) {
1318         InternalOOMEMark iom(THREAD);
1319         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1320           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1321         } else {
1322           obj = ik->allocate_instance(THREAD);
1323         }
1324       }
1325     } else if (k->is_flatArray_klass()) {
1326       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1327       // Inline type array must be zeroed because not all memory is reassigned
1328       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1329     } else if (k->is_typeArray_klass()) {
1330       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1331       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1332       int len = sv->field_size() / type2size[ak->element_type()];
1333       InternalOOMEMark iom(THREAD);
1334       obj = ak->allocate_instance(len, THREAD);
1335     } else if (k->is_refArray_klass()) {
1336       RefArrayKlass* ak = RefArrayKlass::cast(k);
1337       InternalOOMEMark iom(THREAD);
1338       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1339     }
1340 
1341     if (obj == nullptr) {
1342       failures = true;
1343     }
1344 
1345     assert(sv->value().is_null(), "redundant reallocation");
1346     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1347     CLEAR_PENDING_EXCEPTION;
1348     sv->set_value(obj);
1349   }
1350 
1351   if (failures) {
1352     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1353   } else if (pending_exception.not_null()) {
1354     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1355   }
1356 
1357   return failures;
1358 }
1359 
1360 // We're deoptimizing at the return of a call; inline type fields are
1361 // in registers. When we go back to the interpreter, it will expect a
1362 // reference to an inline type instance. Allocate and initialize it from
1363 // the register values here.
1364 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1365   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1366   if (new_vt == nullptr) {
1367     CLEAR_PENDING_EXCEPTION;
1368     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1369   }
1370   return_oops.clear();
1371   return_oops.push(Handle(THREAD, new_vt));
1372   return false;
1373 }
1374 
1375 #if INCLUDE_JVMCI
1376 /**
1377  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1378  * we need to somehow be able to recover the actual kind to be able to write the correct
1379  * amount of bytes.
1380  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1382  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1382  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1383  * expected form of the array would be:
1384  *
1385  * {b0, b1, b2, b3, INT, marker, b6, b7}
1386  *
1387  * Thus, in order to get back the size of the entry, we simply need to count the number
1389  * of marked entries.
1389  *
1390  * @param virtualArray the virtualized byte array
1391  * @param i index of the virtual entry we are recovering
1392  * @return The number of bytes the entry spans
1393  */
1394 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

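The body of count_number_of_bytes_for_entry is elided in this excerpt. The following is a minimal sketch of the marker-counting idea the comment describes, assuming only the is_marker() predicate of HotSpot's ScopeValue hierarchy; it is an illustration, not the actual implementation:

    static int count_number_of_bytes_for_entry_sketch(ObjectValue* virtualArray, int i) {
      // Walk forward from the entry at index i and count the marker slots
      // that pad out a multi-byte entry.
      int index = i + 1;
      while (index < virtualArray->field_size() &&
             virtualArray->field_at(index)->is_marker()) {
        index++;
      }
      return index - i; // width in bytes == 1 + number of markers
    }
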
1526       default:
1527         ShouldNotReachHere();
1528     }
1529     index++;
1530   }
1531 }
1532 
1533 // restore fields of an eliminated object array
1534 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1535   for (int i = 0; i < sv->field_size(); i++) {
1536     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1537     assert(value->type() == T_OBJECT, "object element expected");
1538     obj->obj_at_put(i, value->get_obj()());
1539   }
1540 }
1541 
1542 class ReassignedField {
1543 public:
1544   int _offset;
1545   BasicType _type;
1546   InstanceKlass* _klass;
1547   bool _is_flat;
1548   bool _is_null_free;
1549 public:
1550   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }
1551 };
1552 
1553 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1554 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1555   InstanceKlass* super = klass->super();
1556   if (super != nullptr) {
1557     get_reassigned_fields(super, fields, is_jvmci);
1558   }
1559   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1560     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1561       ReassignedField field;
1562       field._offset = fs.offset();
1563       field._type = Signature::basic_type(fs.signature());
1564       if (fs.is_flat()) {
1565         field._is_flat = true;
1566         field._is_null_free = fs.is_null_free_inline_type();
1567         // Resolve klass of flat inline type field
1568         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1569       }
1570       fields->append(field);
1571     }
1572   }
1573   return fields;
1574 }
1575 
1576 // Restore fields of an eliminated instance object employing the same field order used by the
1577 // compiler when it scalarizes an object at safepoints.
1578 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, TRAPS) {
1579   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1580   for (int i = 0; i < fields->length(); i++) {
1581     BasicType type = fields->at(i)._type;
1582     int offset = base_offset + fields->at(i)._offset;
1583     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1584     if (fields->at(i)._is_flat) {
1585       // Recursively re-assign flat inline type fields
1586       InstanceKlass* vk = fields->at(i)._klass;
1587       assert(vk != nullptr, "must be resolved");
1588       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1589       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, CHECK_0);
1590       if (!fields->at(i)._is_null_free) {
1591         ScopeValue* scope_field = sv->field_at(svIndex);
1592         StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1593         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1594         obj->bool_field_put(nm_offset, value->get_jint() & 1);
1595         svIndex++;
1596       }
1597       continue; // Continue because we don't need to increment svIndex
1598     }
1599 
1600     ScopeValue* scope_field = sv->field_at(svIndex);
1601     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1602     switch (type) {
1603       case T_OBJECT:
1604       case T_ARRAY:
1605         assert(value->type() == T_OBJECT, "Agreement.");
1606         obj->obj_field_put(offset, value->get_obj()());
1607         break;
1608 
1609       case T_INT: case T_FLOAT: { // 4 bytes.
1610         assert(value->type() == T_INT, "Agreement.");
1611         bool big_value = false;
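              // A long or double whose kind was erased in the debug info can show
              // up here as two adjacent T_INT fields covered by a single 8-byte
              // value; inspect the location/constant type to detect that case so
              // both halves are written together.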
1612         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1613           if (scope_field->is_location()) {
1614             Location::Type type = ((LocationValue*) scope_field)->location().type();
1615             if (type == Location::dbl || type == Location::lng) {
1616               big_value = true;
1617             }
1618           }
1619           if (scope_field->is_constant_int()) {
1620             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1621             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1622               big_value = true;
1623             }
1624           }

1655       case T_CHAR:
1656         assert(value->type() == T_INT, "Agreement.");
1657         obj->char_field_put(offset, (jchar)value->get_jint());
1658         break;
1659 
1660       case T_BYTE:
1661         assert(value->type() == T_INT, "Agreement.");
1662         obj->byte_field_put(offset, (jbyte)value->get_jint());
1663         break;
1664 
1665       case T_BOOLEAN:
1666         assert(value->type() == T_INT, "Agreement.");
1667         obj->bool_field_put(offset, (jboolean)value->get_jint());
1668         break;
1669 
1670       default:
1671         ShouldNotReachHere();
1672     }
1673     svIndex++;
1674   }
1675 
1676   return svIndex;
1677 }
1678 
1679 // restore fields of an eliminated inline type array
1680 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1681   InlineKlass* vk = vak->element_klass();
1682   assert(vk->maybe_flat_in_array(), "should only be used for flat inline type arrays");
1683   // Adjust offset to omit oop header
1684   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - vk->payload_offset();
1685   // Initialize all elements of the flat inline type array
1686   for (int i = 0; i < sv->field_size(); i++) {
1687     ObjectValue* val = sv->field_at(i)->as_ObjectValue();
1688     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1689     reassign_fields_by_klass(vk, fr, reg_map, val, 0, (oop)obj, is_jvmci, offset, CHECK);
1690     if (!obj->is_null_free_array()) {
1691       jboolean null_marker_value;
1692       if (val->has_properties()) {
1693         null_marker_value = StackValue::create_stack_value(fr, reg_map, val->properties())->get_jint() & 1;
1694       } else {
1695         null_marker_value = 1;
1696       }
1697       obj->bool_field_put(offset + vk->null_marker_offset(), null_marker_value);
1698     }
1699   }
1700 }
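
To make the offset arithmetic above concrete (all numbers hypothetical): field offsets inside an InlineKlass include the object header, so subtracting payload_offset() re-bases them onto the raw element payload.

    // Hypothetical layout: 8-byte flat elements (log2 element size 3), element
    // payload starting at byte 16 of the arrayOop, payload_offset() == 12.
    //   base_offset = 16 - 12 = 4
    //   element i = 3: offset = 4 + (3 << 3) = 28
    // A field at klass-relative offset 12 + f (header + f) is then written at
    // 28 + (12 + f) = 40 + f, i.e. f bytes into element 3's payload, which
    // starts at byte 16 + 3 * 8 = 40.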
1701 
1702 // restore fields of all eliminated objects and arrays
1703 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
1704   for (int i = 0; i < objects->length(); i++) {
1705     assert(objects->at(i)->is_object(), "invalid debug information");
1706     ObjectValue* sv = (ObjectValue*) objects->at(i);
1707     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1708     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1709 
1710     Handle obj = sv->value();
1711     assert(obj.not_null() || realloc_failures || sv->has_properties(), "reallocation was missed");
1712 #ifndef PRODUCT
1713     if (PrintDeoptimizationDetails) {
1714       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1715     }
1716 #endif // !PRODUCT
1717 
1718     if (obj.is_null()) {
1719       continue;
1720     }
1721 
1722 #if INCLUDE_JVMCI
1723     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1724     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1725       continue;
1726     }
1727 #endif // INCLUDE_JVMCI
1728     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1729       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1730       ScopeValue* payload = sv->field_at(0);
1731       if (payload->is_location() &&
1732           payload->as_LocationValue()->location().type() == Location::vector) {
1733 #ifndef PRODUCT
1734         if (PrintDeoptimizationDetails) {
1735           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1736           if (Verbose) {
1737             Handle obj = sv->value();
1738             k->oop_print_on(obj(), tty);
1739           }
1740         }
1741 #endif // !PRODUCT
1742         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1743       }
1744       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1745       // which could be restored after vector object allocation.
1746     }
1747     if (k->is_instance_klass()) {
1748       InstanceKlass* ik = InstanceKlass::cast(k);
1749       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, CHECK);
1750     } else if (k->is_flatArray_klass()) {
1751       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1752       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
1753     } else if (k->is_typeArray_klass()) {
1754       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1755       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1756     } else if (k->is_refArray_klass()) {
1757       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1758     }
1759   }
1760   // These objects may escape when we return to the interpreter after deoptimization.
1761   // We need a barrier so that stores that initialize these objects can't be reordered
1762   // with subsequent stores that make these objects accessible by other threads.
1763   OrderAccess::storestore();
1764 }
1765 
1766 
1767 // relock objects for which synchronization was eliminated
1768 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1769                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1770   bool relocked_objects = false;
1771   for (int i = 0; i < monitors->length(); i++) {
1772     MonitorInfo* mon_info = monitors->at(i);
1773     if (mon_info->eliminated()) {
1774       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1775       relocked_objects = true;
1776       if (!mon_info->owner_is_scalar_replaced()) {

1914     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1915     nm->log_identity(xtty);
1916     xtty->end_head();
1917     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1918       xtty->begin_elem("jvms bci='%d'", sd->bci());
1919       xtty->method(sd->method());
1920       xtty->end_elem();
1921       if (sd->is_top())  break;
1922     }
1923     xtty->tail("deoptimized");
1924   }
1925 
1926   Continuation::notify_deopt(thread, fr.sp());
1927 
1928   // Patch the compiled method so that when execution returns to it we will
1929   // deopt the execution state and return to the interpreter.
1930   fr.deoptimize(thread);
1931 }
1932 
1933 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1934   // Deoptimize only if the frame comes from compiled code.
1935   // Do not deoptimize the frame which is already patched
1936   // during the execution of the loops below.
1937   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1938     return;
1939   }
1940   ResourceMark rm;
1941   deoptimize_single_frame(thread, fr, reason);
1942 }
1943 
1944 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant) {
1945   // there is no exception handler for this pc => deoptimize
1946   if (make_not_entrant) {
1947     nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1948   }
1949 
1950   // Use Deoptimization::deoptimize for all of its side-effects:
1951   // gathering traps statistics, logging...
1952   // it also patches the return pc but we do not care about that
1953   // since we return a continuation to the deopt_blob below.
1954   JavaThread* thread = JavaThread::current();