
src/hotspot/share/runtime/deoptimization.cpp


  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"



  53 #include "oops/method.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.inline.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "oops/typeArrayOop.inline.hpp"
  58 #include "oops/verifyOopClosure.hpp"
  59 #include "prims/jvmtiDeferredUpdates.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "prims/jvmtiThreadState.hpp"
  62 #include "prims/methodHandles.hpp"
  63 #include "prims/vectorSupport.hpp"

  64 #include "runtime/atomicAccess.hpp"
  65 #include "runtime/basicLock.inline.hpp"
  66 #include "runtime/continuation.hpp"
  67 #include "runtime/continuationEntry.inline.hpp"
  68 #include "runtime/deoptimization.hpp"
  69 #include "runtime/escapeBarrier.hpp"
  70 #include "runtime/fieldDescriptor.inline.hpp"
  71 #include "runtime/frame.inline.hpp"
  72 #include "runtime/handles.inline.hpp"
  73 #include "runtime/interfaceSupport.inline.hpp"
  74 #include "runtime/javaThread.hpp"
  75 #include "runtime/jniHandles.inline.hpp"
  76 #include "runtime/keepStackGCProcessed.hpp"
  77 #include "runtime/lockStack.inline.hpp"
  78 #include "runtime/objectMonitor.inline.hpp"
  79 #include "runtime/osThread.hpp"
  80 #include "runtime/safepointVerifiers.hpp"
  81 #include "runtime/sharedRuntime.hpp"
  82 #include "runtime/signature.hpp"
  83 #include "runtime/stackFrameStream.inline.hpp"

 281 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 282 // which is called from the method fetch_unroll_info_helper below.
 283 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 284   // fetch_unroll_info() is called at the beginning of the deoptimization
 285   // handler. Note this fact before we start generating temporary frames
 286   // that can confuse an asynchronous stack walker. This counter is
 287   // decremented at the end of unpack_frames().
 288   current->inc_in_deopt_handler();
 289 
 290   if (exec_mode == Unpack_exception) {
 291     // When we get here, a callee has thrown an exception into a deoptimized
 292     // frame. That throw might have deferred stack watermark checking until
 293     // after unwinding. So we deal with such deferred requests here.
 294     StackWatermarkSet::after_unwind(current);
 295   }
 296 
 297   return fetch_unroll_info_helper(current, exec_mode);
 298 JRT_END
 299 
 300 #if COMPILER2_OR_JVMCI


















 301 // print information about reallocated objects
 302 static void print_objects(JavaThread* deoptee_thread,
 303                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 304   ResourceMark rm;
 305   stringStream st;  // change to logStream with logging
 306   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 307   fieldDescriptor fd;
 308 
 309   for (int i = 0; i < objects->length(); i++) {
 310     ObjectValue* sv = (ObjectValue*) objects->at(i);
 311     Handle obj = sv->value();
 312 
 313     if (obj.is_null()) {
 314       st.print_cr("     nullptr");
 315       continue;
 316     }
 317 
 318     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());

 319 
 320     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 321     k->print_value_on(&st);
 322     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 323 
 324     if (Verbose && k != nullptr) {
 325       k->oop_print_on(obj(), &st);
 326     }
 327   }
 328   tty->print_raw(st.freeze());
 329 }
 330 
 331 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 332                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 333                                   bool& deoptimized_objects) {
 334   bool realloc_failures = false;
 335   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 336 
 337   JavaThread* deoptee_thread = chunk->at(0)->thread();
 338   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 339          "a frame can only be deoptimized by the owner thread");
 340 
 341   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 342 
 343   // The flag return_oop() indicates call sites which return oop
 344   // in compiled code. Such sites include java method calls,
 345   // runtime calls (for example, used to allocate new objects/arrays
 346   // on slow code path) and any other calls generated in compiled code.
 347   // It is not guaranteed that we can get such information here only
 348   // by analyzing bytecode in deoptimized frames. This is why this flag
 349   // is set during method compilation (see Compile::Process_OopMap_Node()).
 350   // If the previous frame was popped or if we are dispatching an exception,
 351   // we don't have an oop result.
 352   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 353   Handle return_value;











 354   if (save_oop_result) {
 355     // Reallocation may trigger GC. If deoptimization happened on return from
 356     // call which returns oop we need to save it since it is not in oopmap.
 357     oop result = deoptee.saved_oop_result(&map);
 358     assert(oopDesc::is_oop_or_null(result), "must be oop");
 359     return_value = Handle(thread, result);
 360     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 361     if (TraceDeoptimization) {
 362       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 363       tty->cr();
 364     }
 365   }
 366   if (objects != nullptr) {
 367     if (exec_mode == Deoptimization::Unpack_none) {
 368       assert(thread->thread_state() == _thread_in_vm, "assumption");
 369       JavaThread* THREAD = thread; // For exception macros.
 370       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 371       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));








 372       deoptimized_objects = true;
 373     } else {
 374       JavaThread* current = thread; // For JRT_BLOCK
 375       JRT_BLOCK
 376       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);








 377       JRT_END
 378     }
 379     guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 380     bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 381     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci);
 382     if (TraceDeoptimization) {
 383       print_objects(deoptee_thread, objects, realloc_failures);
 384     }
 385   }
 386   if (save_oop_result) {
 387     // Restore result.
 388     deoptee.set_saved_oop_result(&map, return_value());

 389   }
 390   return realloc_failures;
 391 }
 392 
 393 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 394                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 395   JavaThread* deoptee_thread = chunk->at(0)->thread();
 396   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 397   assert(thread == Thread::current(), "should be");
 398   HandleMark hm(thread);
 399 #ifndef PRODUCT
 400   bool first = true;
 401 #endif // !PRODUCT
 402   // Start locking from outermost/oldest frame
 403   for (int i = (chunk->length() - 1); i >= 0; i--) {
 404     compiledVFrame* cvf = chunk->at(i);
 405     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 406     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 407     if (monitors->is_nonempty()) {
 408       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 437         tty->print_raw(st.freeze());
 438       }
 439 #endif // !PRODUCT
 440     }
 441   }
 442 }
 443 
 444 // Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
 445 // The given vframes cover one physical frame.
 446 bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
 447                                                  bool& realloc_failures) {
 448   frame deoptee = chunk->at(0)->fr();
 449   JavaThread* deoptee_thread = chunk->at(0)->thread();
 450   nmethod* nm = deoptee.cb()->as_nmethod_or_null();
 451   RegisterMap map(chunk->at(0)->register_map());
 452   bool deoptimized_objects = false;
 453 
 454   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 455 
 456   // Reallocate the non-escaping objects and restore their fields.
 457   if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
 458                                       || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
 459     realloc_failures = rematerialize_objects(thread, Unpack_none, nm, deoptee, map, chunk, deoptimized_objects);
 460   }
 461 
 462   // MonitorInfo structures used in eliminate_locks are not GC safe.
 463   NoSafepointVerifier no_safepoint;
 464 
 465   // Now relock objects if synchronization on them was eliminated.
 466   if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks))) {
 467     restore_eliminated_locks(thread, chunk, realloc_failures, deoptee, Unpack_none, deoptimized_objects);
 468   }
 469   return deoptimized_objects;
 470 }
 471 #endif // COMPILER2_OR_JVMCI
 472 
 473 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 474 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
 475   JFR_ONLY(Jfr::check_and_process_sample_request(current);)
 476   // When we get here we are about to unwind the deoptee frame. In order to
 477   // catch not yet safe to use frames, the following stack watermark barrier

 514   // Create a growable array of VFrames where each VFrame represents an inlined
 515   // Java frame.  This storage is allocated with the usual system arena.
 516   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 517   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 518   vframe* vf = vframe::new_vframe(&deoptee, &map, current);
 519   while (!vf->is_top()) {
 520     assert(vf->is_compiled_frame(), "Wrong frame type");
 521     chunk->push(compiledVFrame::cast(vf));
 522     vf = vf->sender();
 523   }
 524   assert(vf->is_compiled_frame(), "Wrong frame type");
 525   chunk->push(compiledVFrame::cast(vf));
 526 
 527   bool realloc_failures = false;
 528 
 529 #if COMPILER2_OR_JVMCI
 530   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 531 
 532   // Reallocate the non-escaping objects and restore their fields. Then
 533   // relock objects if synchronization on them was eliminated.
 534   if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations)
 535                                        || EliminateAutoBox || EnableVectorAggressiveReboxing )) {
 536     bool unused;
 537     realloc_failures = rematerialize_objects(current, exec_mode, nm, deoptee, map, chunk, unused);
 538   }
 539 #endif // COMPILER2_OR_JVMCI
 540 
 541   // Ensure that no safepoint is taken after pointers have been stored
 542   // in fields of rematerialized objects.  If a safepoint occurs from here on
 543   // out the java state residing in the vframeArray will be missed.
 544   // Locks may be rebiased in a safepoint.
 545   NoSafepointVerifier no_safepoint;
 546 
 547 #if COMPILER2_OR_JVMCI
 548   if ((jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) ))
 549       && !EscapeBarrier::objs_are_deoptimized(current, deoptee.id())) {
 550     bool unused = false;
 551     restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
 552   }
 553 #endif // COMPILER2_OR_JVMCI
 554 
 555   ScopeDesc* trap_scope = chunk->at(0)->scope();

 702   // its caller's stack by. If the caller is a compiled frame then
 703   // we pretend that the callee has no parameters so that the
 704   // extension counts for the full amount of locals and not just
 705   // locals-parms. This is because without a c2i adapter the parm
 706   // area as created by the compiled frame will not be usable by
 707   // the interpreter. (Depending on the calling convention there
 708   // may not even be enough space).
 709 
 710   // QQQ I'd rather see this pushed down into last_frame_adjust
 711   // and have it take the sender (aka caller).
 712 
 713   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 714     caller_adjustment = last_frame_adjust(0, callee_locals);
 715   } else if (callee_locals > callee_parameters) {
 716     // The caller frame may need extending to accommodate
 717     // non-parameter locals of the first unpacked interpreted frame.
 718     // Compute that adjustment.
 719     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 720   }
 721 
 722   // If the sender is deoptimized the we must retrieve the address of the handler
 723   // since the frame will "magically" show the original pc before the deopt
 724   // and we'd undo the deopt.
 725 
 726   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 727   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 728     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 729   }
 730 
 731   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 732 
 733 #if INCLUDE_JVMCI
 734   if (exceptionObject() != nullptr) {
 735     current->set_exception_oop(exceptionObject());
 736     exec_mode = Unpack_exception;
 737     assert(array->element(0)->rethrow_exception(), "must be");
 738   }
 739 #endif
 740 
 741   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 742     assert(current->has_pending_exception(), "should have thrown OOME");

1080   static InstanceKlass* find_cache_klass(Thread* thread, Symbol* klass_name) {
1081     ResourceMark rm(thread);
1082     char* klass_name_str = klass_name->as_C_string();
1083     InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle());
1084     guarantee(ik != nullptr, "%s must be loaded", klass_name_str);
1085     if (!ik->is_in_error_state()) {
1086       guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
1087       CacheType::compute_offsets(ik);
1088     }
1089     return ik;
1090   }
1091 };
1092 
1093 template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache  : public BoxCacheBase<CacheType> {
1094   PrimitiveType _low;
1095   PrimitiveType _high;
1096   jobject _cache;
1097 protected:
1098   static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
1099   BoxCache(Thread* thread) {

1100     InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(thread, CacheType::symbol());
1101     if (ik->is_in_error_state()) {
1102       _low = 1;
1103       _high = 0;
1104       _cache = nullptr;
1105     } else {
1106       objArrayOop cache = CacheType::cache(ik);
1107       assert(cache->length() > 0, "Empty cache");
1108       _low = BoxType::value(cache->obj_at(0));
1109       _high = checked_cast<PrimitiveType>(_low + cache->length() - 1);
1110       _cache = JNIHandles::make_global(Handle(thread, cache));
1111     }
1112   }
1113   ~BoxCache() {
1114     JNIHandles::destroy_global(_cache);
1115   }
1116 public:
1117   static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
1118     if (_singleton == nullptr) {
1119       BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
1120       if (!AtomicAccess::replace_if_null(&_singleton, s)) {
1121         delete s;
1122       }
1123     }
1124     return _singleton;
1125   }
1126   oop lookup(PrimitiveType value) {
1127     if (_low <= value && value <= _high) {
1128       int offset = checked_cast<int>(value - _low);
1129       return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
1130     }
1131     return nullptr;
1132   }
1133   oop lookup_raw(intptr_t raw_value, bool& cache_init_error) {
1134     if (_cache == nullptr) {
1135       cache_init_error = true;
1136       return nullptr;
1137     }
1138     // Have to cast to avoid little/big-endian problems.
1139     if (sizeof(PrimitiveType) > sizeof(jint)) {
1140       jlong value = (jlong)raw_value;
1141       return lookup(value);
1142     }
1143     PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
1144     return lookup(value);
1145   }
1146 };
1147 
1148 typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
1149 typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
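The singleton() accessor above publishes the lazily built cache with AtomicAccess::replace_if_null: racing threads may each construct a BoxCache, but only the first one is installed and the losers delete theirs. A minimal standalone sketch of the same idiom, using std::atomic in place of the VM's AtomicAccess (illustrative only, not HotSpot code; the cached range values are placeholders):

    #include <atomic>
    #include <cstdio>

    struct Cache { int low; int high; };

    static std::atomic<Cache*> g_singleton{nullptr};

    // Lazy, race-safe initialization: every racing thread may build a Cache,
    // but only the first compare-exchange installs one; the rest delete theirs.
    static Cache* cache_singleton() {
      if (g_singleton.load(std::memory_order_acquire) == nullptr) {
        Cache* s = new Cache{-128, 127};   // placeholder range
        Cache* expected = nullptr;
        if (!g_singleton.compare_exchange_strong(expected, s,
                                                 std::memory_order_release,
                                                 std::memory_order_acquire)) {
          delete s;                        // lost the race; another instance is installed
        }
      }
      return g_singleton.load(std::memory_order_acquire);
    }

    int main() {
      Cache* c = cache_singleton();
      printf("low=%d high=%d\n", c->low, c->high);
      return 0;
    }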

1189   oop lookup_raw(intptr_t raw_value, bool& cache_in_error) {
1190     if (_true_cache == nullptr) {
1191       cache_in_error = true;
1192       return nullptr;
1193     }
1194     // Have to cast to avoid little/big-endian problems.
1195     jboolean value = (jboolean)*((jint*)&raw_value);
1196     return lookup(value);
1197   }
1198   oop lookup(jboolean value) {
1199     if (value != 0) {
1200       return JNIHandles::resolve_non_null(_true_cache);
1201     }
1202     return JNIHandles::resolve_non_null(_false_cache);
1203   }
1204 };
1205 
1206 BooleanBoxCache* BooleanBoxCache::_singleton = nullptr;
1207 
1208 oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS) {




1209    Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
1210    BasicType box_type = vmClasses::box_klass_type(k);
1211    if (box_type != T_OBJECT) {
1212      StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
1213      switch(box_type) {
1214        case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1215        case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1216        case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1217        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1218        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1219        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1220        default:;
1221      }
1222    }
1223    return nullptr;
1224 }
1225 #endif // INCLUDE_JVMCI
1226 
1227 #if COMPILER2_OR_JVMCI
1228 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1229   Handle pending_exception(THREAD, thread->pending_exception());
1230   const char* exception_file = thread->exception_file();
1231   int exception_line = thread->exception_line();
1232   thread->clear_pending_exception();
1233 
1234   bool failures = false;
1235 
1236   for (int i = 0; i < objects->length(); i++) {
1237     assert(objects->at(i)->is_object(), "invalid debug information");
1238     ObjectValue* sv = (ObjectValue*) objects->at(i);
1239 
1240     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1241     oop obj = nullptr;









1242 

1243     bool cache_init_error = false;
1244     if (k->is_instance_klass()) {
1245 #if INCLUDE_JVMCI
1246       nmethod* nm = fr->cb()->as_nmethod_or_null();
1247       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1248         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1249         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1250         if (obj != nullptr) {
1251           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1252           abv->set_cached(true);
1253         } else if (cache_init_error) {
1254           // Results in an OOME which is valid (as opposed to a class initialization error)
1255           // and is fine for the rare case of a cache initialization failing.
1256           failures = true;
1257         }
1258       }
1259 #endif // INCLUDE_JVMCI
1260 
1261       InstanceKlass* ik = InstanceKlass::cast(k);
1262       if (obj == nullptr && !cache_init_error) {
1263         InternalOOMEMark iom(THREAD);
1264         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1265           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1266         } else {
1267           obj = ik->allocate_instance(THREAD);
1268         }
1269       }





1270     } else if (k->is_typeArray_klass()) {
1271       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1272       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1273       int len = sv->field_size() / type2size[ak->element_type()];
1274       InternalOOMEMark iom(THREAD);
1275       obj = ak->allocate_instance(len, THREAD);
1276     } else if (k->is_objArray_klass()) {
1277       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1278       InternalOOMEMark iom(THREAD);
1279       obj = ak->allocate_instance(sv->field_size(), THREAD);
1280     }
1281 
1282     if (obj == nullptr) {
1283       failures = true;
1284     }
1285 
1286     assert(sv->value().is_null(), "redundant reallocation");
1287     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1288     CLEAR_PENDING_EXCEPTION;
1289     sv->set_value(obj);
1290   }
1291 
1292   if (failures) {
1293     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1294   } else if (pending_exception.not_null()) {
1295     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1296   }
1297 
1298   return failures;
1299 }
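realloc_objects above first saves any exception that is already pending and clears it, runs the per-object allocations (each under an InternalOOMEMark), and then either throws the reallocation OutOfMemoryError or re-installs the saved exception. A standalone sketch of that save/clear/restore pattern, with std::optional<std::string> standing in for the thread's pending-exception slot (an assumption made purely for illustration, not VM code):

    #include <optional>
    #include <string>
    #include <cstdio>

    static std::optional<std::string> g_pending;   // stand-in for the pending exception

    // Hypothetical fallible step; the real code does the per-object allocations here.
    static bool allocate_step() { return true; }

    static bool realloc_with_saved_exception() {
      std::optional<std::string> saved = g_pending; // save whatever was already pending
      g_pending.reset();                            // clear so the steps below start clean
      bool failures = !allocate_step();
      if (failures) {
        g_pending = "OutOfMemoryError";             // report the reallocation failure
      } else if (saved.has_value()) {
        g_pending = saved;                          // otherwise restore the original exception
      }
      return failures;
    }

    int main() {
      printf("failures=%d\n", (int)realloc_with_saved_exception());
      return 0;
    }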
1300 















1301 #if INCLUDE_JVMCI
1302 /**
1303  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1304  * we need to somehow be able to recover the actual kind to be able to write the correct
1305  * amount of bytes.
1306  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1307  * the entries at indices i + 1 to i + n - 1 are 'markers'.
1308  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1309  * expected form of the array would be:
1310  *
1311  * {b0, b1, b2, b3, INT, marker, b6, b7}
1312  *
1313  * Thus, in order to get back the size of the entry, we simply count the number of
1314  * marker entries that follow it; the span is one more than that count.
1315  *
1316  * @param virtualArray the virtualized byte array
1317  * @param i index of the virtual entry we are recovering
1318  * @return The number of bytes the entry spans
1319  */
1320 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1452       default:
1453         ShouldNotReachHere();
1454     }
1455     index++;
1456   }
1457 }
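The comment ahead of count_number_of_bytes_for_entry above explains that an entry spanning n bytes is followed by n-1 marker entries. A standalone sketch of recovering the span by counting those trailing markers; the tag values are invented for the example and are not the VM's encoding:

    #include <cstdio>

    enum Entry { BYTE_VAL, INT_VAL, MARKER };      // hypothetical tags

    static int bytes_for_entry(const Entry* entries, int length, int i) {
      int span = 1;
      while (i + span < length && entries[i + span] == MARKER) {
        span++;                                    // each trailing marker adds one byte
      }
      return span;
    }

    int main() {
      // A short written at index 4 of an 8-entry virtual byte array:
      // {b0, b1, b2, b3, INT, marker, b6, b7}
      Entry arr[8] = { BYTE_VAL, BYTE_VAL, BYTE_VAL, BYTE_VAL,
                       INT_VAL, MARKER, BYTE_VAL, BYTE_VAL };
      printf("entry at index 4 spans %d bytes\n", bytes_for_entry(arr, 8, 4)); // prints 2
      return 0;
    }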
1458 
1459 // restore fields of an eliminated object array
1460 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1461   for (int i = 0; i < sv->field_size(); i++) {
1462     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1463     assert(value->type() == T_OBJECT, "object element expected");
1464     obj->obj_at_put(i, value->get_obj()());
1465   }
1466 }
1467 
1468 class ReassignedField {
1469 public:
1470   int _offset;
1471   BasicType _type;



1472 public:
1473   ReassignedField() {
1474     _offset = 0;
1475     _type = T_ILLEGAL;
1476   }
1477 };
1478 
1479 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1480 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1481   InstanceKlass* super = klass->super();
1482   if (super != nullptr) {
1483     get_reassigned_fields(super, fields, is_jvmci);
1484   }
1485   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1486     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1487       ReassignedField field;
1488       field._offset = fs.offset();
1489       field._type = Signature::basic_type(fs.signature());






1490       fields->append(field);
1491     }
1492   }
1493   return fields;
1494 }
1495 
1496 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1497 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci) {

1498   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1499   for (int i = 0; i < fields->length(); i++) {



















1500     ScopeValue* scope_field = sv->field_at(svIndex);
1501     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1502     int offset = fields->at(i)._offset;
1503     BasicType type = fields->at(i)._type;
1504     switch (type) {
1505       case T_OBJECT: case T_ARRAY:

1506         assert(value->type() == T_OBJECT, "Agreement.");
1507         obj->obj_field_put(offset, value->get_obj()());
1508         break;
1509 
1510       case T_INT: case T_FLOAT: { // 4 bytes.
1511         assert(value->type() == T_INT, "Agreement.");
1512         bool big_value = false;
1513         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1514           if (scope_field->is_location()) {
1515             Location::Type type = ((LocationValue*) scope_field)->location().type();
1516             if (type == Location::dbl || type == Location::lng) {
1517               big_value = true;
1518             }
1519           }
1520           if (scope_field->is_constant_int()) {
1521             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1522             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1523               big_value = true;
1524             }
1525           }

1556       case T_CHAR:
1557         assert(value->type() == T_INT, "Agreement.");
1558         obj->char_field_put(offset, (jchar)value->get_jint());
1559         break;
1560 
1561       case T_BYTE:
1562         assert(value->type() == T_INT, "Agreement.");
1563         obj->byte_field_put(offset, (jbyte)value->get_jint());
1564         break;
1565 
1566       case T_BOOLEAN:
1567         assert(value->type() == T_INT, "Agreement.");
1568         obj->bool_field_put(offset, (jboolean)value->get_jint());
1569         break;
1570 
1571       default:
1572         ShouldNotReachHere();
1573     }
1574     svIndex++;
1575   }

1576   return svIndex;
1577 }
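In the switch above, big_value flags a long or double that the compiler has spread across two adjacent INT-typed slots, which must be recombined into one 64-bit value before being written to the field. As a purely illustrative standalone sketch (not the VM's helper, whose reconstruction sits in the elided part of the T_INT/T_FLOAT case), joining two 32-bit halves back into a 64-bit value looks like this:

    #include <cstdint>
    #include <cstdio>

    // Reassemble a 64-bit value from its high and low 32-bit halves.
    static int64_t join_halves(uint32_t high, uint32_t low) {
      return (int64_t)(((uint64_t)high << 32) | (uint64_t)low);
    }

    int main() {
      int64_t original = 0x1122334455667788LL;
      uint32_t high = (uint32_t)((uint64_t)original >> 32);
      uint32_t low  = (uint32_t)original;
      printf("%s\n", join_halves(high, low) == original ? "ok" : "mismatch");
      return 0;
    }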
1578 























1579 // restore fields of all eliminated objects and arrays
1580 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci) {
1581   for (int i = 0; i < objects->length(); i++) {
1582     assert(objects->at(i)->is_object(), "invalid debug information");
1583     ObjectValue* sv = (ObjectValue*) objects->at(i);
1584     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());


1585     Handle obj = sv->value();
1586     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1587 #ifndef PRODUCT
1588     if (PrintDeoptimizationDetails) {
1589       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1590     }
1591 #endif // !PRODUCT
1592 
1593     if (obj.is_null()) {
1594       continue;
1595     }
1596 
1597 #if INCLUDE_JVMCI
1598     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1599     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1600       continue;
1601     }
1602 #endif // INCLUDE_JVMCI
1603     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1604       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1605       ScopeValue* payload = sv->field_at(0);
1606       if (payload->is_location() &&
1607           payload->as_LocationValue()->location().type() == Location::vector) {
1608 #ifndef PRODUCT
1609         if (PrintDeoptimizationDetails) {
1610           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1611           if (Verbose) {
1612             Handle obj = sv->value();
1613             k->oop_print_on(obj(), tty);
1614           }
1615         }
1616 #endif // !PRODUCT
1617         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1618       }
1619       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1620       // which could be restored after vector object allocation.
1621     }
1622     if (k->is_instance_klass()) {
1623       InstanceKlass* ik = InstanceKlass::cast(k);
1624       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci);



1625     } else if (k->is_typeArray_klass()) {
1626       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1627       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1628     } else if (k->is_objArray_klass()) {
1629       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1630     }
1631   }
1632   // These objects may escape when we return to Interpreter after deoptimization.
 1633   // We need a barrier so that stores that initialize these objects can't be reordered
1634   // with subsequent stores that make these objects accessible by other threads.
1635   OrderAccess::storestore();
1636 }
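The storestore barrier above keeps the field-initializing stores of the rematerialized objects ordered before any later store that makes those objects reachable by other threads. A standalone sketch of the same publication concern in portable C++, where a release store plays the role OrderAccess::storestore() plays for the VM's plain stores (illustrative only, not HotSpot code):

    #include <atomic>
    #include <cstdio>

    struct Node { int a; int b; };

    static std::atomic<Node*> g_published{nullptr};

    static void publish() {
      Node* n = new Node();
      n->a = 1;                                           // initializing stores...
      n->b = 2;
      g_published.store(n, std::memory_order_release);    // ...ordered before publication
    }

    int main() {
      publish();
      // A reader that sees the pointer via an acquire load also sees the initialized fields.
      Node* n = g_published.load(std::memory_order_acquire);
      printf("%d %d\n", n->a, n->b);
      return 0;
    }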
1637 
1638 
1639 // relock objects for which synchronization was eliminated
1640 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1641                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1642   bool relocked_objects = false;
1643   for (int i = 0; i < monitors->length(); i++) {
1644     MonitorInfo* mon_info = monitors->at(i);
1645     if (mon_info->eliminated()) {
1646       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1647       relocked_objects = true;
1648       if (!mon_info->owner_is_scalar_replaced()) {

1786     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1787     nm->log_identity(xtty);
1788     xtty->end_head();
1789     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1790       xtty->begin_elem("jvms bci='%d'", sd->bci());
1791       xtty->method(sd->method());
1792       xtty->end_elem();
1793       if (sd->is_top())  break;
1794     }
1795     xtty->tail("deoptimized");
1796   }
1797 
1798   Continuation::notify_deopt(thread, fr.sp());
1799 
1800   // Patch the compiled method so that when execution returns to it we will
1801   // deopt the execution state and return to the interpreter.
1802   fr.deoptimize(thread);
1803 }
1804 
1805 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
 1806   // Deoptimize only if the frame comes from compiled code.
1807   // Do not deoptimize the frame which is already patched
1808   // during the execution of the loops below.
1809   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1810     return;
1811   }
1812   ResourceMark rm;
1813   deoptimize_single_frame(thread, fr, reason);
1814 }
1815 
1816 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant) {
1817   // there is no exception handler for this pc => deoptimize
1818   if (make_not_entrant) {
1819     nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1820   }
1821 
1822   // Use Deoptimization::deoptimize for all of its side-effects:
1823   // gathering traps statistics, logging...
1824   // it also patches the return pc but we do not care about that
1825   // since we return a continuation to the deopt_blob below.
1826   JavaThread* thread = JavaThread::current();

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/inlineKlass.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/typeArrayOop.inline.hpp"
  61 #include "oops/verifyOopClosure.hpp"
  62 #include "prims/jvmtiDeferredUpdates.hpp"
  63 #include "prims/jvmtiExport.hpp"
  64 #include "prims/jvmtiThreadState.hpp"
  65 #include "prims/methodHandles.hpp"
  66 #include "prims/vectorSupport.hpp"
  67 #include "runtime/arguments.hpp"
  68 #include "runtime/atomicAccess.hpp"
  69 #include "runtime/basicLock.inline.hpp"
  70 #include "runtime/continuation.hpp"
  71 #include "runtime/continuationEntry.inline.hpp"
  72 #include "runtime/deoptimization.hpp"
  73 #include "runtime/escapeBarrier.hpp"
  74 #include "runtime/fieldDescriptor.inline.hpp"
  75 #include "runtime/frame.inline.hpp"
  76 #include "runtime/handles.inline.hpp"
  77 #include "runtime/interfaceSupport.inline.hpp"
  78 #include "runtime/javaThread.hpp"
  79 #include "runtime/jniHandles.inline.hpp"
  80 #include "runtime/keepStackGCProcessed.hpp"
  81 #include "runtime/lockStack.inline.hpp"
  82 #include "runtime/objectMonitor.inline.hpp"
  83 #include "runtime/osThread.hpp"
  84 #include "runtime/safepointVerifiers.hpp"
  85 #include "runtime/sharedRuntime.hpp"
  86 #include "runtime/signature.hpp"
  87 #include "runtime/stackFrameStream.inline.hpp"

 285 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 286 // which is called from the method fetch_unroll_info_helper below.
 287 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 288   // fetch_unroll_info() is called at the beginning of the deoptimization
 289   // handler. Note this fact before we start generating temporary frames
 290   // that can confuse an asynchronous stack walker. This counter is
 291   // decremented at the end of unpack_frames().
 292   current->inc_in_deopt_handler();
 293 
 294   if (exec_mode == Unpack_exception) {
 295     // When we get here, a callee has thrown an exception into a deoptimized
 296     // frame. That throw might have deferred stack watermark checking until
 297     // after unwinding. So we deal with such deferred requests here.
 298     StackWatermarkSet::after_unwind(current);
 299   }
 300 
 301   return fetch_unroll_info_helper(current, exec_mode);
 302 JRT_END
 303 
 304 #if COMPILER2_OR_JVMCI
 305 
 306 static Klass* get_refined_array_klass(Klass* k, frame* fr, RegisterMap* map, ObjectValue* sv, TRAPS) {
 307   // If it's an array, get the properties
 308   if (k->is_array_klass() && !k->is_typeArray_klass()) {
 309     assert(k->is_unrefined_objArray_klass(), "Expected unrefined array klass");
 310     nmethod* nm = fr->cb()->as_nmethod_or_null();
 311     if (nm->is_compiled_by_c2()) {
 312       assert(sv->has_properties(), "Property information is missing");
 313       ArrayProperties props(checked_cast<ArrayProperties::Type>(StackValue::create_stack_value(fr, map, sv->properties())->get_jint()));
 314       k = ObjArrayKlass::cast(k)->klass_with_properties(props, THREAD);
 315     } else {
 316       // TODO Graal needs to be fixed. Just go with the default properties for now
 317       k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayProperties::Default(), THREAD);
 318     }
 319   }
 320   return k;
 321 }
 322 
 323 // print information about reallocated objects
 324 static void print_objects(JavaThread* deoptee_thread, frame* deoptee, RegisterMap* map,
 325                           GrowableArray<ScopeValue*>* objects, bool realloc_failures, TRAPS) {
 326   ResourceMark rm;
 327   stringStream st;  // change to logStream with logging
 328   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 329   fieldDescriptor fd;
 330 
 331   for (int i = 0; i < objects->length(); i++) {
 332     ObjectValue* sv = (ObjectValue*) objects->at(i);
 333     Handle obj = sv->value();
 334 
 335     if (obj.is_null()) {
 336       st.print_cr("     nullptr");
 337       continue;
 338     }
 339 
 340     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 341     k = get_refined_array_klass(k, deoptee, map, sv, THREAD);
 342 
 343     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 344     k->print_value_on(&st);
 345     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 346 
 347     if (Verbose && k != nullptr) {
 348       k->oop_print_on(obj(), &st);
 349     }
 350   }
 351   tty->print_raw(st.freeze());
 352 }
 353 
 354 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 355                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 356                                   bool& deoptimized_objects) {
 357   bool realloc_failures = false;
 358   assert (chunk->at(0)->scope() != nullptr,"expect only compiled java frames");
 359 
 360   JavaThread* deoptee_thread = chunk->at(0)->thread();
 361   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 362          "a frame can only be deoptimized by the owner thread");
 363 
 364   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 365 
 366   // The flag return_oop() indicates call sites which return oop
 367   // in compiled code. Such sites include java method calls,
 368   // runtime calls (for example, used to allocate new objects/arrays
 369   // on slow code path) and any other calls generated in compiled code.
 370   // It is not guaranteed that we can get such information here only
 371   // by analyzing bytecode in deoptimized frames. This is why this flag
 372   // is set during method compilation (see Compile::Process_OopMap_Node()).
 373   // If the previous frame was popped or if we are dispatching an exception,
 374   // we don't have an oop result.
 375   ScopeDesc* scope = chunk->at(0)->scope();
 376   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 377   // In case of the return of multiple values, we must take care
 378   // of all oop return values.
 379   GrowableArray<Handle> return_oops;
 380   InlineKlass* vk = nullptr;
 381   if (save_oop_result && scope->return_scalarized()) {
 382     vk = InlineKlass::returned_inline_klass(map);
 383     if (vk != nullptr) {
 384       vk->save_oop_fields(map, return_oops);
 385       save_oop_result = false;
 386     }
 387   }
 388   if (save_oop_result) {
 389     // Reallocation may trigger GC. If deoptimization happened on return from
 390     // call which returns oop we need to save it since it is not in oopmap.
 391     oop result = deoptee.saved_oop_result(&map);
 392     assert(oopDesc::is_oop_or_null(result), "must be oop");
 393     return_oops.push(Handle(thread, result));
 394     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 395     if (TraceDeoptimization) {
 396       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 397       tty->cr();
 398     }
 399   }
 400   if (objects != nullptr || vk != nullptr) {
 401     if (exec_mode == Deoptimization::Unpack_none) {
 402       assert(thread->thread_state() == _thread_in_vm, "assumption");
 403       JavaThread* THREAD = thread; // For exception macros.
 404       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 405       if (vk != nullptr) {
 406         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 407       }
 408       if (objects != nullptr) {
 409         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 410         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 411         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 412         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 413       }
 414       deoptimized_objects = true;
 415     } else {
 416       JavaThread* current = thread; // For JRT_BLOCK
 417       JRT_BLOCK
 418       if (vk != nullptr) {
 419         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 420       }
 421       if (objects != nullptr) {
 422         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 423         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 424         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 425         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 426       }
 427       JRT_END
 428     }
 429     if (TraceDeoptimization && objects != nullptr) {
 430       print_objects(deoptee_thread, &deoptee, &map, objects, realloc_failures, thread);



 431     }
 432   }
 433   if (save_oop_result || vk != nullptr) {
 434     // Restore result.
 435     assert(return_oops.length() == 1, "no inline type");
 436     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 437   }
 438   return realloc_failures;
 439 }
 440 
 441 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 442                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 443   JavaThread* deoptee_thread = chunk->at(0)->thread();
 444   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 445   assert(thread == Thread::current(), "should be");
 446   HandleMark hm(thread);
 447 #ifndef PRODUCT
 448   bool first = true;
 449 #endif // !PRODUCT
 450   // Start locking from outermost/oldest frame
 451   for (int i = (chunk->length() - 1); i >= 0; i--) {
 452     compiledVFrame* cvf = chunk->at(i);
 453     assert (cvf->scope() != nullptr,"expect only compiled java frames");
 454     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 455     if (monitors->is_nonempty()) {
 456       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 485         tty->print_raw(st.freeze());
 486       }
 487 #endif // !PRODUCT
 488     }
 489   }
 490 }
 491 
 492 // Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
 493 // The given vframes cover one physical frame.
 494 bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
 495                                                  bool& realloc_failures) {
 496   frame deoptee = chunk->at(0)->fr();
 497   JavaThread* deoptee_thread = chunk->at(0)->thread();
 498   nmethod* nm = deoptee.cb()->as_nmethod_or_null();
 499   RegisterMap map(chunk->at(0)->register_map());
 500   bool deoptimized_objects = false;
 501 
 502   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 503 
 504   // Reallocate the non-escaping objects and restore their fields.
 505   if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || Arguments::is_valhalla_enabled()) && EliminateAllocations)
 506                                       || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
 507     realloc_failures = rematerialize_objects(thread, Unpack_none, nm, deoptee, map, chunk, deoptimized_objects);
 508   }
 509 
 510   // MonitorInfo structures used in eliminate_locks are not GC safe.
 511   NoSafepointVerifier no_safepoint;
 512 
 513   // Now relock objects if synchronization on them was eliminated.
 514   if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks))) {
 515     restore_eliminated_locks(thread, chunk, realloc_failures, deoptee, Unpack_none, deoptimized_objects);
 516   }
 517   return deoptimized_objects;
 518 }
 519 #endif // COMPILER2_OR_JVMCI
 520 
 521 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 522 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
 523   JFR_ONLY(Jfr::check_and_process_sample_request(current);)
 524   // When we get here we are about to unwind the deoptee frame. In order to
 525   // catch not yet safe to use frames, the following stack watermark barrier

 562   // Create a growable array of VFrames where each VFrame represents an inlined
 563   // Java frame.  This storage is allocated with the usual system arena.
 564   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 565   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 566   vframe* vf = vframe::new_vframe(&deoptee, &map, current);
 567   while (!vf->is_top()) {
 568     assert(vf->is_compiled_frame(), "Wrong frame type");
 569     chunk->push(compiledVFrame::cast(vf));
 570     vf = vf->sender();
 571   }
 572   assert(vf->is_compiled_frame(), "Wrong frame type");
 573   chunk->push(compiledVFrame::cast(vf));
 574 
 575   bool realloc_failures = false;
 576 
 577 #if COMPILER2_OR_JVMCI
 578   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 579 
 580   // Reallocate the non-escaping objects and restore their fields. Then
 581   // relock objects if synchronization on them was eliminated.
 582   if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || Arguments::is_valhalla_enabled()) && EliminateAllocations)
 583                                       || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
 584     bool unused;
 585     realloc_failures = rematerialize_objects(current, exec_mode, nm, deoptee, map, chunk, unused);
 586   }
 587 #endif // COMPILER2_OR_JVMCI
 588 
 589   // Ensure that no safepoint is taken after pointers have been stored
 590   // in fields of rematerialized objects.  If a safepoint occurs from here on
 591   // out the java state residing in the vframeArray will be missed.
 592   // Locks may be rebiased in a safepoint.
 593   NoSafepointVerifier no_safepoint;
 594 
 595 #if COMPILER2_OR_JVMCI
 596   if ((jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) ))
 597       && !EscapeBarrier::objs_are_deoptimized(current, deoptee.id())) {
 598     bool unused = false;
 599     restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
 600   }
 601 #endif // COMPILER2_OR_JVMCI
 602 
 603   ScopeDesc* trap_scope = chunk->at(0)->scope();

 750   // its caller's stack by. If the caller is a compiled frame then
 751   // we pretend that the callee has no parameters so that the
 752   // extension counts for the full amount of locals and not just
 753   // locals-parms. This is because without a c2i adapter the parm
 754   // area as created by the compiled frame will not be usable by
 755   // the interpreter. (Depending on the calling convention there
 756   // may not even be enough space).
 757 
 758   // QQQ I'd rather see this pushed down into last_frame_adjust
 759   // and have it take the sender (aka caller).
 760 
 761   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 762     caller_adjustment = last_frame_adjust(0, callee_locals);
 763   } else if (callee_locals > callee_parameters) {
 764     // The caller frame may need extending to accommodate
 765     // non-parameter locals of the first unpacked interpreted frame.
 766     // Compute that adjustment.
 767     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 768   }
 769 
 770   // If the sender is deoptimized we must retrieve the address of the handler
 771   // since the frame will "magically" show the original pc before the deopt
 772   // and we'd undo the deopt.
 773 
 774   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 775   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 776     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 777   }
 778 
 779   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 780 
 781 #if INCLUDE_JVMCI
 782   if (exceptionObject() != nullptr) {
 783     current->set_exception_oop(exceptionObject());
 784     exec_mode = Unpack_exception;
 785     assert(array->element(0)->rethrow_exception(), "must be");
 786   }
 787 #endif
 788 
 789   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 790     assert(current->has_pending_exception(), "should have thrown OOME");

1128   static InstanceKlass* find_cache_klass(Thread* thread, Symbol* klass_name) {
1129     ResourceMark rm(thread);
1130     char* klass_name_str = klass_name->as_C_string();
1131     InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle());
1132     guarantee(ik != nullptr, "%s must be loaded", klass_name_str);
1133     if (!ik->is_in_error_state()) {
1134       guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
1135       CacheType::compute_offsets(ik);
1136     }
1137     return ik;
1138   }
1139 };
1140 
1141 template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache  : public BoxCacheBase<CacheType> {
1142   PrimitiveType _low;
1143   PrimitiveType _high;
1144   jobject _cache;
1145 protected:
1146   static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
1147   BoxCache(Thread* thread) {
1148     assert(!Arguments::is_valhalla_enabled(), "Should not use box caches with enable preview");
1149     InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(thread, CacheType::symbol());
1150     if (ik->is_in_error_state()) {
1151       _low = 1;
1152       _high = 0;
1153       _cache = nullptr;
1154     } else {
1155       refArrayOop cache = CacheType::cache(ik);
1156       assert(cache->length() > 0, "Empty cache");
1157       _low = BoxType::value(cache->obj_at(0));
1158       _high = checked_cast<PrimitiveType>(_low + cache->length() - 1);
1159       _cache = JNIHandles::make_global(Handle(thread, cache));
1160     }
1161   }
1162   ~BoxCache() {
1163     JNIHandles::destroy_global(_cache);
1164   }
1165 public:
1166   static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
1167     if (_singleton == nullptr) {
1168       BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
1169       if (!AtomicAccess::replace_if_null(&_singleton, s)) {
1170         delete s;
1171       }
1172     }
1173     return _singleton;
1174   }
1175   oop lookup(PrimitiveType value) {
1176     if (_low <= value && value <= _high) {
1177       int offset = checked_cast<int>(value - _low);
1178       return refArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
1179     }
1180     return nullptr;
1181   }
1182   oop lookup_raw(intptr_t raw_value, bool& cache_init_error) {
1183     if (_cache == nullptr) {
1184       cache_init_error = true;
1185       return nullptr;
1186     }
1187     // Have to cast to avoid little/big-endian problems.
1188     if (sizeof(PrimitiveType) > sizeof(jint)) {
1189       jlong value = (jlong)raw_value;
1190       return lookup(value);
1191     }
1192     PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
1193     return lookup(value);
1194   }
1195 };
1196 
1197 typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
1198 typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;

1238   oop lookup_raw(intptr_t raw_value, bool& cache_in_error) {
1239     if (_true_cache == nullptr) {
1240       cache_in_error = true;
1241       return nullptr;
1242     }
1243     // Have to cast to avoid little/big-endian problems.
1244     jboolean value = (jboolean)*((jint*)&raw_value);
1245     return lookup(value);
1246   }
1247   oop lookup(jboolean value) {
1248     if (value != 0) {
1249       return JNIHandles::resolve_non_null(_true_cache);
1250     }
1251     return JNIHandles::resolve_non_null(_false_cache);
1252   }
1253 };
1254 
1255 BooleanBoxCache* BooleanBoxCache::_singleton = nullptr;
1256 
1257 oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS) {
1258   if (Arguments::enable_preview()) {
1259     // Box caches are not used with enable preview.
1260     return nullptr;
1261   }
1262    Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
1263    BasicType box_type = vmClasses::box_klass_type(k);
1264    if (box_type != T_OBJECT) {
1265      StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
1266      switch(box_type) {
1267        case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1268        case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1269        case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1270        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1271        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1272        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1273        default:;
1274      }
1275    }
1276    return nullptr;
1277 }
1278 #endif // INCLUDE_JVMCI
1279 
1280 #if COMPILER2_OR_JVMCI
1281 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1282   Handle pending_exception(THREAD, thread->pending_exception());
1283   const char* exception_file = thread->exception_file();
1284   int exception_line = thread->exception_line();
1285   thread->clear_pending_exception();
1286 
1287   bool failures = false;
1288 
1289   for (int i = 0; i < objects->length(); i++) {
1290     assert(objects->at(i)->is_object(), "invalid debug information");
1291     ObjectValue* sv = (ObjectValue*) objects->at(i);

1292     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1293     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1294 
1295     // Check if the object may be null and has an additional null_marker input that needs
1296     // to be checked before using the field values. Skip re-allocation if it is null.
1297     if (k->is_inline_klass() && sv->has_properties()) {
1298       jint null_marker = StackValue::create_stack_value(fr, reg_map, sv->properties())->get_jint();
1299       if (null_marker == 0) {
1300         continue;
1301       }
1302     }
1303 
1304     oop obj = nullptr;
1305     bool cache_init_error = false;
1306     if (k->is_instance_klass()) {
1307 #if INCLUDE_JVMCI
1308       nmethod* nm = fr->cb()->as_nmethod_or_null();
1309       if (nm != nullptr && nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1310         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1311         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1312         if (obj != nullptr) {
1313           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1314           abv->set_cached(true);
1315         } else if (cache_init_error) {
1316           // Results in an OOME, which is valid (as opposed to a class initialization error)
1317           // and is fine for the rare case of a cache initialization failing.
1318           failures = true;
1319         }
1320       }
1321 #endif // INCLUDE_JVMCI
1322 
1323       InstanceKlass* ik = InstanceKlass::cast(k);
1324       if (obj == nullptr && !cache_init_error) {
1325         InternalOOMEMark iom(THREAD);
1326         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1327           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1328         } else {
1329           obj = ik->allocate_instance(THREAD);
1330         }
1331       }
1332     } else if (k->is_flatArray_klass()) {
1333       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1334       // Inline type array must be zeroed because not all memory is reassigned
1335       InternalOOMEMark iom(THREAD);
1336       obj = ak->allocate_instance(sv->field_size(), THREAD);
1337     } else if (k->is_typeArray_klass()) {
1338       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1339       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1340       int len = sv->field_size() / type2size[ak->element_type()];
1341       InternalOOMEMark iom(THREAD);
1342       obj = ak->allocate_instance(len, THREAD);
1343     } else if (k->is_refArray_klass()) {
1344       RefArrayKlass* ak = RefArrayKlass::cast(k);
1345       InternalOOMEMark iom(THREAD);
1346       obj = ak->allocate_instance(sv->field_size(), THREAD);
1347     }
1348 
1349     if (obj == nullptr) {
1350       failures = true;
1351     }
1352 
1353     assert(sv->value().is_null(), "redundant reallocation");
1354     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1355     CLEAR_PENDING_EXCEPTION;
1356     sv->set_value(obj);
1357   }
1358 
1359   if (failures) {
1360     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1361   } else if (pending_exception.not_null()) {
1362     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1363   }
1364 
1365   return failures;
1366 }
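     // A true return from realloc_objects() means at least one reallocation failed;
     // the pre-allocated OOME is then pending and callers record the failure so that
     // later field reassignment and relocking can tolerate the missing objects.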
1367 
1368 // We're deoptimizing at the return of a call and the inline type fields are
1369 // in registers. When we go back to the interpreter, it will expect a
1370 // reference to an inline type instance. Allocate and initialize it from
1371 // the register values here.
1372 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1373   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1374   if (new_vt == nullptr) {
1375     CLEAR_PENDING_EXCEPTION;
1376     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1377   }
1378   return_oops.clear();
1379   return_oops.push(Handle(THREAD, new_vt));
1380   return false;
1381 }
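     // As with realloc_objects(), a true return (reachable only through THROW_OOP_
     // above) signals a reallocation failure with the pre-allocated OOME pending;
     // on success return_oops holds a single handle to the new inline type instance.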
1382 
1383 #if INCLUDE_JVMCI
1384 /**
1385  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1386  * we need to somehow be able to recover the actual kind to be able to write the correct
1387  * amount of bytes.
1388  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1389  * the entries at index i + 1 to i + n - 1 are 'markers'.
1390  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1391  * expected form of the array would be:
1392  *
1393  * {b0, b1, b2, b3, INT, marker, b6, b7}
1394  *
1395  * Thus, in order to get back the size of the entry, we simply need to count the number
1396  * of marked entries.
1397  *
1398  * @param virtualArray the virtualized byte array
1399  * @param i index of the virtual entry we are recovering
1400  * @return The number of bytes the entry spans
1401  */
1402 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1534       default:
1535         ShouldNotReachHere();
1536     }
1537     index++;
1538   }
1539 }
1540 
1541 // restore fields of an eliminated object array
1542 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1543   for (int i = 0; i < sv->field_size(); i++) {
1544     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1545     assert(value->type() == T_OBJECT, "object element expected");
1546     obj->obj_at_put(i, value->get_obj()());
1547   }
1548 }
1549 
1550 class ReassignedField {
1551 public:
1552   int _offset;
1553   BasicType _type;
1554   InstanceKlass* _klass;
1555   bool _is_flat;
1556   bool _is_null_free;
1557 public:
1558   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }



1559 };
1560 
1561 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1562 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1563   InstanceKlass* super = klass->super();
1564   if (super != nullptr) {
1565     get_reassigned_fields(super, fields, is_jvmci);
1566   }
1567   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1568     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1569       ReassignedField field;
1570       field._offset = fs.offset();
1571       field._type = Signature::basic_type(fs.signature());
1572       if (fs.is_flat()) {
1573         field._is_flat = true;
1574         field._is_null_free = fs.is_null_free_inline_type();
1575         // Resolve klass of flat inline type field
1576         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1577       }
1578       fields->append(field);
1579     }
1580   }
1581   return fields;
1582 }
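     // The recursion above visits superclass fields first and then the klass' own
     // non-static, non-injected fields, which is the order the compiler uses when it
     // scalarizes an object. For example (hypothetical Java classes):
     //
     //   class A { int x; }
     //   class B extends A { long y; }
     //
     // yields the reassignment order A.x, B.y, matching the ScopeValue field order.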
1583 
1584 // Restore fields of an eliminated instance object employing the same field order used by the
1585 // compiler when it scalarizes an object at safepoints.
1586 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, TRAPS) {
1587   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1588   for (int i = 0; i < fields->length(); i++) {
1589     BasicType type = fields->at(i)._type;
1590     int offset = base_offset + fields->at(i)._offset;
1591     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1592     if (fields->at(i)._is_flat) {
1593       // Recursively re-assign flat inline type fields
1594       InstanceKlass* vk = fields->at(i)._klass;
1595       assert(vk != nullptr, "must be resolved");
1596       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1597       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, CHECK_0);
1598       if (!fields->at(i)._is_null_free) {
1599         ScopeValue* scope_field = sv->field_at(svIndex);
1600         StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1601         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1602         obj->bool_field_put(nm_offset, value->get_jint() & 1);
1603         svIndex++;
1604       }
1605       continue; // Skip the svIndex increment below; the flat field's ScopeValues were already consumed above
1606     }
1607 
1608     ScopeValue* scope_field = sv->field_at(svIndex);
1609     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);


1610     switch (type) {
1611       case T_OBJECT:
1612       case T_ARRAY:
1613         assert(value->type() == T_OBJECT, "Agreement.");
1614         obj->obj_field_put(offset, value->get_obj()());
1615         break;
1616 
1617       case T_INT: case T_FLOAT: { // 4 bytes.
1618         assert(value->type() == T_INT, "Agreement.");
1619         bool big_value = false;
1620         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1621           if (scope_field->is_location()) {
1622             Location::Type type = ((LocationValue*) scope_field)->location().type();
1623             if (type == Location::dbl || type == Location::lng) {
1624               big_value = true;
1625             }
1626           }
1627           if (scope_field->is_constant_int()) {
1628             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1629             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1630               big_value = true;
1631             }
1632           }

1663       case T_CHAR:
1664         assert(value->type() == T_INT, "Agreement.");
1665         obj->char_field_put(offset, (jchar)value->get_jint());
1666         break;
1667 
1668       case T_BYTE:
1669         assert(value->type() == T_INT, "Agreement.");
1670         obj->byte_field_put(offset, (jbyte)value->get_jint());
1671         break;
1672 
1673       case T_BOOLEAN:
1674         assert(value->type() == T_INT, "Agreement.");
1675         obj->bool_field_put(offset, (jboolean)value->get_jint());
1676         break;
1677 
1678       default:
1679         ShouldNotReachHere();
1680     }
1681     svIndex++;
1682   }
1683 
1684   return svIndex;
1685 }
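     // The returned svIndex is the position of the next unconsumed ScopeValue; the
     // recursive calls for flat fields rely on this so that nested field values are
     // consumed exactly once from the enclosing object's field list.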
1686 
1687 // restore fields of an eliminated inline type array
1688 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1689   InlineKlass* vk = vak->element_klass();
1690   assert(vk->maybe_flat_in_array(), "should only be used for flat inline type arrays");
1691   // Adjust offset to omit oop header
1692   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - vk->payload_offset();
1693   // Initialize all elements of the flat inline type array
1694   for (int i = 0; i < sv->field_size(); i++) {
1695     ObjectValue* val = sv->field_at(i)->as_ObjectValue();
1696     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1697     reassign_fields_by_klass(vk, fr, reg_map, val, 0, (oop)obj, is_jvmci, offset, CHECK);
1698     if (!obj->is_null_free_array()) {
1699       jboolean null_marker_value;
1700       if (val->has_properties()) {
1701         null_marker_value = StackValue::create_stack_value(fr, reg_map, val->properties())->get_jint() & 1;
1702       } else {
1703         null_marker_value = 1;
1704       }
1705       obj->bool_field_put(offset + vk->null_marker_offset(), null_marker_value);
1706     }
1707   }
1708 }
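     // For nullable (not null-free) flat arrays the loop above also rewrites each
     // element's null marker: the recorded properties value when the compiler emitted
     // one, otherwise 1 (the element is treated as non-null).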
1709 
1710 // restore fields of all eliminated objects and arrays
1711 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
1712   for (int i = 0; i < objects->length(); i++) {
1713     assert(objects->at(i)->is_object(), "invalid debug information");
1714     ObjectValue* sv = (ObjectValue*) objects->at(i);
1715     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1716     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1717 
1718     Handle obj = sv->value();
1719     assert(obj.not_null() || realloc_failures || sv->has_properties(), "reallocation was missed");
1720 #ifndef PRODUCT
1721     if (PrintDeoptimizationDetails) {
1722       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1723     }
1724 #endif // !PRODUCT
1725 
1726     if (obj.is_null()) {
1727       continue;
1728     }
1729 
1730 #if INCLUDE_JVMCI
1731     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1732     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1733       continue;
1734     }
1735 #endif // INCLUDE_JVMCI
1736     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1737       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1738       ScopeValue* payload = sv->field_at(0);
1739       if (payload->is_location() &&
1740           payload->as_LocationValue()->location().type() == Location::vector) {
1741 #ifndef PRODUCT
1742         if (PrintDeoptimizationDetails) {
1743           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1744           if (Verbose) {
1745             Handle obj = sv->value();
1746             k->oop_print_on(obj(), tty);
1747           }
1748         }
1749 #endif // !PRODUCT
1750         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1751       }
1752       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1753       // which could be restored after vector object allocation.
1754     }
1755     if (k->is_instance_klass()) {
1756       InstanceKlass* ik = InstanceKlass::cast(k);
1757       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, CHECK);
1758     } else if (k->is_flatArray_klass()) {
1759       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1760       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
1761     } else if (k->is_typeArray_klass()) {
1762       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1763       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1764     } else if (k->is_refArray_klass()) {
1765       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1766     }
1767   }
1768   // These objects may escape when we return to the interpreter after deoptimization.
1769   // We need a barrier so that stores that initialize these objects can't be reordered
1770   // with subsequent stores that make these objects accessible to other threads.
1771   OrderAccess::storestore();
1772 }
1773 
1774 
1775 // relock objects for which synchronization was eliminated
1776 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1777                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1778   bool relocked_objects = false;
1779   for (int i = 0; i < monitors->length(); i++) {
1780     MonitorInfo* mon_info = monitors->at(i);
1781     if (mon_info->eliminated()) {
1782       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1783       relocked_objects = true;
1784       if (!mon_info->owner_is_scalar_replaced()) {

1922     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1923     nm->log_identity(xtty);
1924     xtty->end_head();
1925     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1926       xtty->begin_elem("jvms bci='%d'", sd->bci());
1927       xtty->method(sd->method());
1928       xtty->end_elem();
1929       if (sd->is_top())  break;
1930     }
1931     xtty->tail("deoptimized");
1932   }
1933 
1934   Continuation::notify_deopt(thread, fr.sp());
1935 
1936   // Patch the compiled method so that when execution returns to it we will
1937   // deopt the execution state and return to the interpreter.
1938   fr.deoptimize(thread);
1939 }
1940 
1941 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1942   // Deoptimize only if the frame comes from compiled code.
1943   // Do not deoptimize a frame that has already been patched
1944   // during the execution of the loops below.
1945   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1946     return;
1947   }
1948   ResourceMark rm;
1949   deoptimize_single_frame(thread, fr, reason);
1950 }
1951 
1952 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant) {
1953   // there is no exception handler for this pc => deoptimize
1954   if (make_not_entrant) {
1955     nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1956   }
1957 
1958   // Use Deoptimization::deoptimize for all of its side effects:
1959   // gathering trap statistics, logging, etc.
1960   // It also patches the return pc, but we do not care about that
1961   // since we return a continuation to the deopt_blob below.
1962   JavaThread* thread = JavaThread::current();