1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "classfile/javaClasses.inline.hpp"
  26 #include "classfile/symbolTable.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "classfile/vmClasses.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/debugInfoRec.hpp"
  31 #include "code/nmethod.hpp"
  32 #include "code/pcDesc.hpp"
  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.inline.hpp"
  39 #include "interpreter/bytecodeStream.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "jvm.h"
  43 #include "logging/log.hpp"
  44 #include "logging/logLevel.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/allocation.inline.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/constantPool.hpp"
  52 #include "oops/fieldStreams.inline.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/inlineKlass.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/typeArrayOop.inline.hpp"
  61 #include "oops/verifyOopClosure.hpp"
  62 #include "prims/jvmtiDeferredUpdates.hpp"
  63 #include "prims/jvmtiExport.hpp"
  64 #include "prims/jvmtiThreadState.hpp"
  65 #include "prims/methodHandles.hpp"
  66 #include "prims/vectorSupport.hpp"
  67 #include "runtime/atomicAccess.hpp"
  68 #include "runtime/basicLock.inline.hpp"
  69 #include "runtime/continuation.hpp"
  70 #include "runtime/continuationEntry.inline.hpp"
  71 #include "runtime/deoptimization.hpp"
  72 #include "runtime/escapeBarrier.hpp"
  73 #include "runtime/fieldDescriptor.inline.hpp"
  74 #include "runtime/frame.inline.hpp"
  75 #include "runtime/handles.inline.hpp"
  76 #include "runtime/interfaceSupport.inline.hpp"
  77 #include "runtime/javaThread.hpp"
  78 #include "runtime/jniHandles.inline.hpp"
  79 #include "runtime/keepStackGCProcessed.hpp"
  80 #include "runtime/lightweightSynchronizer.hpp"
  81 #include "runtime/lockStack.inline.hpp"
  82 #include "runtime/objectMonitor.inline.hpp"
  83 #include "runtime/osThread.hpp"
  84 #include "runtime/safepointVerifiers.hpp"
  85 #include "runtime/sharedRuntime.hpp"
  86 #include "runtime/signature.hpp"
  87 #include "runtime/stackFrameStream.inline.hpp"
  88 #include "runtime/stackValue.hpp"
  89 #include "runtime/stackWatermarkSet.hpp"
  90 #include "runtime/stubRoutines.hpp"
  91 #include "runtime/synchronizer.inline.hpp"
  92 #include "runtime/threadSMR.hpp"
  93 #include "runtime/threadWXSetters.inline.hpp"
  94 #include "runtime/vframe.hpp"
  95 #include "runtime/vframe_hp.hpp"
  96 #include "runtime/vframeArray.hpp"
  97 #include "runtime/vmOperations.hpp"
  98 #include "utilities/checkedCast.hpp"
  99 #include "utilities/events.hpp"
 100 #include "utilities/growableArray.hpp"
 101 #include "utilities/macros.hpp"
 102 #include "utilities/preserveException.hpp"
 103 #include "utilities/xmlstream.hpp"
 104 #if INCLUDE_JFR
 105 #include "jfr/jfr.inline.hpp"
 106 #include "jfr/jfrEvents.hpp"
 107 #include "jfr/metadata/jfrSerializer.hpp"
 108 #endif
 109 
 110 uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
 111 uint64_t DeoptimizationScope::_active_deopt_gen    = 1;
 112 bool     DeoptimizationScope::_committing_in_progress = false;
 113 
 114 DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
 115   DEBUG_ONLY(_deopted = false;)
 116 
 117   MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
  118   // If there is nothing to deopt, _required_gen is the same as the committed generation.
 119   _required_gen = DeoptimizationScope::_committed_deopt_gen;
 120 }
 121 
 122 DeoptimizationScope::~DeoptimizationScope() {
 123   assert(_deopted, "Deopt not executed");
 124 }
 125 
 126 void DeoptimizationScope::mark(nmethod* nm, bool inc_recompile_counts) {
 127   if (!nm->can_be_deoptimized()) {
 128     return;
 129   }
 130 
 131   ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
 132 
  133   // If it's already marked, we still need it to be deopted before this scope completes, so record its generation.
 134   if (nm->is_marked_for_deoptimization()) {
 135     dependent(nm);
 136     return;
 137   }
 138 
 139   nmethod::DeoptimizationStatus status =
 140     inc_recompile_counts ? nmethod::deoptimize : nmethod::deoptimize_noupdate;
 141   AtomicAccess::store(&nm->_deoptimization_status, status);
 142 
 143   // Make sure active is not committed
 144   assert(DeoptimizationScope::_committed_deopt_gen < DeoptimizationScope::_active_deopt_gen, "Must be");
 145   assert(nm->_deoptimization_generation == 0, "Is already marked");
 146 
 147   nm->_deoptimization_generation = DeoptimizationScope::_active_deopt_gen;
 148   _required_gen                  = DeoptimizationScope::_active_deopt_gen;
 149 }
 150 
 151 void DeoptimizationScope::dependent(nmethod* nm) {
 152   ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
 153 
  154   // A method marked by another scope may carry a generation lower than our current _required_gen.
  155   // Therefore only update _required_gen if the method's generation is higher.
 156   if (_required_gen < nm->_deoptimization_generation) {
 157     _required_gen = nm->_deoptimization_generation;
 158   }
 159 }
 160 
 161 void DeoptimizationScope::deoptimize_marked() {
 162   assert(!_deopted, "Already deopted");
 163 
 164   // We are not alive yet.
 165   if (!Universe::is_fully_initialized()) {
 166     DEBUG_ONLY(_deopted = true;)
 167     return;
 168   }
 169 
 170   // Safepoints are a special case, handled here.
 171   if (SafepointSynchronize::is_at_safepoint()) {
 172     DeoptimizationScope::_committed_deopt_gen = DeoptimizationScope::_active_deopt_gen;
 173     DeoptimizationScope::_active_deopt_gen++;
 174     Deoptimization::deoptimize_all_marked();
 175     DEBUG_ONLY(_deopted = true;)
 176     return;
 177   }
 178 
  179   uint64_t committing = 0;
 180   bool wait = false;
 181   while (true) {
 182     {
 183       ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
 184 
 185       // First we check if we or someone else already deopted the gen we want.
 186       if (DeoptimizationScope::_committed_deopt_gen >= _required_gen) {
 187         DEBUG_ONLY(_deopted = true;)
 188         return;
 189       }
 190       if (!_committing_in_progress) {
 191         // The version we are about to commit.
  192         committing = DeoptimizationScope::_active_deopt_gen;
 193         // Make sure new marks use a higher gen.
 194         DeoptimizationScope::_active_deopt_gen++;
 195         _committing_in_progress = true;
 196         wait = false;
 197       } else {
 198         // Another thread is handshaking and committing a gen.
 199         wait = true;
 200       }
 201     }
 202     if (wait) {
 203       // Wait and let the concurrent handshake be performed.
 204       ThreadBlockInVM tbivm(JavaThread::current());
 205       os::naked_yield();
 206     } else {
 207       // Performs the handshake.
 208       Deoptimization::deoptimize_all_marked(); // May safepoint and an additional deopt may have occurred.
 209       DEBUG_ONLY(_deopted = true;)
 210       {
 211         ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
 212 
 213         // Make sure that committed doesn't go backwards.
 214         // Should only happen if we did a deopt during a safepoint above.
  215         if (DeoptimizationScope::_committed_deopt_gen < committing) {
  216           DeoptimizationScope::_committed_deopt_gen = committing;
 217         }
 218         _committing_in_progress = false;
 219 
 220         assert(DeoptimizationScope::_committed_deopt_gen >= _required_gen, "Must be");
 221 
 222         return;
 223       }
 224     }
 225   }
 226 }
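
// Illustrative usage sketch (not part of the implementation above): callers
// typically create a DeoptimizationScope, mark the nmethods they want
// deoptimized, and then commit the scope. The selection logic below is a
// placeholder, not an actual API:
//
//   DeoptimizationScope deopt_scope;
//   for (nmethod* nm : /* nmethods chosen by the caller */) {
//     deopt_scope.mark(nm);
//   }
//   // Executes the handshake (or deopts directly when already at a safepoint)
//   // and returns once the required generation has been committed.
//   deopt_scope.deoptimize_marked();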
 227 
 228 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
 229                                          int  caller_adjustment,
 230                                          int  caller_actual_parameters,
 231                                          int  number_of_frames,
 232                                          intptr_t* frame_sizes,
 233                                          address* frame_pcs,
 234                                          BasicType return_type,
 235                                          int exec_mode) {
 236   _size_of_deoptimized_frame = size_of_deoptimized_frame;
 237   _caller_adjustment         = caller_adjustment;
 238   _caller_actual_parameters  = caller_actual_parameters;
 239   _number_of_frames          = number_of_frames;
 240   _frame_sizes               = frame_sizes;
 241   _frame_pcs                 = frame_pcs;
 242   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
 243   _return_type               = return_type;
 244   _initial_info              = 0;
 245   // PD (x86 only)
 246   _counter_temp              = 0;
 247   _unpack_kind               = exec_mode;
 248   _sender_sp_temp            = 0;
 249 
 250   _total_frame_sizes         = size_of_frames();
 251   assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
 252 }
 253 
 254 Deoptimization::UnrollBlock::~UnrollBlock() {
 255   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
 256   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
 257   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
 258 }
 259 
 260 int Deoptimization::UnrollBlock::size_of_frames() const {
 261   // Account first for the adjustment of the initial frame
 262   intptr_t result = _caller_adjustment;
 263   for (int index = 0; index < number_of_frames(); index++) {
 264     result += frame_sizes()[index];
 265   }
 266   return checked_cast<int>(result);
 267 }
 268 
 269 void Deoptimization::UnrollBlock::print() {
 270   ResourceMark rm;
 271   stringStream st;
 272   st.print_cr("UnrollBlock");
 273   st.print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 274   st.print(   "  frame_sizes: ");
 275   for (int index = 0; index < number_of_frames(); index++) {
 276     st.print("%zd ", frame_sizes()[index]);
 277   }
 278   st.cr();
 279   tty->print_raw(st.freeze());
 280 }
 281 
 282 // In order to make fetch_unroll_info work properly with escape
 283 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
 284 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 285 // which is called from the method fetch_unroll_info_helper below.
 286 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 287   // fetch_unroll_info() is called at the beginning of the deoptimization
 288   // handler. Note this fact before we start generating temporary frames
 289   // that can confuse an asynchronous stack walker. This counter is
 290   // decremented at the end of unpack_frames().
 291   current->inc_in_deopt_handler();
 292 
 293   if (exec_mode == Unpack_exception) {
 294     // When we get here, a callee has thrown an exception into a deoptimized
 295     // frame. That throw might have deferred stack watermark checking until
 296     // after unwinding. So we deal with such deferred requests here.
 297     StackWatermarkSet::after_unwind(current);
 298   }
 299 
 300   return fetch_unroll_info_helper(current, exec_mode);
 301 JRT_END
 302 
 303 #if COMPILER2_OR_JVMCI
 304 
 305 static Klass* get_refined_array_klass(Klass* k, frame* fr, RegisterMap* map, ObjectValue* sv, TRAPS) {
 306   // If it's an array, get the properties
 307   if (k->is_array_klass() && !k->is_typeArray_klass()) {
 308     assert(!k->is_refArray_klass() && !k->is_flatArray_klass(), "Unexpected refined klass");
 309     nmethod* nm = fr->cb()->as_nmethod_or_null();
 310     if (nm->is_compiled_by_c2()) {
 311       assert(sv->has_properties(), "Property information is missing");
 312       ArrayKlass::ArrayProperties props = static_cast<ArrayKlass::ArrayProperties>(StackValue::create_stack_value(fr, map, sv->properties())->get_jint());
 313       k = ObjArrayKlass::cast(k)->klass_with_properties(props, THREAD);
 314     } else {
 315       // TODO Graal needs to be fixed. Just go with the default properties for now
 316       k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayKlass::ArrayProperties::DEFAULT, THREAD);
 317     }
 318   }
 319   return k;
 320 }
 321 
 322 // print information about reallocated objects
 323 static void print_objects(JavaThread* deoptee_thread, frame* deoptee, RegisterMap* map,
 324                           GrowableArray<ScopeValue*>* objects, bool realloc_failures, TRAPS) {
 325   ResourceMark rm;
 326   stringStream st;  // change to logStream with logging
 327   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 328   fieldDescriptor fd;
 329 
 330   for (int i = 0; i < objects->length(); i++) {
 331     ObjectValue* sv = (ObjectValue*) objects->at(i);
 332     Handle obj = sv->value();
 333 
 334     if (obj.is_null()) {
 335       st.print_cr("     nullptr");
 336       continue;
 337     }
 338 
 339     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 340     k = get_refined_array_klass(k, deoptee, map, sv, THREAD);
 341 
 342     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 343     k->print_value_on(&st);
 344     st.print_cr(" allocated (%zu bytes)", obj->size() * HeapWordSize);
 345 
 346     if (Verbose && k != nullptr) {
 347       k->oop_print_on(obj(), &st);
 348     }
 349   }
 350   tty->print_raw(st.freeze());
 351 }
 352 
 353 static bool rematerialize_objects(JavaThread* thread, int exec_mode, nmethod* compiled_method,
 354                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 355                                   bool& deoptimized_objects) {
 356   bool realloc_failures = false;
  357   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 358 
 359   JavaThread* deoptee_thread = chunk->at(0)->thread();
 360   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 361          "a frame can only be deoptimized by the owner thread");
 362 
 363   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 364 
  365   // The flag return_oop() indicates call sites which return an oop
  366   // in compiled code. Such sites include Java method calls,
  367   // runtime calls (for example, used to allocate new objects/arrays
  368   // on the slow code path) and any other calls generated in compiled code.
  369   // It is not guaranteed that we can recover this information here just
  370   // by analyzing the bytecode of deoptimized frames. This is why the flag
  371   // is set during method compilation (see Compile::Process_OopMap_Node()).
 372   // If the previous frame was popped or if we are dispatching an exception,
 373   // we don't have an oop result.
 374   ScopeDesc* scope = chunk->at(0)->scope();
 375   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 376   // In case of the return of multiple values, we must take care
 377   // of all oop return values.
 378   GrowableArray<Handle> return_oops;
 379   InlineKlass* vk = nullptr;
 380   if (save_oop_result && scope->return_scalarized()) {
 381     vk = InlineKlass::returned_inline_klass(map);
 382     if (vk != nullptr) {
 383       vk->save_oop_fields(map, return_oops);
 384       save_oop_result = false;
 385     }
 386   }
 387   if (save_oop_result) {
 388     // Reallocation may trigger GC. If deoptimization happened on return from
  389     // a call which returns an oop, we need to save it since it is not in the oopmap.
 390     oop result = deoptee.saved_oop_result(&map);
 391     assert(oopDesc::is_oop_or_null(result), "must be oop");
 392     return_oops.push(Handle(thread, result));
 393     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 394     if (TraceDeoptimization) {
 395       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 396       tty->cr();
 397     }
 398   }
 399   if (objects != nullptr || vk != nullptr) {
 400     if (exec_mode == Deoptimization::Unpack_none) {
 401       assert(thread->thread_state() == _thread_in_vm, "assumption");
 402       JavaThread* THREAD = thread; // For exception macros.
 403       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 404       if (vk != nullptr) {
 405         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 406       }
 407       if (objects != nullptr) {
 408         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 409         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 410         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 411         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 412       }
 413       deoptimized_objects = true;
 414     } else {
 415       JavaThread* current = thread; // For JRT_BLOCK
 416       JRT_BLOCK
 417       if (vk != nullptr) {
 418         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 419       }
 420       if (objects != nullptr) {
 421         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 422         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 423         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 424         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 425       }
 426       JRT_END
 427     }
 428     if (TraceDeoptimization && objects != nullptr) {
 429       print_objects(deoptee_thread, &deoptee, &map, objects, realloc_failures, thread);
 430     }
 431   }
 432   if (save_oop_result || vk != nullptr) {
 433     // Restore result.
 434     assert(return_oops.length() == 1, "no inline type");
 435     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 436   }
 437   return realloc_failures;
 438 }
 439 
 440 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 441                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 442   JavaThread* deoptee_thread = chunk->at(0)->thread();
 443   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 444   assert(thread == Thread::current(), "should be");
 445   HandleMark hm(thread);
 446 #ifndef PRODUCT
 447   bool first = true;
 448 #endif // !PRODUCT
 449   // Start locking from outermost/oldest frame
 450   for (int i = (chunk->length() - 1); i >= 0; i--) {
 451     compiledVFrame* cvf = chunk->at(i);
  452     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 453     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 454     if (monitors->is_nonempty()) {
 455       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 456                                                      exec_mode, realloc_failures);
 457       deoptimized_objects = deoptimized_objects || relocked;
 458 #ifndef PRODUCT
 459       if (PrintDeoptimizationDetails) {
 460         ResourceMark rm;
 461         stringStream st;
 462         for (int j = 0; j < monitors->length(); j++) {
 463           MonitorInfo* mi = monitors->at(j);
 464           if (mi->eliminated()) {
 465             if (first) {
 466               first = false;
 467               st.print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
 468             }
 469             if (exec_mode == Deoptimization::Unpack_none) {
 470               ObjectMonitor* monitor = deoptee_thread->current_waiting_monitor();
 471               if (monitor != nullptr && monitor->object() == mi->owner()) {
 472                 st.print_cr("     object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner()));
 473                 continue;
 474               }
 475             }
 476             if (mi->owner_is_scalar_replaced()) {
 477               Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
 478               st.print_cr("     failed reallocation for klass %s", k->external_name());
 479             } else {
 480               st.print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
 481             }
 482           }
 483         }
 484         tty->print_raw(st.freeze());
 485       }
 486 #endif // !PRODUCT
 487     }
 488   }
 489 }
 490 
 491 // Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
 492 // The given vframes cover one physical frame.
 493 bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
 494                                                  bool& realloc_failures) {
 495   frame deoptee = chunk->at(0)->fr();
 496   JavaThread* deoptee_thread = chunk->at(0)->thread();
 497   nmethod* nm = deoptee.cb()->as_nmethod_or_null();
 498   RegisterMap map(chunk->at(0)->register_map());
 499   bool deoptimized_objects = false;
 500 
 501   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 502 
 503   // Reallocate the non-escaping objects and restore their fields.
 504   if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
 505                                       || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
 506     realloc_failures = rematerialize_objects(thread, Unpack_none, nm, deoptee, map, chunk, deoptimized_objects);
 507   }
 508 
 509   // MonitorInfo structures used in eliminate_locks are not GC safe.
 510   NoSafepointVerifier no_safepoint;
 511 
 512   // Now relock objects if synchronization on them was eliminated.
 513   if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks))) {
 514     restore_eliminated_locks(thread, chunk, realloc_failures, deoptee, Unpack_none, deoptimized_objects);
 515   }
 516   return deoptimized_objects;
 517 }
 518 #endif // COMPILER2_OR_JVMCI
 519 
 520 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 521 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
 522   JFR_ONLY(Jfr::check_and_process_sample_request(current);)
 523   // When we get here we are about to unwind the deoptee frame. In order to
 524   // catch not yet safe to use frames, the following stack watermark barrier
 525   // poll will make such frames safe to use.
 526   StackWatermarkSet::before_unwind(current);
 527 
 528   // Note: there is a safepoint safety issue here. No matter whether we enter
 529   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 530   // the vframeArray is created.
 531   //
 532 
 533   // Allocate our special deoptimization ResourceMark
 534   DeoptResourceMark* dmark = new DeoptResourceMark(current);
 535   assert(current->deopt_mark() == nullptr, "Pending deopt!");
 536   current->set_deopt_mark(dmark);
 537 
 538   frame stub_frame = current->last_frame(); // Makes stack walkable as side effect
 539   RegisterMap map(current,
 540                   RegisterMap::UpdateMap::include,
 541                   RegisterMap::ProcessFrames::include,
 542                   RegisterMap::WalkContinuation::skip);
 543   RegisterMap dummy_map(current,
 544                         RegisterMap::UpdateMap::skip,
 545                         RegisterMap::ProcessFrames::include,
 546                         RegisterMap::WalkContinuation::skip);
 547   // Now get the deoptee with a valid map
 548   frame deoptee = stub_frame.sender(&map);
 549   if (exec_mode == Unpack_deopt) {
 550     assert(deoptee.is_deoptimized_frame(), "frame is not marked for deoptimization");
 551   }
 552   // Set the deoptee nmethod
 553   assert(current->deopt_compiled_method() == nullptr, "Pending deopt!");
 554   nmethod* nm = deoptee.cb()->as_nmethod_or_null();
 555   current->set_deopt_compiled_method(nm);
 556 
 557   if (VerifyStack) {
 558     current->validate_frame_layout();
 559   }
 560 
 561   // Create a growable array of VFrames where each VFrame represents an inlined
 562   // Java frame.  This storage is allocated with the usual system arena.
 563   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 564   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 565   vframe* vf = vframe::new_vframe(&deoptee, &map, current);
 566   while (!vf->is_top()) {
 567     assert(vf->is_compiled_frame(), "Wrong frame type");
 568     chunk->push(compiledVFrame::cast(vf));
 569     vf = vf->sender();
 570   }
 571   assert(vf->is_compiled_frame(), "Wrong frame type");
 572   chunk->push(compiledVFrame::cast(vf));
 573 
 574   bool realloc_failures = false;
 575 
 576 #if COMPILER2_OR_JVMCI
 577   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 578 
 579   // Reallocate the non-escaping objects and restore their fields. Then
 580   // relock objects if synchronization on them was eliminated.
 581   if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations)
 582                                        || EliminateAutoBox || EnableVectorAggressiveReboxing )) {
 583     bool unused;
 584     realloc_failures = rematerialize_objects(current, exec_mode, nm, deoptee, map, chunk, unused);
 585   }
 586 #endif // COMPILER2_OR_JVMCI
 587 
 588   // Ensure that no safepoint is taken after pointers have been stored
 589   // in fields of rematerialized objects.  If a safepoint occurs from here on
  590   // out, the Java state residing in the vframeArray will be missed.
  591   // Locks may be rebiased in a safepoint.
 592   NoSafepointVerifier no_safepoint;
 593 
 594 #if COMPILER2_OR_JVMCI
 595   if ((jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) ))
 596       && !EscapeBarrier::objs_are_deoptimized(current, deoptee.id())) {
 597     bool unused = false;
 598     restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
 599   }
 600 #endif // COMPILER2_OR_JVMCI
 601 
 602   ScopeDesc* trap_scope = chunk->at(0)->scope();
 603   Handle exceptionObject;
 604   if (trap_scope->rethrow_exception()) {
 605 #ifndef PRODUCT
 606     if (PrintDeoptimizationDetails) {
 607       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 608     }
 609 #endif // !PRODUCT
 610 
 611     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 612     guarantee(expressions != nullptr && expressions->length() == 1, "should have only exception on stack");
 613     guarantee(exec_mode != Unpack_exception, "rethrow_exception set with Unpack_exception");
 614     ScopeValue* topOfStack = expressions->top();
 615     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 616     guarantee(exceptionObject() != nullptr, "exception oop can not be null");
 617   }
 618 
 619   vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
 620 #if COMPILER2_OR_JVMCI
 621   if (realloc_failures) {
 622     // This destroys all ScopedValue bindings.
 623     current->clear_scopedValueBindings();
 624     pop_frames_failed_reallocs(current, array);
 625   }
 626 #endif
 627 
 628   assert(current->vframe_array_head() == nullptr, "Pending deopt!");
 629   current->set_vframe_array_head(array);
 630 
  631   // Now that the vframeArray has been created, if we have any deferred local writes
  632   // added by JVMTI then we can free up that structure, as the data is now in the
  633   // vframeArray.
 634 
 635   JvmtiDeferredUpdates::delete_updates_for_frame(current, array->original().id());
 636 
 637   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
 638   CodeBlob* cb = stub_frame.cb();
 639   // Verify we have the right vframeArray
 640   assert(cb->frame_size() >= 0, "Unexpected frame size");
 641   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 642   assert(unpack_sp == deoptee.unextended_sp(), "must be");
 643 
 644 #ifdef ASSERT
 645   assert(cb->is_deoptimization_stub() ||
 646          cb->is_uncommon_trap_stub() ||
 647          strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
 648          strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
 649          "unexpected code blob: %s", cb->name());
 650 #endif
 651 
 652   // This is a guarantee instead of an assert because if vframe doesn't match
 653   // we will unpack the wrong deoptimized frame and wind up in strange places
 654   // where it will be very difficult to figure out what went wrong. Better
 655   // to die an early death here than some very obscure death later when the
 656   // trail is cold.
 657   guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
 658 
 659   int number_of_frames = array->frames();
 660 
 661   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
 662   // virtual activation, which is the reverse of the elements in the vframes array.
 663   intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
 664   // +1 because we always have an interpreter return address for the final slot.
 665   address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
 666   int popframe_extra_args = 0;
 667   // Create an interpreter return address for the stub to use as its return
 668   // address so the skeletal frames are perfectly walkable
 669   frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
 670 
 671   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
 672   // activation be put back on the expression stack of the caller for reexecution
 673   if (JvmtiExport::can_pop_frame() && current->popframe_forcing_deopt_reexecution()) {
 674     popframe_extra_args = in_words(current->popframe_preserved_args_size_in_words());
 675   }
 676 
 677   // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
  678   // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
  679   // than simply use array->sender.pc(). This requires us to walk the current set of frames.
 680   //
 681   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
 682   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 683 
 684   // It's possible that the number of parameters at the call site is
  685   // different from the number of arguments in the callee when method
  686   // handles are used.  If the caller is interpreted, get the real
  687   // value so that the proper amount of space can be added to its
  688   // frame.
 689   bool caller_was_method_handle = false;
 690   if (deopt_sender.is_interpreted_frame()) {
 691     methodHandle method(current, deopt_sender.interpreter_frame_method());
 692     Bytecode_invoke cur(method, deopt_sender.interpreter_frame_bci());
 693     if (cur.has_member_arg()) {
 694       // This should cover all real-world cases.  One exception is a pathological chain of
 695       // MH.linkToXXX() linker calls, which only trusted code could do anyway.  To handle that case, we
 696       // would need to get the size from the resolved method entry.  Another exception would
 697       // be an invokedynamic with an adapter that is really a MethodHandle linker.
 698       caller_was_method_handle = true;
 699     }
 700   }
 701 
 702   //
 703   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
 704   // frame_sizes/frame_pcs[1] next oldest frame (int)
 705   // frame_sizes/frame_pcs[n] youngest frame (int)
 706   //
 707   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  708   // owns the space for the return address to its caller).  Confusing ain't it.
 709   //
 710   // The vframe array can address vframes with indices running from
  711   // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
 712   // When we create the skeletal frames we need the oldest frame to be in the zero slot
 713   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
 714   // so things look a little strange in this loop.
 715   //
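  // A purely illustrative example with hypothetical values: for three virtual
  // frames, array->element(0) is the youngest and the loop below fills
  //   frame_sizes[2] / frame_pcs[2]  <- element(0), youngest
  //   frame_sizes[1] / frame_pcs[1]  <- element(1), its caller
  //   frame_sizes[0] / frame_pcs[0]  <- element(2), oldest
  // while frame_pcs[3] already holds the interpreter deopt entry stored above
  // (frame_pcs[0] is later replaced with the real caller pc).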
 716   int callee_parameters = 0;
 717   int callee_locals = 0;
 718   for (int index = 0; index < array->frames(); index++ ) {
 719     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
 720     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
 721     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
 722     frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
 723                                                                                                     callee_locals,
 724                                                                                                     index == 0,
 725                                                                                                     popframe_extra_args);
 726     // This pc doesn't have to be perfect just good enough to identify the frame
 727     // as interpreted so the skeleton frame will be walkable
 728     // The correct pc will be set when the skeleton frame is completely filled out
 729     // The final pc we store in the loop is wrong and will be overwritten below
 730     frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
 731 
 732     callee_parameters = array->element(index)->method()->size_of_parameters();
 733     callee_locals = array->element(index)->method()->max_locals();
 734     popframe_extra_args = 0;
 735   }
 736 
 737   // Compute whether the root vframe returns a float or double value.
 738   BasicType return_type;
 739   {
 740     methodHandle method(current, array->element(0)->method());
 741     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 742     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 743   }
 744 
 745   // Compute information for handling adapters and adjusting the frame size of the caller.
 746   int caller_adjustment = 0;
 747 
 748   // Compute the amount the oldest interpreter frame will have to adjust
 749   // its caller's stack by. If the caller is a compiled frame then
 750   // we pretend that the callee has no parameters so that the
 751   // extension counts for the full amount of locals and not just
 752   // locals-parms. This is because without a c2i adapter the parm
 753   // area as created by the compiled frame will not be usable by
 754   // the interpreter. (Depending on the calling convention there
 755   // may not even be enough space).
 756 
 757   // QQQ I'd rather see this pushed down into last_frame_adjust
 758   // and have it take the sender (aka caller).
 759 
 760   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 761     caller_adjustment = last_frame_adjust(0, callee_locals);
 762   } else if (callee_locals > callee_parameters) {
 763     // The caller frame may need extending to accommodate
 764     // non-parameter locals of the first unpacked interpreted frame.
 765     // Compute that adjustment.
 766     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 767   }
 768 
 769   // If the sender is deoptimized we must retrieve the address of the handler
 770   // since the frame will "magically" show the original pc before the deopt
 771   // and we'd undo the deopt.
 772 
 773   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 774   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 775     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 776   }
 777 
 778   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 779 
 780 #if INCLUDE_JVMCI
 781   if (exceptionObject() != nullptr) {
 782     current->set_exception_oop(exceptionObject());
 783     exec_mode = Unpack_exception;
 784     assert(array->element(0)->rethrow_exception(), "must be");
 785   }
 786 #endif
 787 
 788   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 789     assert(current->has_pending_exception(), "should have thrown OOME");
 790     current->set_exception_oop(current->pending_exception());
 791     current->clear_pending_exception();
 792     exec_mode = Unpack_exception;
 793   }
 794 
 795 #if INCLUDE_JVMCI
 796   if (current->frames_to_pop_failed_realloc() > 0) {
 797     current->set_pending_monitorenter(false);
 798   }
 799 #endif
 800 
 801   int caller_actual_parameters = -1; // value not used except for interpreted frames, see below
 802   if (deopt_sender.is_interpreted_frame()) {
 803     caller_actual_parameters = callee_parameters + (caller_was_method_handle ? 1 : 0);
 804   }
 805 
 806   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
 807                                       caller_adjustment * BytesPerWord,
 808                                       caller_actual_parameters,
 809                                       number_of_frames,
 810                                       frame_sizes,
 811                                       frame_pcs,
 812                                       return_type,
 813                                       exec_mode);
 814   // On some platforms, we need a way to pass some platform dependent
 815   // information to the unpacking code so the skeletal frames come out
 816   // correct (initial fp value, unextended sp, ...)
 817   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 818 
 819   if (array->frames() > 1) {
 820     if (VerifyStack && TraceDeoptimization) {
 821       tty->print_cr("Deoptimizing method containing inlining");
 822     }
 823   }
 824 
 825   array->set_unroll_block(info);
 826   return info;
 827 }
 828 
  829 // Called to clean up deoptimization data structures in the normal case
  830 // after unpacking to the stack, and also when a stack overflow error occurs
 831 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
 832                                         vframeArray *array) {
 833 
 834   // Get array if coming from exception
 835   if (array == nullptr) {
 836     array = thread->vframe_array_head();
 837   }
 838   thread->set_vframe_array_head(nullptr);
 839 
 840   // Free the previous UnrollBlock
 841   vframeArray* old_array = thread->vframe_array_last();
 842   thread->set_vframe_array_last(array);
 843 
 844   if (old_array != nullptr) {
 845     UnrollBlock* old_info = old_array->unroll_block();
 846     old_array->set_unroll_block(nullptr);
 847     delete old_info;
 848     delete old_array;
 849   }
 850 
  851   // Deallocate any resources created in this routine and any ResourceObjs allocated
 852   // inside the vframeArray (StackValueCollections)
 853 
 854   delete thread->deopt_mark();
 855   thread->set_deopt_mark(nullptr);
 856   thread->set_deopt_compiled_method(nullptr);
 857 
 858 
 859   if (JvmtiExport::can_pop_frame()) {
 860     // Regardless of whether we entered this routine with the pending
 861     // popframe condition bit set, we should always clear it now
 862     thread->clear_popframe_condition();
 863   }
 864 
 865   // unpack_frames() is called at the end of the deoptimization handler
 866   // and (in C2) at the end of the uncommon trap handler. Note this fact
 867   // so that an asynchronous stack walker can work again. This counter is
 868   // incremented at the beginning of fetch_unroll_info() and (in C2) at
 869   // the beginning of uncommon_trap().
 870   thread->dec_in_deopt_handler();
 871 }
 872 
 873 // Moved from cpu directories because none of the cpus has callee save values.
 874 // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
 875 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 876 
 877   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
 878   // the days we had adapter frames. When we deoptimize a situation where a
  879   // compiled caller calls a compiled callee, the caller will have registers it expects
 880   // to survive the call to the callee. If we deoptimize the callee the only
 881   // way we can restore these registers is to have the oldest interpreter
 882   // frame that we create restore these values. That is what this routine
 883   // will accomplish.
 884 
 885   // At the moment we have modified c2 to not have any callee save registers
 886   // so this problem does not exist and this routine is just a place holder.
 887 
 888   assert(f->is_interpreted_frame(), "must be interpreted");
 889 }
 890 
 891 #ifndef PRODUCT
 892 #ifdef ASSERT
 893 // Return true if the execution after the provided bytecode continues at the
 894 // next bytecode in the code. This is not the case for gotos, returns, and
 895 // throws.
 896 static bool falls_through(Bytecodes::Code bc) {
 897   switch (bc) {
 898     case Bytecodes::_goto:
 899     case Bytecodes::_goto_w:
 900     case Bytecodes::_athrow:
 901     case Bytecodes::_areturn:
 902     case Bytecodes::_dreturn:
 903     case Bytecodes::_freturn:
 904     case Bytecodes::_ireturn:
 905     case Bytecodes::_lreturn:
 906     case Bytecodes::_jsr:
 907     case Bytecodes::_ret:
 908     case Bytecodes::_return:
 909     case Bytecodes::_lookupswitch:
 910     case Bytecodes::_tableswitch:
 911       return false;
 912     default:
 913       return true;
 914   }
 915 }
 916 #endif
 917 #endif
 918 
 919 // Return BasicType of value being returned
 920 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 921   assert(thread == JavaThread::current(), "pre-condition");
 922 
  923   // We are already active in the special DeoptResourceMark; any ResourceObjs we
 924   // allocate will be freed at the end of the routine.
 925 
 926   // JRT_LEAF methods don't normally allocate handles and there is a
 927   // NoHandleMark to enforce that. It is actually safe to use Handles
 928   // in a JRT_LEAF method, and sometimes desirable, but to do so we
 929   // must use ResetNoHandleMark to bypass the NoHandleMark, and
 930   // then use a HandleMark to ensure any Handles we do create are
 931   // cleaned up in this scope.
 932   ResetNoHandleMark rnhm;
 933   HandleMark hm(thread);
 934 
 935   frame stub_frame = thread->last_frame();
 936 
 937   Continuation::notify_deopt(thread, stub_frame.sp());
 938 
 939   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 940   // must point to the vframeArray for the unpack frame.
 941   vframeArray* array = thread->vframe_array_head();
 942   UnrollBlock* info = array->unroll_block();
 943 
 944   // We set the last_Java frame. But the stack isn't really parsable here. So we
 945   // clear it to make sure JFR understands not to try and walk stacks from events
 946   // in here.
 947   intptr_t* sp = thread->frame_anchor()->last_Java_sp();
 948   thread->frame_anchor()->set_last_Java_sp(nullptr);
 949 
 950   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
 951   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
 952 
 953   thread->frame_anchor()->set_last_Java_sp(sp);
 954 
 955   BasicType bt = info->return_type();
 956 
 957   // If we have an exception pending, claim that the return type is an oop
 958   // so the deopt_blob does not overwrite the exception_oop.
 959 
 960   if (exec_mode == Unpack_exception)
 961     bt = T_OBJECT;
 962 
 963   // Cleanup thread deopt data
 964   cleanup_deopt_info(thread, array);
 965 
 966 #ifndef PRODUCT
 967   if (VerifyStack) {
 968     ResourceMark res_mark;
 969     // Clear pending exception to not break verification code (restored afterwards)
 970     PreserveExceptionMark pm(thread);
 971 
 972     thread->validate_frame_layout();
 973 
 974     // Verify that the just-unpacked frames match the interpreter's
 975     // notions of expression stack and locals
 976     vframeArray* cur_array = thread->vframe_array_last();
 977     RegisterMap rm(thread,
 978                    RegisterMap::UpdateMap::skip,
 979                    RegisterMap::ProcessFrames::include,
 980                    RegisterMap::WalkContinuation::skip);
 981     rm.set_include_argument_oops(false);
 982     int callee_size_of_parameters = 0;
 983     for (int frame_idx = 0; frame_idx < cur_array->frames(); frame_idx++) {
 984       bool is_top_frame = (frame_idx == 0);
 985       vframeArrayElement* el = cur_array->element(frame_idx);
 986       frame* iframe = el->iframe();
 987       guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
 988       methodHandle mh(thread, iframe->interpreter_frame_method());
 989       bool reexecute = el->should_reexecute();
 990 
 991       int cur_invoke_parameter_size = 0;
 992       int top_frame_expression_stack_adjustment = 0;
 993       int max_bci = mh->code_size();
 994       BytecodeStream str(mh, iframe->interpreter_frame_bci());
 995       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
 996       Bytecodes::Code cur_code = str.next();
 997 
 998       if (!reexecute && !Bytecodes::is_invoke(cur_code)) {
 999         // We can only compute OopMaps for the before state, so we need to roll forward
1000         // to the next bytecode.
1001         assert(is_top_frame, "must be");
1002         assert(falls_through(cur_code), "must be");
1003         assert(cur_code != Bytecodes::_illegal, "illegal bytecode");
1004         assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
1005 
1006         // Need to subtract off the size of the result type of
1007         // the bytecode because this is not described in the
1008         // debug info but returned to the interpreter in the TOS
1009         // caching register
1010         BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
1011         if (bytecode_result_type != T_ILLEGAL) {
1012           top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
1013         }
 1014         assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must be non-negative");
1015 
1016         cur_code = str.next();
1017         // Reflect the fact that we have rolled forward and now need
1018         // top_frame_expression_stack_adjustment
1019         reexecute = true;
1020       }
1021 
1022       assert(cur_code != Bytecodes::_illegal, "illegal bytecode");
1023       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
1024 
1025       // Get the oop map for this bci
1026       InterpreterOopMap mask;
1027       OopMapCache::compute_one_oop_map(mh, str.bci(), &mask);
1028       // Check to see if we can grab the number of outgoing arguments
1029       // at an uncommon trap for an invoke (where the compiler
1030       // generates debug info before the invoke has executed)
1031       if (Bytecodes::is_invoke(cur_code)) {
1032         Bytecode_invoke invoke(mh, str.bci());
1033         cur_invoke_parameter_size = invoke.size_of_parameters();
1034         if (!is_top_frame && invoke.has_member_arg()) {
1035           callee_size_of_parameters++;
1036         }
1037       }
1038 
1039       // Verify stack depth and oops in frame
1040       auto match = [&]() {
1041         int iframe_expr_ssize = iframe->interpreter_frame_expression_stack_size();
1042 #if INCLUDE_JVMCI
1043         if (is_top_frame && el->rethrow_exception()) {
1044           return iframe_expr_ssize == 1;
1045         }
1046 #endif
1047         // This should only be needed for C1
1048         if (is_top_frame && exec_mode == Unpack_exception && iframe_expr_ssize == 0) {
1049           return true;
1050         }
1051         if (reexecute) {
1052           int expr_ssize_before = iframe_expr_ssize + top_frame_expression_stack_adjustment;
1053           int oopmap_expr_invoke_ssize = mask.expression_stack_size() + cur_invoke_parameter_size;
1054           return expr_ssize_before == oopmap_expr_invoke_ssize;
1055         } else {
1056           int oopmap_expr_callee_ssize = mask.expression_stack_size() + callee_size_of_parameters;
1057           return iframe_expr_ssize == oopmap_expr_callee_ssize;
1058         }
1059       };
1060       if (!match()) {
1061         // Print out some information that will help us debug the problem
1062         tty->print_cr("Wrong number of expression stack elements during deoptimization");
1063         tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", frame_idx, cur_array->frames() - 1);
1064         tty->print_cr("  Current code %s", Bytecodes::name(cur_code));
1065         tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
1066                       iframe->interpreter_frame_expression_stack_size());
1067         tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
1068         tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
1069         tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
1070         tty->print_cr("  exec_mode = %d", exec_mode);
1071         tty->print_cr("  original should_reexecute = %s", el->should_reexecute() ? "true" : "false");
1072         tty->print_cr("  reexecute = %s%s", reexecute ? "true" : "false",
1073                       (reexecute != el->should_reexecute()) ? " (changed)" : "");
1074 #if INCLUDE_JVMCI
1075         tty->print_cr("  rethrow_exception = %s", el->rethrow_exception() ? "true" : "false");
1076 #endif
1077         tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
1078         tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
1079         tty->print_cr("  Interpreted frames:");
1080         for (int k = 0; k < cur_array->frames(); k++) {
1081           vframeArrayElement* el = cur_array->element(k);
1082           tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
1083         }
1084         cur_array->print_on_2(tty);
1085         guarantee(false, "wrong number of expression stack elements during deopt");
1086       }
1087       VerifyOopClosure verify;
1088       iframe->oops_interpreted_do(&verify, &rm, false);
1089       callee_size_of_parameters = mh->size_of_parameters();
1090     }
1091   }
1092 #endif // !PRODUCT
1093 
1094   return bt;
1095 JRT_END
1096 
1097 class DeoptimizeMarkedHandshakeClosure : public HandshakeClosure {
1098  public:
1099   DeoptimizeMarkedHandshakeClosure() : HandshakeClosure("Deoptimize") {}
1100   void do_thread(Thread* thread) {
1101     JavaThread* jt = JavaThread::cast(thread);
1102     jt->deoptimize_marked_methods();
1103   }
1104 };
1105 
1106 void Deoptimization::deoptimize_all_marked() {
1107   ResourceMark rm;
1108 
1109   // Make the dependent methods not entrant
1110   CodeCache::make_marked_nmethods_deoptimized();
1111 
1112   DeoptimizeMarkedHandshakeClosure deopt;
1113   if (SafepointSynchronize::is_at_safepoint()) {
1114     Threads::java_threads_do(&deopt);
1115   } else {
1116     Handshake::execute(&deopt);
1117   }
1118 }
1119 
1120 Deoptimization::DeoptAction Deoptimization::_unloaded_action
1121   = Deoptimization::Action_reinterpret;
1122 
1123 #if INCLUDE_JVMCI
1124 template<typename CacheType>
1125 class BoxCacheBase : public CHeapObj<mtCompiler> {
1126 protected:
1127   static InstanceKlass* find_cache_klass(Thread* thread, Symbol* klass_name) {
1128     ResourceMark rm(thread);
1129     char* klass_name_str = klass_name->as_C_string();
1130     InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle());
1131     guarantee(ik != nullptr, "%s must be loaded", klass_name_str);
1132     if (!ik->is_in_error_state()) {
1133       guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
1134       CacheType::compute_offsets(ik);
1135     }
1136     return ik;
1137   }
1138 };
1139 
1140 template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache  : public BoxCacheBase<CacheType> {
1141   PrimitiveType _low;
1142   PrimitiveType _high;
1143   jobject _cache;
1144 protected:
1145   static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
1146   BoxCache(Thread* thread) {
1147     InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(thread, CacheType::symbol());
1148     if (ik->is_in_error_state()) {
1149       _low = 1;
1150       _high = 0;
1151       _cache = nullptr;
1152     } else {
1153       objArrayOop cache = CacheType::cache(ik);
1154       assert(cache->length() > 0, "Empty cache");
1155       _low = BoxType::value(cache->obj_at(0));
1156       _high = checked_cast<PrimitiveType>(_low + cache->length() - 1);
1157       _cache = JNIHandles::make_global(Handle(thread, cache));
1158     }
1159   }
1160   ~BoxCache() {
1161     JNIHandles::destroy_global(_cache);
1162   }
1163 public:
1164   static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
1165     if (_singleton == nullptr) {
1166       BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
1167       if (!AtomicAccess::replace_if_null(&_singleton, s)) {
1168         delete s;
1169       }
1170     }
1171     return _singleton;
1172   }
1173   oop lookup(PrimitiveType value) {
1174     if (_low <= value && value <= _high) {
1175       int offset = checked_cast<int>(value - _low);
1176       return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
1177     }
1178     return nullptr;
1179   }
1180   oop lookup_raw(intptr_t raw_value, bool& cache_init_error) {
1181     if (_cache == nullptr) {
1182       cache_init_error = true;
1183       return nullptr;
1184     }
1185     // Have to cast to avoid little/big-endian problems.
1186     if (sizeof(PrimitiveType) > sizeof(jint)) {
1187       jlong value = (jlong)raw_value;
1188       return lookup(value);
1189     }
1190     PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
1191     return lookup(value);
1192   }
1193 };
1194 
1195 typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
1196 typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
1197 typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
1198 typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
1199 typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;
1200 
1201 template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = nullptr;
1202 template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = nullptr;
1203 template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = nullptr;
1204 template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = nullptr;
1205 template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = nullptr;
1206 
1207 class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
1208   jobject _true_cache;
1209   jobject _false_cache;
1210 protected:
1211   static BooleanBoxCache *_singleton;
1212   BooleanBoxCache(Thread *thread) {
1213     InstanceKlass* ik = find_cache_klass(thread, java_lang_Boolean::symbol());
1214     if (ik->is_in_error_state()) {
1215       _true_cache = nullptr;
1216       _false_cache = nullptr;
1217     } else {
1218       _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
1219       _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
1220     }
1221   }
1222   ~BooleanBoxCache() {
1223     JNIHandles::destroy_global(_true_cache);
1224     JNIHandles::destroy_global(_false_cache);
1225   }
1226 public:
1227   static BooleanBoxCache* singleton(Thread* thread) {
1228     if (_singleton == nullptr) {
1229       BooleanBoxCache* s = new BooleanBoxCache(thread);
1230       if (!AtomicAccess::replace_if_null(&_singleton, s)) {
1231         delete s;
1232       }
1233     }
1234     return _singleton;
1235   }
1236   oop lookup_raw(intptr_t raw_value, bool& cache_in_error) {
1237     if (_true_cache == nullptr) {
1238       cache_in_error = true;
1239       return nullptr;
1240     }
1241     // Have to cast to avoid little/big-endian problems.
1242     jboolean value = (jboolean)*((jint*)&raw_value);
1243     return lookup(value);
1244   }
1245   oop lookup(jboolean value) {
1246     if (value != 0) {
1247       return JNIHandles::resolve_non_null(_true_cache);
1248     }
1249     return JNIHandles::resolve_non_null(_false_cache);
1250   }
1251 };
1252 
1253 BooleanBoxCache* BooleanBoxCache::_singleton = nullptr;
1254 
1255 oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS) {
1256    Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
1257    BasicType box_type = vmClasses::box_klass_type(k);
1258    if (box_type != T_OBJECT) {
1259      StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
1260      switch(box_type) {
1261        case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1262        case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1263        case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1264        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1265        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1266        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1267        default:;
1268      }
1269    }
1270    return nullptr;
1271 }
1272 #endif // INCLUDE_JVMCI
1273 
1274 #if COMPILER2_OR_JVMCI
1275 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
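       // Save and clear any pending exception so the allocations below can proceed;
       // it is restored at the end unless reallocation itself failed.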
1276   Handle pending_exception(THREAD, thread->pending_exception());
1277   const char* exception_file = thread->exception_file();
1278   int exception_line = thread->exception_line();
1279   thread->clear_pending_exception();
1280 
1281   bool failures = false;
1282 
1283   for (int i = 0; i < objects->length(); i++) {
1284     assert(objects->at(i)->is_object(), "invalid debug information");
1285     ObjectValue* sv = (ObjectValue*) objects->at(i);
1286     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1287     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1288 
1289     // Check if the object may be null and has an additional null_marker input that needs
1290     // to be checked before using the field values. Skip re-allocation if it is null.
1291     if (k->is_inline_klass() && sv->has_properties()) {
1292       jint null_marker = StackValue::create_stack_value(fr, reg_map, sv->properties())->get_jint();
1293       if (null_marker == 0) {
1294         continue;
1295       }
1296     }
1297 
1298     oop obj = nullptr;
1299     bool cache_init_error = false;
1300     if (k->is_instance_klass()) {
1301 #if INCLUDE_JVMCI
1302       nmethod* nm = fr->cb()->as_nmethod_or_null();
1303       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1304         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1305         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1306         if (obj != nullptr) {
1307           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1308           abv->set_cached(true);
1309         } else if (cache_init_error) {
1310           // Results in an OOME, which is valid (as opposed to a class initialization
1311           // error) and is fine for the rare case of a cache initialization failing.
1312           failures = true;
1313         }
1314       }
1315 #endif // INCLUDE_JVMCI
1316 
1317       InstanceKlass* ik = InstanceKlass::cast(k);
1318       if (obj == nullptr && !cache_init_error) {
1319         InternalOOMEMark iom(THREAD);
1320         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1321           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1322         } else {
1323           obj = ik->allocate_instance(THREAD);
1324         }
1325       }
1326     } else if (k->is_flatArray_klass()) {
1327       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1328       // Inline type array must be zeroed because not all memory is reassigned
1329       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1330     } else if (k->is_typeArray_klass()) {
1331       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1332       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1333       int len = sv->field_size() / type2size[ak->element_type()];
1334       InternalOOMEMark iom(THREAD);
1335       obj = ak->allocate_instance(len, THREAD);
1336     } else if (k->is_refArray_klass()) {
1337       RefArrayKlass* ak = RefArrayKlass::cast(k);
1338       InternalOOMEMark iom(THREAD);
1339       obj = ak->allocate_instance(sv->field_size(), ak->properties(), THREAD);
1340     }
1341 
1342     if (obj == nullptr) {
1343       failures = true;
1344     }
1345 
1346     assert(sv->value().is_null(), "redundant reallocation");
1347     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1348     CLEAR_PENDING_EXCEPTION;
1349     sv->set_value(obj);
1350   }
1351 
1352   if (failures) {
1353     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1354   } else if (pending_exception.not_null()) {
1355     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1356   }
1357 
1358   return failures;
1359 }
1360 
1361 // We're deoptimizing at the return of a call; the inline type's fields are
1362 // in registers. When we go back to the interpreter, it will expect a
1363 // reference to an inline type instance. Allocate one here and initialize it
1364 // from the register values.
1365 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1366   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1367   if (new_vt == nullptr) {
1368     CLEAR_PENDING_EXCEPTION;
1369     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1370   }
1371   return_oops.clear();
1372   return_oops.push(Handle(THREAD, new_vt));
1373   return false;
1374 }
1375 
1376 #if INCLUDE_JVMCI
1377 /**
1378  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1379  * we need to somehow be able to recover the actual kind to be able to write the correct
1380  * amount of bytes.
1381  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1382  * the entries at index i + 1 to i + n - 1 are 'markers'.
1383  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1384  * expected form of the array would be:
1385  *
1386  * {b0, b1, b2, b3, INT, marker, b6, b7}
1387  *
1388  * Thus, in order to get back the size of the entry, we simply need to count the number
1389  * of marked entries.
1390  *
1391  * @param virtualArray the virtualized byte array
1392  * @param i index of the virtual entry we are recovering
1393  * @return The number of bytes the entry spans
1394  */
1395 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1396   int index = i;
1397   while (++index < virtualArray->field_size() &&
1398            virtualArray->field_at(index)->is_marker()) {}
1399   return index - i;
1400 }
1401 
1402 /**
1403  * If there were a guarantee that byte arrays always start aligned to a long, we could
1404  * do a simple check on the parity of the index. Unfortunately, that is not always the
1405  * case. Thus, we check the alignment of the actual address we are writing to.
1406  * In the unlikely case that index 0 is 5-aligned, for example, it would then be
1407  * possible to write a long at index 3.
1408  */
1409 static jbyte* check_alignment_get_addr(typeArrayOop obj, int index, int expected_alignment) {
1410   jbyte* res = obj->byte_at_addr(index);
1411   assert((((intptr_t) res) % expected_alignment) == 0, "Non-aligned write");
1412   return res;
1413 }
1414 
1415 static void byte_array_put(typeArrayOop obj, StackValue* value, int index, int byte_count) {
1416   switch (byte_count) {
1417     case 1:
1418       obj->byte_at_put(index, (jbyte) value->get_jint());
1419       break;
1420     case 2:
1421       *((jshort *) check_alignment_get_addr(obj, index, 2)) = (jshort) value->get_jint();
1422       break;
1423     case 4:
1424       *((jint *) check_alignment_get_addr(obj, index, 4)) = value->get_jint();
1425       break;
1426     case 8:
1427       *((jlong *) check_alignment_get_addr(obj, index, 8)) = (jlong) value->get_intptr();
1428       break;
1429     default:
1430       ShouldNotReachHere();
1431   }
1432 }
1433 #endif // INCLUDE_JVMCI
1434 
1435 
1436 // restore elements of an eliminated type array
1437 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
1438   int index = 0;
1439 
1440   for (int i = 0; i < sv->field_size(); i++) {
1441     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1442     switch(type) {
1443     case T_LONG: case T_DOUBLE: {
1444       assert(value->type() == T_INT, "Agreement.");
1445       StackValue* low =
1446         StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1447 #ifdef _LP64
1448       jlong res = (jlong)low->get_intptr();
1449 #else
1450       jlong res = jlong_from(value->get_jint(), low->get_jint());
1451 #endif
1452       obj->long_at_put(index, res);
1453       break;
1454     }
1455 
1456     case T_INT: case T_FLOAT: { // 4 bytes.
1457       assert(value->type() == T_INT, "Agreement.");
1458 #if INCLUDE_JVMCI
1459       // 'big_value' handles a double/long value encoded as a pair (e.g. [int = 0, long])
1460       // and stored in two array elements.
1461       bool big_value = false;
1462       if (i + 1 < sv->field_size() && type == T_INT) {
1463         if (sv->field_at(i)->is_location()) {
1464           Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
1465           if (type == Location::dbl || type == Location::lng) {
1466             big_value = true;
1467           }
1468         } else if (sv->field_at(i)->is_constant_int()) {
1469           ScopeValue* next_scope_field = sv->field_at(i + 1);
1470           if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1471             big_value = true;
1472           }
1473         }
1474       }
1475 
1476       if (big_value) {
1477         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1478   #ifdef _LP64
1479         jlong res = (jlong)low->get_intptr();
1480   #else
1481         jlong res = jlong_from(value->get_jint(), low->get_jint());
1482   #endif
1483         obj->int_at_put(index, *(jint*)&res);
1484         obj->int_at_put(++index, *((jint*)&res + 1));
1485       } else {
1486         obj->int_at_put(index, value->get_jint());
1487       }
1488 #else // not INCLUDE_JVMCI
1489       obj->int_at_put(index, value->get_jint());
1490 #endif // INCLUDE_JVMCI
1491       break;
1492     }
1493 
1494     case T_SHORT:
1495       assert(value->type() == T_INT, "Agreement.");
1496       obj->short_at_put(index, (jshort)value->get_jint());
1497       break;
1498 
1499     case T_CHAR:
1500       assert(value->type() == T_INT, "Agreement.");
1501       obj->char_at_put(index, (jchar)value->get_jint());
1502       break;
1503 
1504     case T_BYTE: {
1505       assert(value->type() == T_INT, "Agreement.");
1506 #if INCLUDE_JVMCI
1507       // The value we get is erased as a regular int. We will need to find its actual byte count 'by hand'.
1508       int byte_count = count_number_of_bytes_for_entry(sv, i);
1509       byte_array_put(obj, value, index, byte_count);
1510       // According to the byte_count contract, the entries from i + 1 to i + byte_count - 1 are markers. Skip them.
1511       i += byte_count - 1; // Balance the loop counter.
1512       index += byte_count;
1513       // index has been updated so continue at top of loop
1514       continue;
1515 #else
1516       obj->byte_at_put(index, (jbyte)value->get_jint());
1517       break;
1518 #endif // INCLUDE_JVMCI
1519     }
1520 
1521     case T_BOOLEAN: {
1522       assert(value->type() == T_INT, "Agreement.");
1523       obj->bool_at_put(index, (jboolean)value->get_jint());
1524       break;
1525     }
1526 
1527     default:
1528       ShouldNotReachHere();
1529     }
1530     index++;
1531   }
1532 }
1533 
1534 // restore elements of an eliminated object array
1535 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1536   for (int i = 0; i < sv->field_size(); i++) {
1537     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1538     assert(value->type() == T_OBJECT, "object element expected");
1539     obj->obj_at_put(i, value->get_obj()());
1540   }
1541 }
1542 
1543 class ReassignedField {
1544 public:
1545   int _offset;
1546   BasicType _type;
1547   InstanceKlass* _klass;
1548   bool _is_flat;
1549   bool _is_null_free;
1550 public:
1551   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }
1552 };
1553 
1554 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1555 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1556   InstanceKlass* super = klass->super();
1557   if (super != nullptr) {
1558     get_reassigned_fields(super, fields, is_jvmci);
1559   }
1560   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1561     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1562       ReassignedField field;
1563       field._offset = fs.offset();
1564       field._type = Signature::basic_type(fs.signature());
1565       if (fs.is_flat()) {
1566         field._is_flat = true;
1567         field._is_null_free = fs.is_null_free_inline_type();
1568         // Resolve klass of flat inline type field
1569         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1570       }
1571       fields->append(field);
1572     }
1573   }
1574   return fields;
1575 }
1576 
1577 // Restore fields of an eliminated instance object employing the same field order used by the
1578 // compiler when it scalarizes an object at safepoints.
1579 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, TRAPS) {
1580   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1581   for (int i = 0; i < fields->length(); i++) {
1582     BasicType type = fields->at(i)._type;
1583     int offset = base_offset + fields->at(i)._offset;
1584     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1585     if (fields->at(i)._is_flat) {
1586       // Recursively re-assign flat inline type fields
1587       InstanceKlass* vk = fields->at(i)._klass;
1588       assert(vk != nullptr, "must be resolved");
1589       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1590       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, CHECK_0);
1591       if (!fields->at(i)._is_null_free) {
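             // A nullable flat field carries an extra scope value for its null marker;
             // write that marker right after the payload fields.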
1592         ScopeValue* scope_field = sv->field_at(svIndex);
1593         StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1594         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1595         obj->bool_field_put(nm_offset, value->get_jint() & 1);
1596         svIndex++;
1597       }
1598       continue; // Continue because we don't need to increment svIndex
1599     }
1600 
1601     ScopeValue* scope_field = sv->field_at(svIndex);
1602     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1603     switch (type) {
1604       case T_OBJECT:
1605       case T_ARRAY:
1606         assert(value->type() == T_OBJECT, "Agreement.");
1607         obj->obj_field_put(offset, value->get_obj()());
1608         break;
1609 
1610       case T_INT: case T_FLOAT: { // 4 bytes.
1611         assert(value->type() == T_INT, "Agreement.");
1612         bool big_value = false;
1613         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1614           if (scope_field->is_location()) {
1615             Location::Type type = ((LocationValue*) scope_field)->location().type();
1616             if (type == Location::dbl || type == Location::lng) {
1617               big_value = true;
1618             }
1619           }
1620           if (scope_field->is_constant_int()) {
1621             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1622             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1623               big_value = true;
1624             }
1625           }
1626         }
1627 
1628         if (big_value) {
1629           i++;
1630           assert(i < fields->length(), "second T_INT field needed");
1631           assert(fields->at(i)._type == T_INT, "T_INT field needed");
1632         } else {
1633           obj->int_field_put(offset, value->get_jint());
1634           break;
1635         }
1636       }
1637         /* no break */
1638 
1639       case T_LONG: case T_DOUBLE: {
1640         assert(value->type() == T_INT, "Agreement.");
1641         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
1642 #ifdef _LP64
1643         jlong res = (jlong)low->get_intptr();
1644 #else
1645         jlong res = jlong_from(value->get_jint(), low->get_jint());
1646 #endif
1647         obj->long_field_put(offset, res);
1648         break;
1649       }
1650 
1651       case T_SHORT:
1652         assert(value->type() == T_INT, "Agreement.");
1653         obj->short_field_put(offset, (jshort)value->get_jint());
1654         break;
1655 
1656       case T_CHAR:
1657         assert(value->type() == T_INT, "Agreement.");
1658         obj->char_field_put(offset, (jchar)value->get_jint());
1659         break;
1660 
1661       case T_BYTE:
1662         assert(value->type() == T_INT, "Agreement.");
1663         obj->byte_field_put(offset, (jbyte)value->get_jint());
1664         break;
1665 
1666       case T_BOOLEAN:
1667         assert(value->type() == T_INT, "Agreement.");
1668         obj->bool_field_put(offset, (jboolean)value->get_jint());
1669         break;
1670 
1671       default:
1672         ShouldNotReachHere();
1673     }
1674     svIndex++;
1675   }
1676 
1677   return svIndex;
1678 }
1679 
1680 // restore fields of an eliminated inline type array
1681 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1682   InlineKlass* vk = vak->element_klass();
1683   assert(vk->maybe_flat_in_array(), "should only be used for flat inline type arrays");
1684   // Adjust offset to omit oop header
1685   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - vk->payload_offset();
1686   // Initialize all elements of the flat inline type array
1687   for (int i = 0; i < sv->field_size(); i++) {
1688     ObjectValue* val = sv->field_at(i)->as_ObjectValue();
1689     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1690     reassign_fields_by_klass(vk, fr, reg_map, val, 0, (oop)obj, is_jvmci, offset, CHECK);
1691     if (!obj->is_null_free_array()) {
1692       jboolean null_marker_value;
1693       if (val->has_properties()) {
1694         null_marker_value = StackValue::create_stack_value(fr, reg_map, val->properties())->get_jint() & 1;
1695       } else {
1696         null_marker_value = 1;
1697       }
1698       obj->bool_field_put(offset + vk->null_marker_offset(), null_marker_value);
1699     }
1700   }
1701 }
1702 
1703 // restore fields of all eliminated objects and arrays
1704 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
1705   for (int i = 0; i < objects->length(); i++) {
1706     assert(objects->at(i)->is_object(), "invalid debug information");
1707     ObjectValue* sv = (ObjectValue*) objects->at(i);
1708     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1709     k = get_refined_array_klass(k, fr, reg_map, sv, THREAD);
1710 
1711     Handle obj = sv->value();
1712     assert(obj.not_null() || realloc_failures || sv->has_properties(), "reallocation was missed");
1713 #ifndef PRODUCT
1714     if (PrintDeoptimizationDetails) {
1715       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1716     }
1717 #endif // !PRODUCT
1718 
1719     if (obj.is_null()) {
1720       continue;
1721     }
1722 
1723 #if INCLUDE_JVMCI
1724     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1725     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1726       continue;
1727     }
1728 #endif // INCLUDE_JVMCI
1729     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1730       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1731       ScopeValue* payload = sv->field_at(0);
1732       if (payload->is_location() &&
1733           payload->as_LocationValue()->location().type() == Location::vector) {
1734 #ifndef PRODUCT
1735         if (PrintDeoptimizationDetails) {
1736           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1737           if (Verbose) {
1738             Handle obj = sv->value();
1739             k->oop_print_on(obj(), tty);
1740           }
1741         }
1742 #endif // !PRODUCT
1743         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1744       }
1745       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1746       // which could be restored after vector object allocation.
1747     }
1748     if (k->is_instance_klass()) {
1749       InstanceKlass* ik = InstanceKlass::cast(k);
1750       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, CHECK);
1751     } else if (k->is_flatArray_klass()) {
1752       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1753       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
1754     } else if (k->is_typeArray_klass()) {
1755       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1756       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1757     } else if (k->is_refArray_klass()) {
1758       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1759     }
1760   }
1761   // These objects may escape when we return to Interpreter after deoptimization.
1762   // We need a barrier so that stores that initialize these objects can't be reordered
1763   // with subsequent stores that make these objects accessible by other threads.
1764   OrderAccess::storestore();
1765 }
1766 
1767 
1768 // relock objects for which synchronization was eliminated
1769 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1770                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1771   bool relocked_objects = false;
1772   for (int i = 0; i < monitors->length(); i++) {
1773     MonitorInfo* mon_info = monitors->at(i);
1774     if (mon_info->eliminated()) {
1775       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1776       relocked_objects = true;
1777       if (!mon_info->owner_is_scalar_replaced()) {
1778         Handle obj(thread, mon_info->owner());
1779         markWord mark = obj->mark();
1780         if (exec_mode == Unpack_none) {
1781           if (mark.has_monitor()) {
1782             // Defer relocking if the deoptee thread is currently waiting for obj.
1783             ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
1784             if (waiting_monitor != nullptr && waiting_monitor->object() == obj()) {
1785               assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
1786               if (UseObjectMonitorTable) {
1787                 mon_info->lock()->clear_object_monitor_cache();
1788               }
1789 #ifdef ASSERT
1790               else {
1791                 assert(!UseObjectMonitorTable, "must be");
1792                 mon_info->lock()->set_bad_monitor_deopt();
1793               }
1794 #endif
1795               JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
1796               continue;
1797             }
1798           }
1799         }
1800         BasicLock* lock = mon_info->lock();
1801         // We have lost information about the correct state of the lock stack.
1802         // Entering may create an invalid lock stack. Inflate the lock if it
1803         // was fast_locked to restore the valid lock stack.
1804         if (UseObjectMonitorTable) {
1805           // UseObjectMonitorTable expects the BasicLock cache to be either a
1806           // valid ObjectMonitor* or nullptr. Right now it is garbage, set it
1807           // to nullptr.
1808           lock->clear_object_monitor_cache();
1809         }
1810         ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
1811         if (deoptee_thread->lock_stack().contains(obj())) {
1812           LightweightSynchronizer::inflate_fast_locked_object(obj(), ObjectSynchronizer::InflateCause::inflate_cause_vm_internal,
1813                                                                deoptee_thread, thread);
1814         }
1815         assert(mon_info->owner()->is_locked(), "object must be locked now");
1816         assert(obj->mark().has_monitor(), "must be");
1817         assert(!deoptee_thread->lock_stack().contains(obj()), "must be");
1818         assert(ObjectSynchronizer::read_monitor(thread, obj(), obj->mark())->has_owner(deoptee_thread), "must be");
1819       }
1820     }
1821   }
1822   return relocked_objects;
1823 }
1824 #endif // COMPILER2_OR_JVMCI
1825 
1826 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1827   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1828 
1829   // Register map for next frame (used for stack crawl).  We capture
1830   // the state of the deopt'ing frame's caller.  Thus if we need to
1831   // stuff a C2I adapter we can properly fill in the callee-save
1832   // register locations.
1833   frame caller = fr.sender(reg_map);
1834   int frame_size = pointer_delta_as_int(caller.sp(), fr.sp());
1835 
1836   frame sender = caller;
1837 
1838   // Since the Java thread being deoptimized will eventually adjust its own stack,
1839   // the vframeArray containing the unpacking information is allocated in the C heap.
1840   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1841   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
1842 
1843   // Compare the vframeArray to the collected vframes
1844   assert(array->structural_compare(thread, chunk), "just checking");
1845 
1846   if (TraceDeoptimization) {
1847     ResourceMark rm;
1848     stringStream st;
1849     st.print_cr("DEOPT PACKING thread=" INTPTR_FORMAT " vframeArray=" INTPTR_FORMAT, p2i(thread), p2i(array));
1850     st.print("   ");
1851     fr.print_on(&st);
1852     st.print_cr("   Virtual frames (innermost/newest first):");
1853     for (int index = 0; index < chunk->length(); index++) {
1854       compiledVFrame* vf = chunk->at(index);
1855       int bci = vf->raw_bci();
1856       const char* code_name;
1857       if (bci == SynchronizationEntryBCI) {
1858         code_name = "sync entry";
1859       } else {
1860         Bytecodes::Code code = vf->method()->code_at(bci);
1861         code_name = Bytecodes::name(code);
1862       }
1863 
1864       st.print("      VFrame %d (" INTPTR_FORMAT ")", index, p2i(vf));
1865       st.print(" - %s", vf->method()->name_and_sig_as_C_string());
1866       st.print(" - %s", code_name);
1867       st.print_cr(" @ bci=%d ", bci);
1868     }
1869     tty->print_raw(st.freeze());
1870     tty->cr();
1871   }
1872 
1873   return array;
1874 }
1875 
1876 #if COMPILER2_OR_JVMCI
1877 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1878   // Reallocation of some scalar replaced objects failed. Record
1879   // that we need to pop all the interpreter frames for the
1880   // deoptimized compiled frame.
1881   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1882   thread->set_frames_to_pop_failed_realloc(array->frames());
1883   // Unlock all monitors here otherwise the interpreter will see a
1884   // mix of locked and unlocked monitors (because of failed
1885   // reallocations of synchronized objects) and be confused.
1886   for (int i = 0; i < array->frames(); i++) {
1887     MonitorChunk* monitors = array->element(i)->monitors();
1888     if (monitors != nullptr) {
1889       // Unlock in reverse order starting from most nested monitor.
1890       for (int j = (monitors->number_of_monitors() - 1); j >= 0; j--) {
1891         BasicObjectLock* src = monitors->at(j);
1892         if (src->obj() != nullptr) {
1893           ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
1894         }
1895       }
1896       array->element(i)->free_monitors();
1897 #ifdef ASSERT
1898       array->element(i)->set_removed_monitors();
1899 #endif
1900     }
1901   }
1902 }
1903 #endif
1904 
1905 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1906   assert(fr.can_be_deoptimized(), "checking frame type");
1907 
1908   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1909 
1910   if (LogCompilation && xtty != nullptr) {
1911     nmethod* nm = fr.cb()->as_nmethod_or_null();
1912     assert(nm != nullptr, "only compiled methods can deopt");
1913 
1914     ttyLocker ttyl;
1915     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1916     nm->log_identity(xtty);
1917     xtty->end_head();
1918     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1919       xtty->begin_elem("jvms bci='%d'", sd->bci());
1920       xtty->method(sd->method());
1921       xtty->end_elem();
1922       if (sd->is_top())  break;
1923     }
1924     xtty->tail("deoptimized");
1925   }
1926 
1927   Continuation::notify_deopt(thread, fr.sp());
1928 
1929   // Patch the compiled method so that when execution returns to it we will
1930   // deopt the execution state and return to the interpreter.
1931   fr.deoptimize(thread);
1932 }
1933 
1934 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1935   // Deoptimize only if the frame comes from compiled code.
1936   // Do not deoptimize the frame which is already patched
1937   // during the execution of the loops below.
1938   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1939     return;
1940   }
1941   ResourceMark rm;
1942   deoptimize_single_frame(thread, fr, reason);
1943 }
1944 
1945 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant) {
1946   // there is no exception handler for this pc => deoptimize
1947   if (make_not_entrant) {
1948     nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
1949   }
1950 
1951   // Use Deoptimization::deoptimize for all of its side-effects:
1952   // gathering traps statistics, logging...
1953   // it also patches the return pc but we do not care about that
1954   // since we return a continuation to the deopt_blob below.
1955   JavaThread* thread = JavaThread::current();
1956   RegisterMap reg_map(thread,
1957                       RegisterMap::UpdateMap::skip,
1958                       RegisterMap::ProcessFrames::include,
1959                       RegisterMap::WalkContinuation::skip);
1960   frame runtime_frame = thread->last_frame();
1961   frame caller_frame = runtime_frame.sender(&reg_map);
1962   assert(caller_frame.cb()->as_nmethod_or_null() == nm, "expect top frame compiled method");
1963 
1964   Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
1965 
1966   if (!nm->is_compiled_by_jvmci()) {
1967     return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
1968   }
1969 
1970 #if INCLUDE_JVMCI
1971   // JVMCI support
1972   vframe* vf = vframe::new_vframe(&caller_frame, &reg_map, thread);
1973   compiledVFrame* cvf = compiledVFrame::cast(vf);
1974   ScopeDesc* imm_scope = cvf->scope();
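       // Record that an exception was seen at the innermost scope's bci so the
       // profile reflects it for future compilations.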
1975   MethodData* imm_mdo = get_method_data(thread, methodHandle(thread, imm_scope->method()), true);
1976   if (imm_mdo != nullptr) {
1977     // Lock to read ProfileData, and ensure lock is not broken by a safepoint
1978     MutexLocker ml(imm_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
1979 
1980     ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), nullptr);
1981     if (pdata != nullptr && pdata->is_BitData()) {
1982       BitData* bit_data = (BitData*) pdata;
1983       bit_data->set_exception_seen();
1984     }
1985   }
1986 
1987 
1988   MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, nm->method()), true);
1989   if (trap_mdo != nullptr) {
1990     trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
1991   }
1992 #endif
1993 
1994   return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
1995 }
1996 
1997 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1998   assert(thread == Thread::current() ||
1999          thread->is_handshake_safe_for(Thread::current()) ||
2000          SafepointSynchronize::is_at_safepoint(),
2001          "can only deoptimize other thread at a safepoint/handshake");
2002   // Compute frame and register map based on thread and sp.
2003   RegisterMap reg_map(thread,
2004                       RegisterMap::UpdateMap::skip,
2005                       RegisterMap::ProcessFrames::include,
2006                       RegisterMap::WalkContinuation::skip);
2007   frame fr = thread->last_frame();
2008   while (fr.id() != id) {
2009     fr = fr.sender(&reg_map);
2010   }
2011   deoptimize(thread, fr, reason);
2012 }
2013 
2014 
2015 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
2016   Thread* current = Thread::current();
2017   if (thread == current || thread->is_handshake_safe_for(current)) {
2018     Deoptimization::deoptimize_frame_internal(thread, id, reason);
2019   } else {
2020     VM_DeoptimizeFrame deopt(thread, id, reason);
2021     VMThread::execute(&deopt);
2022   }
2023 }
2024 
2025 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
2026   deoptimize_frame(thread, id, Reason_constraint);
2027 }
2028 
2029 // JVMTI PopFrame support
2030 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
2031 {
2032   assert(thread == JavaThread::current(), "pre-condition");
2033   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
2034 }
2035 JRT_END
2036 
2037 MethodData*
2038 Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
2039                                 bool create_if_missing) {
2040   JavaThread* THREAD = thread; // For exception macros.
2041   MethodData* mdo = m()->method_data();
2042   if (mdo == nullptr && create_if_missing && !HAS_PENDING_EXCEPTION) {
2043     // Build an MDO.  Ignore errors like OutOfMemory;
2044     // that simply means we won't have an MDO to update.
2045     Method::build_profiling_method_data(m, THREAD);
2046     if (HAS_PENDING_EXCEPTION) {
2047       // Only metaspace OOM is expected. No Java code executed.
2048       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
2049       CLEAR_PENDING_EXCEPTION;
2050     }
2051     mdo = m()->method_data();
2052   }
2053   return mdo;
2054 }
2055 
2056 #if COMPILER2_OR_JVMCI
2057 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
2058   // In case of an unresolved klass entry, load the class.
2059   // This path is exercised from case _ldc in Parse::do_one_bytecode,
2060   // and probably nowhere else.
2061   // Even that case would benefit from simply re-interpreting the
2062   // bytecode, without paying special attention to the class index.
2063   // So this whole "class index" feature should probably be removed.
2064 
2065   if (constant_pool->tag_at(index).is_unresolved_klass()) {
2066     Klass* tk = constant_pool->klass_at(index, THREAD);
2067     if (HAS_PENDING_EXCEPTION) {
2068       // Exception happened during classloading. We ignore the exception here, since it
2069       // is going to be rethrown since the current activation is going to be deoptimized and
2070       // the interpreter will re-execute the bytecode.
2071       // Do not clear probable Async Exceptions.
2072       CLEAR_PENDING_NONASYNC_EXCEPTION;
2073       // Class loading called java code which may have caused a stack
2074       // overflow. If the exception was thrown right before the return
2075       // to the runtime the stack is no longer guarded. Reguard the
2076       // stack otherwise if we return to the uncommon trap blob and the
2077       // stack bang causes a stack overflow we crash.
2078       JavaThread* jt = THREAD;
2079       bool guard_pages_enabled = jt->stack_overflow_state()->reguard_stack_if_needed();
2080       assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
2081     }
2082     return;
2083   }
2084 
2085   assert(!constant_pool->tag_at(index).is_symbol(),
2086          "no symbolic names here, please");
2087 }
2088 
2089 #if INCLUDE_JFR
2090 
2091 class DeoptReasonSerializer : public JfrSerializer {
2092  public:
2093   void serialize(JfrCheckpointWriter& writer) {
2094     writer.write_count((u4)(Deoptimization::Reason_LIMIT + 1)); // + Reason::many (-1)
2095     for (int i = -1; i < Deoptimization::Reason_LIMIT; ++i) {
2096       writer.write_key((u8)i);
2097       writer.write(Deoptimization::trap_reason_name(i));
2098     }
2099   }
2100 };
2101 
2102 class DeoptActionSerializer : public JfrSerializer {
2103  public:
2104   void serialize(JfrCheckpointWriter& writer) {
2105     static const u4 nof_actions = Deoptimization::Action_LIMIT;
2106     writer.write_count(nof_actions);
2107     for (u4 i = 0; i < Deoptimization::Action_LIMIT; ++i) {
2108       writer.write_key(i);
2109       writer.write(Deoptimization::trap_action_name((int)i));
2110     }
2111   }
2112 };
2113 
2114 static void register_serializers() {
2115   static int critical_section = 0;
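       // Only the first caller to move critical_section from 0 to 1 registers the
       // serializers; concurrent and later callers return immediately.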
2116   if (1 == critical_section || AtomicAccess::cmpxchg(&critical_section, 0, 1) == 1) {
2117     return;
2118   }
2119   JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONREASON, true, new DeoptReasonSerializer());
2120   JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONACTION, true, new DeoptActionSerializer());
2121 }
2122 
2123 static void post_deoptimization_event(nmethod* nm,
2124                                       const Method* method,
2125                                       int trap_bci,
2126                                       int instruction,
2127                                       Deoptimization::DeoptReason reason,
2128                                       Deoptimization::DeoptAction action) {
2129   assert(nm != nullptr, "invariant");
2130   assert(method != nullptr, "invariant");
2131   if (EventDeoptimization::is_enabled()) {
2132     static bool serializers_registered = false;
2133     if (!serializers_registered) {
2134       register_serializers();
2135       serializers_registered = true;
2136     }
2137     EventDeoptimization event;
2138     event.set_compileId(nm->compile_id());
2139     event.set_compiler(nm->compiler_type());
2140     event.set_method(method);
2141     event.set_lineNumber(method->line_number_from_bci(trap_bci));
2142     event.set_bci(trap_bci);
2143     event.set_instruction(instruction);
2144     event.set_reason(reason);
2145     event.set_action(action);
2146     event.commit();
2147   }
2148 }
2149 
2150 #endif // INCLUDE_JFR
2151 
2152 static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci,
2153                               const char* reason_name, const char* reason_action) {
2154   LogTarget(Debug, deoptimization) lt;
2155   if (lt.is_enabled()) {
2156     LogStream ls(lt);
2157     bool is_osr = nm->is_osr_method();
2158     ls.print("cid=%4d %s level=%d",
2159              nm->compile_id(), (is_osr ? "osr" : "   "), nm->comp_level());
2160     ls.print(" %s", tm->name_and_sig_as_C_string());
2161     ls.print(" trap_bci=%d ", trap_bci);
2162     if (is_osr) {
2163       ls.print("osr_bci=%d ", nm->osr_entry_bci());
2164     }
2165     ls.print("%s ", reason_name);
2166     ls.print("%s ", reason_action);
2167     ls.print_cr("pc=" INTPTR_FORMAT " relative_pc=" INTPTR_FORMAT,
2168              pc, fr.pc() - nm->code_begin());
2169   }
2170 }
2171 
2172 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint trap_request)) {
2173   HandleMark hm(current);
2174 
2175   // uncommon_trap() is called at the beginning of the uncommon trap
2176   // handler. Note this fact before we start generating temporary frames
2177   // that can confuse an asynchronous stack walker. This counter is
2178   // decremented at the end of unpack_frames().
2179 
2180   current->inc_in_deopt_handler();
2181 
2182 #if INCLUDE_JVMCI
2183   // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
2184   RegisterMap reg_map(current,
2185                       RegisterMap::UpdateMap::include,
2186                       RegisterMap::ProcessFrames::include,
2187                       RegisterMap::WalkContinuation::skip);
2188 #else
2189   RegisterMap reg_map(current,
2190                       RegisterMap::UpdateMap::skip,
2191                       RegisterMap::ProcessFrames::include,
2192                       RegisterMap::WalkContinuation::skip);
2193 #endif
2194   frame stub_frame = current->last_frame();
2195   frame fr = stub_frame.sender(&reg_map);
2196 
2197   // Log a message
2198   Events::log_deopt_message(current, "Uncommon trap: trap_request=" INT32_FORMAT_X_0 " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
2199               trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
2200 
2201   {
2202     ResourceMark rm;
2203 
2204     DeoptReason reason = trap_request_reason(trap_request);
2205     DeoptAction action = trap_request_action(trap_request);
2206 #if INCLUDE_JVMCI
2207     int debug_id = trap_request_debug_id(trap_request);
2208 #endif
2209     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
2210 
2211     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, current);
2212     compiledVFrame* cvf = compiledVFrame::cast(vf);
2213 
2214     nmethod* nm = cvf->code();
2215 
2216     ScopeDesc*      trap_scope  = cvf->scope();
2217 
2218     bool is_receiver_constraint_failure = COMPILER2_PRESENT(VerifyReceiverTypes &&) (reason == Deoptimization::Reason_receiver_constraint);
2219 
2220     if (is_receiver_constraint_failure) {
2221       tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"),
2222                     trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
2223                     JVMCI_ONLY(COMMA debug_id));
2224     }
2225 
2226     methodHandle    trap_method(current, trap_scope->method());
2227     int             trap_bci    = trap_scope->bci();
2228 #if INCLUDE_JVMCI
2229     jlong           speculation = current->pending_failed_speculation();
2230     if (nm->is_compiled_by_jvmci()) {
2231       nm->update_speculation(current);
2232     } else {
2233       assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
2234     }
2235 
2236     if (trap_bci == SynchronizationEntryBCI) {
2237       trap_bci = 0;
2238       current->set_pending_monitorenter(true);
2239     }
2240 
2241     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
2242       current->set_pending_transfer_to_interpreter(true);
2243     }
2244 #endif
2245 
2246     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
2247     // Record this event in the histogram.
2248     gather_statistics(reason, action, trap_bc);
2249 
2250     // Ensure that we can record deopt. history:
2251     bool create_if_missing = ProfileTraps;
2252 
2253     methodHandle profiled_method;
2254 #if INCLUDE_JVMCI
2255     if (nm->is_compiled_by_jvmci()) {
2256       profiled_method = methodHandle(current, nm->method());
2257     } else {
2258       profiled_method = trap_method;
2259     }
2260 #else
2261     profiled_method = trap_method;
2262 #endif
2263 
2264     MethodData* trap_mdo =
2265       get_method_data(current, profiled_method, create_if_missing);
2266 
2267     { // Log Deoptimization event for JFR, UL and event system
2268       Method* tm = trap_method();
2269       const char* reason_name = trap_reason_name(reason);
2270       const char* reason_action = trap_action_name(action);
2271       intptr_t pc = p2i(fr.pc());
2272 
2273       JFR_ONLY(post_deoptimization_event(nm, tm, trap_bci, trap_bc, reason, action);)
2274       log_deopt(nm, tm, pc, fr, trap_bci, reason_name, reason_action);
2275       Events::log_deopt_message(current, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
2276                                 reason_name, reason_action, pc,
2277                                 tm->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());
2278     }
2279 
2280     // Print a bunch of diagnostics, if requested.
2281     if (TraceDeoptimization || LogCompilation || is_receiver_constraint_failure) {
2282       ResourceMark rm;
2283 
2284       // Lock to read ProfileData, and ensure lock is not broken by a safepoint
2285       // We must do this already now, since we cannot acquire this lock while
2286       // holding the tty lock (lock ordering by rank).
2287       MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
2288 
2289       ttyLocker ttyl;
2290 
2291       char buf[100];
2292       if (xtty != nullptr) {
2293         xtty->begin_head("uncommon_trap thread='%zu' %s",
2294                          os::current_thread_id(),
2295                          format_trap_request(buf, sizeof(buf), trap_request));
2296 #if INCLUDE_JVMCI
2297         if (speculation != 0) {
2298           xtty->print(" speculation='" JLONG_FORMAT "'", speculation);
2299         }
2300 #endif
2301         nm->log_identity(xtty);
2302       }
2303       Symbol* class_name = nullptr;
2304       bool unresolved = false;
2305       if (unloaded_class_index >= 0) {
2306         constantPoolHandle constants (current, trap_method->constants());
2307         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
2308           class_name = constants->klass_name_at(unloaded_class_index);
2309           unresolved = true;
2310           if (xtty != nullptr)
2311             xtty->print(" unresolved='1'");
2312         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
2313           class_name = constants->symbol_at(unloaded_class_index);
2314         }
2315         if (xtty != nullptr)
2316           xtty->name(class_name);
2317       }
2318       if (xtty != nullptr && trap_mdo != nullptr && (int)reason < (int)MethodData::_trap_hist_limit) {
2319         // Dump the relevant MDO state.
2320         // This is the deopt count for the current reason, any previous
2321         // reasons or recompiles seen at this point.
2322         int dcnt = trap_mdo->trap_count(reason);
2323         if (dcnt != 0)
2324           xtty->print(" count='%d'", dcnt);
2325 
2326         // We need to lock to read the ProfileData. But to keep the locks ordered, we need to
2327         // lock extra_data_lock before the tty lock.
2328         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
2329         int dos = (pdata == nullptr)? 0: pdata->trap_state();
2330         if (dos != 0) {
2331           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
2332           if (trap_state_is_recompiled(dos)) {
2333             int recnt2 = trap_mdo->overflow_recompile_count();
2334             if (recnt2 != 0)
2335               xtty->print(" recompiles2='%d'", recnt2);
2336           }
2337         }
2338       }
2339       if (xtty != nullptr) {
2340         xtty->stamp();
2341         xtty->end_head();
2342       }
2343       if (TraceDeoptimization) {  // make noise on the tty
2344         stringStream st;
2345         st.print("UNCOMMON TRAP method=%s", trap_scope->method()->name_and_sig_as_C_string());
2346         st.print("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT JVMCI_ONLY(", debug_id=%d"),
2347                  trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin() JVMCI_ONLY(COMMA debug_id));
2348         st.print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
2349 #if INCLUDE_JVMCI
2350         if (nm->is_compiled_by_jvmci()) {
2351           const char* installed_code_name = nm->jvmci_name();
2352           if (installed_code_name != nullptr) {
2353             st.print(" (JVMCI: installed code name=%s) ", installed_code_name);
2354           }
2355         }
2356 #endif
2357         st.print(" (@" INTPTR_FORMAT ") thread=%zu reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
2358                    p2i(fr.pc()),
2359                    os::current_thread_id(),
2360                    trap_reason_name(reason),
2361                    trap_action_name(action),
2362                    unloaded_class_index
2363 #if INCLUDE_JVMCI
2364                    , debug_id
2365 #endif
2366                    );
2367         if (class_name != nullptr) {
2368           st.print(unresolved ? " unresolved class: " : " symbol: ");
2369           class_name->print_symbol_on(&st);
2370         }
2371         st.cr();
2372         tty->print_raw(st.freeze());
2373       }
2374       if (xtty != nullptr) {
2375         // Log the precise location of the trap.
2376         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
2377           xtty->begin_elem("jvms bci='%d'", sd->bci());
2378           xtty->method(sd->method());
2379           xtty->end_elem();
2380           if (sd->is_top())  break;
2381         }
2382         xtty->tail("uncommon_trap");
2383       }
2384     }
2385     // (End diagnostic printout.)
2386 
2387     if (is_receiver_constraint_failure) {
2388       fatal("missing receiver type check");
2389     }
2390 
2391     // Load class if necessary
2392     if (unloaded_class_index >= 0) {
2393       constantPoolHandle constants(current, trap_method->constants());
2394       load_class_by_index(constants, unloaded_class_index, THREAD);
2395     }
2396 
2397     // Flush the nmethod if necessary and desirable.
2398     //
2399     // We need to avoid situations where we are re-flushing the nmethod
2400     // because of a hot deoptimization site.  Repeated flushes at the same
2401     // point need to be detected by the compiler and avoided.  If the compiler
2402     // cannot avoid them (or has a bug and "refuses" to avoid them), this
2403     // module must take measures to avoid an infinite cycle of recompilation
2404     // and deoptimization.  There are several such measures:
2405     //
2406     //   1. If a recompilation is ordered a second time at some site X
2407     //   and for the same reason R, the action is adjusted to 'reinterpret',
2408     //   to give the interpreter time to exercise the method more thoroughly.
2409     //   If this happens, the method's overflow_recompile_count is incremented.
2410     //
2411     //   2. If the compiler fails to reduce the deoptimization rate, then
2412     //   the method's overflow_recompile_count will begin to exceed the set
2413     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
2414     //   is adjusted to 'make_not_compilable', and the method is abandoned
2415     //   to the interpreter.  This is a performance hit for hot methods,
2416     //   but is better than a disastrous infinite cycle of recompilations.
2417     //   (Actually, only the method containing the site X is abandoned.)
2418     //
2419     //   3. In parallel with the previous measures, if the total number of
2420     //   recompilations of a method exceeds the much larger set limit
2421     //   PerMethodRecompilationCutoff, the method is abandoned.
2422     //   This should only happen if the method is very large and has
2423     //   many "lukewarm" deoptimizations.  The code which enforces this
2424     //   limit is elsewhere (class nmethod, class Method).
2425     //
2426     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
2427     // to recompile at each bytecode independently of the per-BCI cutoff.
2428     //
2429     // The decision to update code is up to the compiler, and is encoded
2430     // in the Action_xxx code.  If the compiler requests Action_none
2431     // no trap state is changed, no compiled code is changed, and the
2432     // computation suffers along in the interpreter.
2433     //
2434     // The other action codes specify various tactics for decompilation
2435     // and recompilation.  Action_maybe_recompile is the loosest, and
2436     // allows the compiled code to stay around until enough traps are seen,
2437     // and until the compiler gets around to recompiling the trapping method.
2438     //
2439     // The other actions cause immediate removal of the present code.
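    //
    // Putting these measures together, a single hot trap site typically
    // escalates as follows (a sketch, not a literal trace): the first
    // recompile request for reason R simply orders new code; a second request
    // at the same site and for the same reason is downgraded to 'reinterpret'
    // and bumps overflow_recompile_count; and once that count exceeds
    // PerBytecodeRecompilationCutoff the method is made not compilable.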
2440 
2441     // Traps caused by injected profile shouldn't pollute trap counts.
2442     bool injected_profile_trap = trap_method->has_injected_profile() &&
2443                                  (reason == Reason_intrinsic || reason == Reason_unreached);
2444 
2445     bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
2446     bool make_not_entrant = false;
2447     bool make_not_compilable = false;
2448     bool reprofile = false;
2449     switch (action) {
2450     case Action_none:
2451       // Keep the old code.
2452       update_trap_state = false;
2453       break;
2454     case Action_maybe_recompile:
      // We do not need to invalidate the present code, but we may
      // initiate another compilation.
      // Start the compiler without (necessarily) invalidating the nmethod.
2458       // The system will tolerate the old code, but new code should be
2459       // generated when possible.
2460       break;
2461     case Action_reinterpret:
2462       // Go back into the interpreter for a while, and then consider
      // recompiling from scratch.
2464       make_not_entrant = true;
      // Reset the invocation counter for the outermost method.
2466       // This will allow the interpreter to exercise the bytecodes
2467       // for a while before recompiling.
2468       // By contrast, Action_make_not_entrant is immediate.
2469       //
2470       // Note that the compiler will track null_check, null_assert,
2471       // range_check, and class_check events and log them as if they
2472       // had been traps taken from compiled code.  This will update
2473       // the MDO trap history so that the next compilation will
2474       // properly detect hot trap sites.
2475       reprofile = true;
2476       break;
2477     case Action_make_not_entrant:
2478       // Request immediate recompilation, and get rid of the old code.
2479       // Make them not entrant, so next time they are called they get
2480       // recompiled.  Unloaded classes are loaded now so recompile before next
2481       // time they are called.  Same for uninitialized.  The interpreter will
2482       // link the missing class, if any.
2483       make_not_entrant = true;
2484       break;
2485     case Action_make_not_compilable:
2486       // Give up on compiling this method at all.
2487       make_not_entrant = true;
2488       make_not_compilable = true;
2489       break;
2490     default:
2491       ShouldNotReachHere();
2492     }
2493 
2494 #if INCLUDE_JVMCI
2495     // Deoptimization count is used by the CompileBroker to reason about compilations
    // it requests, so do not pollute the count for deoptimizations in non-default (i.e.
    // non-CompileBroker) compilations.
2498     if (nm->jvmci_skip_profile_deopt()) {
2499       update_trap_state = false;
2500     }
2501 #endif
    // Setting +ProfileTraps fixes the following failure mode, on all platforms:
    // without it, the result is infinite heroic-opt-uncommon-trap/deopt/recompile
    // cycles, since the recompile relies on a MethodData* to record heroic opt
    // failures.
2505 
2506     // Whether the interpreter is producing MDO data or not, we also need
2507     // to use the MDO to detect hot deoptimization points and control
2508     // aggressive optimization.
2509     bool inc_recompile_count = false;
2510 
2511     // Lock to read ProfileData, and ensure lock is not broken by a safepoint
2512     ConditionalMutexLocker ml((trap_mdo != nullptr) ? trap_mdo->extra_data_lock() : nullptr,
2513                               (trap_mdo != nullptr),
2514                               Mutex::_no_safepoint_check_flag);
2515     ProfileData* pdata = nullptr;
2516     if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != nullptr) {
2517       assert(trap_mdo == get_method_data(current, profiled_method, false), "sanity");
2518       uint this_trap_count = 0;
2519       bool maybe_prior_trap = false;
2520       bool maybe_prior_recompile = false;
2521 
2522       pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
2523 #if INCLUDE_JVMCI
2524                                    nm->is_compiled_by_jvmci() && nm->is_osr_method(),
2525 #endif
2526                                    nm->method(),
2527                                    //outputs:
2528                                    this_trap_count,
2529                                    maybe_prior_trap,
2530                                    maybe_prior_recompile);
2531       // Because the interpreter also counts null, div0, range, and class
2532       // checks, these traps from compiled code are double-counted.
2533       // This is harmless; it just means that the PerXTrapLimit values
2534       // are in effect a little smaller than they look.
2535 
2536       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2537       if (per_bc_reason != Reason_none) {
2538         // Now take action based on the partially known per-BCI history.
2539         if (maybe_prior_trap
2540             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
2541           // If there are too many traps at this BCI, force a recompile.
2542           // This will allow the compiler to see the limit overflow, and
2543           // take corrective action, if possible.  The compiler generally
2544           // does not use the exact PerBytecodeTrapLimit value, but instead
2545           // changes its tactics if it sees any traps at all.  This provides
2546           // a little hysteresis, delaying a recompile until a trap happens
2547           // several times.
2548           //
2549           // Actually, since there is only one bit of counter per BCI,
2550           // the possible per-BCI counts are {0,1,(per-method count)}.
2551           // This produces accurate results if in fact there is only
2552           // one hot trap site, but begins to get fuzzy if there are
2553           // many sites.  For example, if there are ten sites each
2554           // trapping two or more times, they each get the blame for
2555           // all of their traps.
2556           make_not_entrant = true;
2557         }
2558 
2559         // Detect repeated recompilation at the same BCI, and enforce a limit.
2560         if (make_not_entrant && maybe_prior_recompile) {
2561           // More than one recompile at this point.
2562           inc_recompile_count = maybe_prior_trap;
2563         }
2564       } else {
2565         // For reasons which are not recorded per-bytecode, we simply
2566         // force recompiles unconditionally.
2567         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
2568         make_not_entrant = true;
2569       }
2570 
2571       // Go back to the compiler if there are too many traps in this method.
2572       if (this_trap_count >= per_method_trap_limit(reason)) {
2573         // If there are too many traps in this method, force a recompile.
2574         // This will allow the compiler to see the limit overflow, and
2575         // take corrective action, if possible.
2576         // (This condition is an unlikely backstop only, because the
2577         // PerBytecodeTrapLimit is more likely to take effect first,
2578         // if it is applicable.)
2579         make_not_entrant = true;
2580       }
2581 
2582       // Here's more hysteresis:  If there has been a recompile at
2583       // this trap point already, run the method in the interpreter
2584       // for a while to exercise it more thoroughly.
2585       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
2586         reprofile = true;
2587       }
2588     }
2589 
2590     // Take requested actions on the method:
2591 
2592     // Recompile
2593     if (make_not_entrant) {
2594       if (!nm->make_not_entrant(nmethod::InvalidationReason::UNCOMMON_TRAP)) {
2595         return; // the call did not change nmethod's state
2596       }
2597 
2598       if (pdata != nullptr) {
2599         // Record the recompilation event, if any.
2600         int tstate0 = pdata->trap_state();
2601         int tstate1 = trap_state_set_recompiled(tstate0, true);
2602         if (tstate1 != tstate0)
2603           pdata->set_trap_state(tstate1);
2604       }
2605 
2606       // For code aging we count traps separately here, using make_not_entrant()
2607       // as a guard against simultaneous deopts in multiple threads.
2608       if (reason == Reason_tenured && trap_mdo != nullptr) {
2609         trap_mdo->inc_tenure_traps();
2610       }
2611     }
2612     if (inc_recompile_count) {
2613       trap_mdo->inc_overflow_recompile_count();
2614       if ((uint)trap_mdo->overflow_recompile_count() >
2615           (uint)PerBytecodeRecompilationCutoff) {
2616         // Give up on the method containing the bad BCI.
2617         if (trap_method() == nm->method()) {
2618           make_not_compilable = true;
2619         } else {
2620           trap_method->set_not_compilable("overflow_recompile_count > PerBytecodeRecompilationCutoff", CompLevel_full_optimization);
2621           // But give grace to the enclosing nm->method().
2622         }
2623       }
2624     }
2625 
2626     // Reprofile
2627     if (reprofile) {
2628       CompilationPolicy::reprofile(trap_scope, nm->is_osr_method());
2629     }
2630 
2631     // Give up compiling
2632     if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
2633       assert(make_not_entrant, "consistent");
2634       nm->method()->set_not_compilable("give up compiling", CompLevel_full_optimization);
2635     }
2636 
2637     if (ProfileExceptionHandlers && trap_mdo != nullptr) {
2638       BitData* exception_handler_data = trap_mdo->exception_handler_bci_to_data_or_null(trap_bci);
2639       if (exception_handler_data != nullptr) {
        // Uncommon trap at the start of an exception handler.
        // C2 generates these for un-entered exception handlers.
        // Mark the handler as entered to avoid generating
        // another uncommon trap the next time the handler is compiled.
2644         exception_handler_data->set_exception_handler_entered();
2645       }
2646     }
2647 
2648   } // Free marked resources
2649 
2650 }
2651 JRT_END
2652 
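// Bump the per-reason trap counter (when update_total_trap_count is set),
// update the per-BCI trap state, and report back what the profile already
// knew: the updated trap count, whether a prior trap of this reason may have
// occurred, and whether a prior recompile may have occurred.  Returns the
// per-BCI ProfileData slot (allocated from the MDO's spares if necessary),
// or nullptr if none could be found or allocated.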
2653 ProfileData*
2654 Deoptimization::query_update_method_data(MethodData* trap_mdo,
2655                                          int trap_bci,
2656                                          Deoptimization::DeoptReason reason,
2657                                          bool update_total_trap_count,
2658 #if INCLUDE_JVMCI
2659                                          bool is_osr,
2660 #endif
2661                                          Method* compiled_method,
2662                                          //outputs:
2663                                          uint& ret_this_trap_count,
2664                                          bool& ret_maybe_prior_trap,
2665                                          bool& ret_maybe_prior_recompile) {
2666   trap_mdo->check_extra_data_locked();
2667 
2668   bool maybe_prior_trap = false;
2669   bool maybe_prior_recompile = false;
2670   uint this_trap_count = 0;
2671   if (update_total_trap_count) {
2672     uint idx = reason;
2673 #if INCLUDE_JVMCI
2674     if (is_osr) {
2675       // Upper half of history array used for traps in OSR compilations
2676       idx += Reason_TRAP_HISTORY_LENGTH;
2677     }
2678 #endif
2679     uint prior_trap_count = trap_mdo->trap_count(idx);
2680     this_trap_count  = trap_mdo->inc_trap_count(idx);
2681 
2682     // If the runtime cannot find a place to store trap history,
2683     // it is estimated based on the general condition of the method.
2684     // If the method has ever been recompiled, or has ever incurred
    // a trap with the present reason, then this BCI is assumed
2686     // (pessimistically) to be the culprit.
2687     maybe_prior_trap      = (prior_trap_count != 0);
2688     maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
2689   }
2690   ProfileData* pdata = nullptr;
2691 
2692 
2693   // For reasons which are recorded per bytecode, we check per-BCI data.
2694   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2695   assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
2696   if (per_bc_reason != Reason_none) {
2697     // Find the profile data for this BCI.  If there isn't one,
2698     // try to allocate one from the MDO's set of spares.
2699     // This will let us detect a repeated trap at this point.
2700     pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : nullptr);
2701 
2702     if (pdata != nullptr) {
2703       if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
2704         if (LogCompilation && xtty != nullptr) {
2705           ttyLocker ttyl;
2706           // no more room for speculative traps in this MDO
2707           xtty->elem("speculative_traps_oom");
2708         }
2709       }
2710       // Query the trap state of this profile datum.
2711       int tstate0 = pdata->trap_state();
2712       if (!trap_state_has_reason(tstate0, per_bc_reason))
2713         maybe_prior_trap = false;
2714       if (!trap_state_is_recompiled(tstate0))
2715         maybe_prior_recompile = false;
2716 
2717       // Update the trap state of this profile datum.
2718       int tstate1 = tstate0;
2719       // Record the reason.
2720       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
2721       // Store the updated state on the MDO, for next time.
2722       if (tstate1 != tstate0)
2723         pdata->set_trap_state(tstate1);
2724     } else {
2725       if (LogCompilation && xtty != nullptr) {
2726         ttyLocker ttyl;
2727         // Missing MDP?  Leave a small complaint in the log.
2728         xtty->elem("missing_mdp bci='%d'", trap_bci);
2729       }
2730     }
2731   }
2732 
2733   // Return results:
2734   ret_this_trap_count = this_trap_count;
2735   ret_maybe_prior_trap = maybe_prior_trap;
2736   ret_maybe_prior_recompile = maybe_prior_recompile;
2737   return pdata;
2738 }
2739 
2740 void
2741 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2742   ResourceMark rm;
2743   // Ignored outputs:
2744   uint ignore_this_trap_count;
2745   bool ignore_maybe_prior_trap;
2746   bool ignore_maybe_prior_recompile;
2747   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2748   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2749   bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2750 
2751   // Lock to read ProfileData, and ensure lock is not broken by a safepoint
2752   MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
2753 
2754   query_update_method_data(trap_mdo, trap_bci,
2755                            (DeoptReason)reason,
2756                            update_total_counts,
2757 #if INCLUDE_JVMCI
2758                            false,
2759 #endif
2760                            nullptr,
2761                            ignore_this_trap_count,
2762                            ignore_maybe_prior_trap,
2763                            ignore_maybe_prior_recompile);
2764 }
2765 
2766 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* current, jint trap_request, jint exec_mode) {
2767   // Enable WXWrite: current function is called from methods compiled by C2 directly
2768   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
2769 
  // Still in Java, no safepoints
2771   {
2772     // This enters VM and may safepoint
2773     uncommon_trap_inner(current, trap_request);
2774   }
2775   HandleMark hm(current);
2776   return fetch_unroll_info_helper(current, exec_mode);
2777 }
2778 
2779 // Local derived constants.
2780 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2781 const int DS_REASON_MASK   = ((uint)DataLayout::trap_mask) >> 1;
2782 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
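// For illustration only (the real width comes from DataLayout::trap_mask):
// if trap_mask were 0xFF, DS_REASON_MASK would be 0x7F, the low bits that
// encode one DeoptReason, and DS_RECOMPILE_BIT would be 0x80, a single flag
// recording that a recompile has already been requested at this BCI.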
2783 
2784 //---------------------------trap_state_reason---------------------------------
2785 Deoptimization::DeoptReason
2786 Deoptimization::trap_state_reason(int trap_state) {
2787   // This assert provides the link between the width of DataLayout::trap_bits
2788   // and the encoding of "recorded" reasons.  It ensures there are enough
2789   // bits to store all needed reasons in the per-BCI MDO profile.
2790   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2791   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2792   trap_state -= recompile_bit;
2793   if (trap_state == DS_REASON_MASK) {
2794     return Reason_many;
2795   } else {
2796     assert((int)Reason_none == 0, "state=0 => Reason_none");
2797     return (DeoptReason)trap_state;
2798   }
2799 }
2800 //-------------------------trap_state_has_reason-------------------------------
2801 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2802   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
2803   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2804   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2805   trap_state -= recompile_bit;
2806   if (trap_state == DS_REASON_MASK) {
2807     return -1;  // true, unspecifically (bottom of state lattice)
2808   } else if (trap_state == reason) {
2809     return 1;   // true, definitely
2810   } else if (trap_state == 0) {
2811     return 0;   // false, definitely (top of state lattice)
2812   } else {
2813     return 0;   // false, definitely
2814   }
2815 }
2816 //-------------------------trap_state_add_reason-------------------------------
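// Worked example of the per-BCI reason lattice (illustrative reasons only):
//   adding class_check to state 0           yields class_check     (first reason seen)
//   adding class_check to state class_check yields class_check     (unchanged)
//   adding null_check  to state class_check yields DS_REASON_MASK  (Reason_many, lattice bottom)
// DS_RECOMPILE_BIT, if set, is preserved across all of these transitions.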
2817 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
2818   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
2819   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2820   trap_state -= recompile_bit;
2821   if (trap_state == DS_REASON_MASK) {
2822     return trap_state + recompile_bit;     // already at state lattice bottom
2823   } else if (trap_state == reason) {
2824     return trap_state + recompile_bit;     // the condition is already true
2825   } else if (trap_state == 0) {
2826     return reason + recompile_bit;          // no condition has yet been true
2827   } else {
2828     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
2829   }
2830 }
2831 //-----------------------trap_state_is_recompiled------------------------------
2832 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2833   return (trap_state & DS_RECOMPILE_BIT) != 0;
2834 }
2835 //-----------------------trap_state_set_recompiled-----------------------------
2836 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
2837   if (z)  return trap_state |  DS_RECOMPILE_BIT;
2838   else    return trap_state & ~DS_RECOMPILE_BIT;
2839 }
2840 //---------------------------format_trap_state---------------------------------
2841 // This is used for debugging and diagnostics, including LogFile output.
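// Typical results look like "class_check" or "unstable_if recompiled"; a
// state that does not re-encode cleanly is printed as a raw "#<n>" value.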
2842 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2843                                               int trap_state) {
2844   assert(buflen > 0, "sanity");
2845   DeoptReason reason      = trap_state_reason(trap_state);
2846   bool        recomp_flag = trap_state_is_recompiled(trap_state);
2847   // Re-encode the state from its decoded components.
2848   int decoded_state = 0;
2849   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
2850     decoded_state = trap_state_add_reason(decoded_state, reason);
2851   if (recomp_flag)
2852     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
2853   // If the state re-encodes properly, format it symbolically.
2854   // Because this routine is used for debugging and diagnostics,
2855   // be robust even if the state is a strange value.
2856   size_t len;
2857   if (decoded_state != trap_state) {
2858     // Random buggy state that doesn't decode??
2859     len = jio_snprintf(buf, buflen, "#%d", trap_state);
2860   } else {
2861     len = jio_snprintf(buf, buflen, "%s%s",
2862                        trap_reason_name(reason),
2863                        recomp_flag ? " recompiled" : "");
2864   }
2865   return buf;
2866 }
2867 
2868 
2869 //--------------------------------statics--------------------------------------
2870 const char* Deoptimization::_trap_reason_name[] = {
2871   // Note:  Keep this in sync. with enum DeoptReason.
2872   "none",
2873   "null_check",
2874   "null_assert" JVMCI_ONLY("_or_unreached0"),
2875   "range_check",
2876   "class_check",
2877   "array_check",
2878   "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
2879   "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
2880   "profile_predicate",
2881   "auto_vectorization_check",
2882   "unloaded",
2883   "uninitialized",
2884   "initialized",
2885   "unreached",
2886   "unhandled",
2887   "constraint",
2888   "div0_check",
2889   "age",
2890   "predicate",
2891   "loop_limit_check",
2892   "speculate_class_check",
2893   "speculate_null_check",
2894   "speculate_null_assert",
2895   "unstable_if",
2896   "unstable_fused_if",
2897   "receiver_constraint",
2898   "not_compiled_exception_handler",
2899   "short_running_loop" JVMCI_ONLY("_or_aliasing"),
2900 #if INCLUDE_JVMCI
2901   "transfer_to_interpreter",
2902   "unresolved",
2903   "jsr_mismatch",
2904 #endif
2905   "tenured"
2906 };
2907 const char* Deoptimization::_trap_action_name[] = {
2908   // Note:  Keep this in sync. with enum DeoptAction.
2909   "none",
2910   "maybe_recompile",
2911   "reinterpret",
2912   "make_not_entrant",
2913   "make_not_compilable"
2914 };
2915 
2916 const char* Deoptimization::trap_reason_name(int reason) {
2917   // Check that every reason has a name
2918   STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
2919 
2920   if (reason == Reason_many)  return "many";
2921   if ((uint)reason < Reason_LIMIT)
2922     return _trap_reason_name[reason];
2923   static char buf[20];
2924   os::snprintf_checked(buf, sizeof(buf), "reason%d", reason);
2925   return buf;
2926 }
2927 const char* Deoptimization::trap_action_name(int action) {
2928   // Check that every action has a name
2929   STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
2930 
2931   if ((uint)action < Action_LIMIT)
2932     return _trap_action_name[action];
2933   static char buf[20];
2934   os::snprintf_checked(buf, sizeof(buf), "action%d", action);
2935   return buf;
2936 }
2937 
2938 // This is used for debugging and diagnostics, including LogFile output.
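// Typical results look like "reason='unstable_if' action='reinterpret'" or,
// when an unloaded class index is recorded, "reason='unloaded'
// action='reinterpret' index='2'" (with an extra debug_id attribute in JVMCI
// builds); the values shown here are illustrative only.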
2939 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
2940                                                 int trap_request) {
2941   jint unloaded_class_index = trap_request_index(trap_request);
2942   const char* reason = trap_reason_name(trap_request_reason(trap_request));
2943   const char* action = trap_action_name(trap_request_action(trap_request));
2944 #if INCLUDE_JVMCI
2945   int debug_id = trap_request_debug_id(trap_request);
2946 #endif
2947   size_t len;
2948   if (unloaded_class_index < 0) {
2949     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2950                        reason, action
2951 #if INCLUDE_JVMCI
2952                        ,debug_id
2953 #endif
2954                        );
2955   } else {
2956     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2957                        reason, action, unloaded_class_index
2958 #if INCLUDE_JVMCI
2959                        ,debug_id
2960 #endif
2961                        );
2962   }
2963   return buf;
2964 }
2965 
juint Deoptimization::_deoptimization_hist
        [Deoptimization::Reason_LIMIT]
        [1 + Deoptimization::Action_LIMIT]
        [Deoptimization::BC_CASE_LIMIT]
  = {0};
2971 
2972 enum {
2973   LSB_BITS = 8,
2974   LSB_MASK = right_n_bits(LSB_BITS)
2975 };
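// Layout of _deoptimization_hist, as maintained by gather_statistics() below:
// [Reason_none][0][0] holds the grand total, [reason][0][0] the per-reason
// total, and [reason][1+action][bc_case] a packed counter whose low LSB_BITS
// hold the bytecode and whose upper bits hold the event count.  For example
// (illustrative values only), a packed value of
// (7 << LSB_BITS) | Bytecodes::_checkcast records seven traps against the
// checkcast bytecode for that reason/action pair.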
2976 
2977 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2978                                        Bytecodes::Code bc) {
2979   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2980   assert(action >= 0 && action < Action_LIMIT, "oob");
2981   _deoptimization_hist[Reason_none][0][0] += 1;  // total
2982   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
2983   juint* cases = _deoptimization_hist[reason][1+action];
2984   juint* bc_counter_addr = nullptr;
2985   juint  bc_counter      = 0;
2986   // Look for an unused counter, or an exact match to this BC.
2987   if (bc != Bytecodes::_illegal) {
2988     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2989       juint* counter_addr = &cases[bc_case];
2990       juint  counter = *counter_addr;
2991       if ((counter == 0 && bc_counter_addr == nullptr)
2992           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2993         // this counter is either free or is already devoted to this BC
2994         bc_counter_addr = counter_addr;
2995         bc_counter = counter | bc;
2996       }
2997     }
2998   }
2999   if (bc_counter_addr == nullptr) {
3000     // Overflow, or no given bytecode.
3001     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
3002     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
3003   }
3004   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
3005 }
3006 
3007 jint Deoptimization::total_deoptimization_count() {
3008   return _deoptimization_hist[Reason_none][0][0];
3009 }
3010 
3011 // Get the deopt count for a specific reason and a specific action. If either
3012 // one of 'reason' or 'action' is null, the method returns the sum of all
3013 // deoptimizations with the specific 'action' or 'reason' respectively.
3014 // If both arguments are null, the method returns the total deopt count.
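// For example (illustrative only), deoptimization_count("unstable_if", nullptr)
// sums every unstable_if deoptimization regardless of action, while
// deoptimization_count("class_check", "make_not_entrant") counts only those
// class_check deoptimizations whose requested action was make_not_entrant.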
3015 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
3016   if (reason_str == nullptr && action_str == nullptr) {
3017     return total_deoptimization_count();
3018   }
3019   juint counter = 0;
3020   for (int reason = 0; reason < Reason_LIMIT; reason++) {
3021     if (reason_str == nullptr || !strcmp(reason_str, trap_reason_name(reason))) {
3022       for (int action = 0; action < Action_LIMIT; action++) {
3023         if (action_str == nullptr || !strcmp(action_str, trap_action_name(action))) {
3024           juint* cases = _deoptimization_hist[reason][1+action];
3025           for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
3026             counter += cases[bc_case] >> LSB_BITS;
3027           }
3028         }
3029       }
3030     }
3031   }
3032   return counter;
3033 }
3034 
3035 void Deoptimization::print_statistics() {
3036   juint total = total_deoptimization_count();
3037   juint account = total;
3038   if (total != 0) {
3039     ttyLocker ttyl;
3040     if (xtty != nullptr)  xtty->head("statistics type='deoptimization'");
3041     tty->print_cr("Deoptimization traps recorded:");
3042     #define PRINT_STAT_LINE(name, r) \
3043       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
3044     PRINT_STAT_LINE("total", total);
3045     // For each non-zero entry in the histogram, print the reason,
3046     // the action, and (if specifically known) the type of bytecode.
3047     for (int reason = 0; reason < Reason_LIMIT; reason++) {
3048       for (int action = 0; action < Action_LIMIT; action++) {
3049         juint* cases = _deoptimization_hist[reason][1+action];
3050         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
3051           juint counter = cases[bc_case];
3052           if (counter != 0) {
3053             char name[1*K];
3054             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
3055             os::snprintf_checked(name, sizeof(name), "%s/%s/%s",
3056                     trap_reason_name(reason),
3057                     trap_action_name(action),
3058                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
3059             juint r = counter >> LSB_BITS;
3060             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
3061             account -= r;
3062           }
3063         }
3064       }
3065     }
3066     if (account != 0) {
3067       PRINT_STAT_LINE("unaccounted", account);
3068     }
3069     #undef PRINT_STAT_LINE
3070     if (xtty != nullptr)  xtty->tail("statistics");
3071   }
3072 }
3073 
3074 #else // COMPILER2_OR_JVMCI
3075 
3076 
// Stubs for a C1-only system.
3078 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
3079   return false;
3080 }
3081 
3082 const char* Deoptimization::trap_reason_name(int reason) {
3083   return "unknown";
3084 }
3085 
3086 jint Deoptimization::total_deoptimization_count() {
3087   return 0;
3088 }
3089 
3090 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
3091   return 0;
3092 }
3093 
3094 void Deoptimization::print_statistics() {
3095   // no output
3096 }
3097 
3098 void
3099 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
3100   // no update
3101 }
3102 
3103 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
3104   return 0;
3105 }
3106 
3107 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
3108                                        Bytecodes::Code bc) {
3109   // no update
3110 }
3111 
3112 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
3113                                               int trap_state) {
3114   jio_snprintf(buf, buflen, "#%d", trap_state);
3115   return buf;
3116 }
3117 
3118 #endif // COMPILER2_OR_JVMCI