1 /*
   2  * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "jvm.h"
  27 #include "classfile/javaClasses.inline.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "classfile/vmClasses.hpp"
  31 #include "code/codeCache.hpp"
  32 #include "code/debugInfoRec.hpp"
  33 #include "code/nmethod.hpp"
  34 #include "code/pcDesc.hpp"
  35 #include "code/scopeDesc.hpp"
  36 #include "compiler/compilationPolicy.hpp"
  37 #include "compiler/compilerDefinitions.inline.hpp"
  38 #include "gc/shared/collectedHeap.hpp"
  39 #include "interpreter/bytecode.hpp"
  40 #include "interpreter/interpreter.hpp"
  41 #include "interpreter/oopMapCache.hpp"
  42 #include "logging/log.hpp"
  43 #include "logging/logLevel.hpp"
  44 #include "logging/logMessage.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "memory/allocation.inline.hpp"
  47 #include "memory/oopFactory.hpp"
  48 #include "memory/resourceArea.hpp"
  49 #include "memory/universe.hpp"
  50 #include "oops/constantPool.hpp"
  51 #include "oops/fieldStreams.inline.hpp"
  52 #include "oops/method.hpp"
  53 #include "oops/objArrayKlass.hpp"
  54 #include "oops/objArrayOop.inline.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "oops/typeArrayOop.inline.hpp"
  57 #include "oops/verifyOopClosure.hpp"
  58 #include "prims/jvmtiDeferredUpdates.hpp"
  59 #include "prims/jvmtiExport.hpp"
  60 #include "prims/jvmtiThreadState.hpp"
  61 #include "prims/methodHandles.hpp"
  62 #include "prims/vectorSupport.hpp"
  63 #include "runtime/atomic.hpp"
  64 #include "runtime/continuation.hpp"
  65 #include "runtime/continuationEntry.inline.hpp"
  66 #include "runtime/deoptimization.hpp"
  67 #include "runtime/escapeBarrier.hpp"
  68 #include "runtime/fieldDescriptor.hpp"
  69 #include "runtime/fieldDescriptor.inline.hpp"
  70 #include "runtime/frame.inline.hpp"
  71 #include "runtime/handles.inline.hpp"
  72 #include "runtime/interfaceSupport.inline.hpp"
  73 #include "runtime/javaThread.hpp"
  74 #include "runtime/jniHandles.inline.hpp"
  75 #include "runtime/keepStackGCProcessed.hpp"
  76 #include "runtime/objectMonitor.inline.hpp"
  77 #include "runtime/osThread.hpp"
  78 #include "runtime/safepointVerifiers.hpp"
  79 #include "runtime/sharedRuntime.hpp"
  80 #include "runtime/signature.hpp"
  81 #include "runtime/stackFrameStream.inline.hpp"
  82 #include "runtime/stackValue.hpp"
  83 #include "runtime/stackWatermarkSet.hpp"
  84 #include "runtime/stubRoutines.hpp"
  85 #include "runtime/threadSMR.hpp"
  86 #include "runtime/threadWXSetters.inline.hpp"
  87 #include "runtime/vframe.hpp"
  88 #include "runtime/vframeArray.hpp"
  89 #include "runtime/vframe_hp.hpp"
  90 #include "runtime/vmOperations.hpp"
  91 #include "utilities/events.hpp"
  92 #include "utilities/growableArray.hpp"
  93 #include "utilities/macros.hpp"
  94 #include "utilities/preserveException.hpp"
  95 #include "utilities/xmlstream.hpp"
  96 #if INCLUDE_JFR
  97 #include "jfr/jfrEvents.hpp"
  98 #include "jfr/metadata/jfrSerializer.hpp"
  99 #endif
 100 
 101 bool DeoptimizationMarker::_is_active = false;
 102 
 103 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
 104                                          int  caller_adjustment,
 105                                          int  caller_actual_parameters,
 106                                          int  number_of_frames,
 107                                          intptr_t* frame_sizes,
 108                                          address* frame_pcs,
 109                                          BasicType return_type,
 110                                          int exec_mode) {
 111   _size_of_deoptimized_frame = size_of_deoptimized_frame;
 112   _caller_adjustment         = caller_adjustment;
 113   _caller_actual_parameters  = caller_actual_parameters;
 114   _number_of_frames          = number_of_frames;
 115   _frame_sizes               = frame_sizes;
 116   _frame_pcs                 = frame_pcs;
 117   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
 118   _return_type               = return_type;
 119   _initial_info              = 0;
 120   // PD (x86 only)
 121   _counter_temp              = 0;
 122   _unpack_kind               = exec_mode;
 123   _sender_sp_temp            = 0;
 124 
 125   _total_frame_sizes         = size_of_frames();
 126   assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode");
 127 }
 128 
 129 
 130 Deoptimization::UnrollBlock::~UnrollBlock() {
 131   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
 132   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
 133   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
 134 }
 135 
 136 
 137 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
 138   assert(register_number < RegisterMap::reg_count, "checking register number");
 139   return &_register_block[register_number * 2];
 140 }
 141 
 142 
 143 
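      // Worked example (hypothetical numbers): with _caller_adjustment == 16 and
      // two frames of 96 and 80 bytes, size_of_frames() returns 16 + 96 + 80 == 192.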
 144 int Deoptimization::UnrollBlock::size_of_frames() const {
 145   // Account first for the adjustment of the initial frame
 146   int result = _caller_adjustment;
 147   for (int index = 0; index < number_of_frames(); index++) {
 148     result += frame_sizes()[index];
 149   }
 150   return result;
 151 }
 152 
 153 
 154 void Deoptimization::UnrollBlock::print() {
 155   ResourceMark rm;
 156   stringStream st;
 157   st.print_cr("UnrollBlock");
 158   st.print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
 159   st.print(   "  frame_sizes: ");
 160   for (int index = 0; index < number_of_frames(); index++) {
 161     st.print(INTX_FORMAT " ", frame_sizes()[index]);
 162   }
 163   st.cr();
 164   tty->print_raw(st.freeze());
 165 }
 166 
 167 
 168 // In order to make fetch_unroll_info work properly with escape
 169 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
 170 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
 171 // which is called from the method fetch_unroll_info_helper below.
 172 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
 173   // fetch_unroll_info() is called at the beginning of the deoptimization
 174   // handler. Note this fact before we start generating temporary frames
 175   // that can confuse an asynchronous stack walker. This counter is
 176   // decremented at the end of unpack_frames().
 177   current->inc_in_deopt_handler();
 178 
 179   if (exec_mode == Unpack_exception) {
 180     // When we get here, a callee has thrown an exception into a deoptimized
 181     // frame. That throw might have deferred stack watermark checking until
 182     // after unwinding. So we deal with such deferred requests here.
 183     StackWatermarkSet::after_unwind(current);
 184   }
 185 
 186   return fetch_unroll_info_helper(current, exec_mode);
 187 JRT_END
 188 
 189 #if COMPILER2_OR_JVMCI
 190 // print information about reallocated objects
 191 static void print_objects(JavaThread* deoptee_thread,
 192                           GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
 193   ResourceMark rm;
 194   stringStream st;  // change to logStream with logging
 195   st.print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(deoptee_thread));
 196   fieldDescriptor fd;
 197 
 198   for (int i = 0; i < objects->length(); i++) {
 199     ObjectValue* sv = (ObjectValue*) objects->at(i);
 200     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
 201     Handle obj = sv->value();
 202 
 203     st.print("     object <" INTPTR_FORMAT "> of type ", p2i(sv->value()()));
 204     k->print_value_on(&st);
 205     assert(obj.not_null() || realloc_failures, "reallocation was missed");
 206     if (obj.is_null()) {
 207       st.print(" allocation failed");
 208     } else {
 209       st.print(" allocated (" SIZE_FORMAT " bytes)", obj->size() * HeapWordSize);
 210     }
 211     st.cr();
 212 
 213     if (Verbose && !obj.is_null()) {
 214       k->oop_print_on(obj(), &st);
 215     }
 216   }
 217   tty->print_raw(st.freeze());
 218 }
 219 
 220 static bool rematerialize_objects(JavaThread* thread, int exec_mode, CompiledMethod* compiled_method,
 221                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 222                                   bool& deoptimized_objects) {
 223   bool realloc_failures = false;
 224   assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
 225 
 226   JavaThread* deoptee_thread = chunk->at(0)->thread();
 227   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 228          "a frame can only be deoptimized by the owner thread");
 229 
 230   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
 231 
  232   // The flag return_oop() indicates call sites which return an oop
  233   // in compiled code. Such sites include Java method calls,
  234   // runtime calls (for example, used to allocate new objects/arrays
  235   // on the slow code path) and any other calls generated in compiled code.
 236   // It is not guaranteed that we can get such information here only
 237   // by analyzing bytecode in deoptimized frames. This is why this flag
 238   // is set during method compilation (see Compile::Process_OopMap_Node()).
 239   // If the previous frame was popped or if we are dispatching an exception,
 240   // we don't have an oop result.
 241   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 242   Handle return_value;
 243   if (save_oop_result) {
  244     // Reallocation may trigger GC. If deoptimization happened on return from
  245     // a call which returns an oop, we need to save it since it is not in the oopmap.
 246     oop result = deoptee.saved_oop_result(&map);
 247     assert(oopDesc::is_oop_or_null(result), "must be oop");
 248     return_value = Handle(thread, result);
 249     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 250     if (TraceDeoptimization) {
 251       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 252       tty->cr();
 253     }
 254   }
 255   if (objects != NULL) {
 256     if (exec_mode == Deoptimization::Unpack_none) {
 257       assert(thread->thread_state() == _thread_in_vm, "assumption");
 258       JavaThread* THREAD = thread; // For exception macros.
 259       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 260       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 261       deoptimized_objects = true;
 262     } else {
 263       JavaThread* current = thread; // For JRT_BLOCK
 264       JRT_BLOCK
 265       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 266       JRT_END
 267     }
 268     bool skip_internal = (compiled_method != NULL) && !compiled_method->is_compiled_by_jvmci();
 269     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 270     if (TraceDeoptimization) {
 271       print_objects(deoptee_thread, objects, realloc_failures);
 272     }
 273   }
 274   if (save_oop_result) {
 275     // Restore result.
 276     deoptee.set_saved_oop_result(&map, return_value());
 277   }
 278   return realloc_failures;
 279 }
 280 
 281 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 282                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 283   JavaThread* deoptee_thread = chunk->at(0)->thread();
 284   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 285   assert(thread == Thread::current(), "should be");
 286   HandleMark hm(thread);
 287 #ifndef PRODUCT
 288   bool first = true;
 289 #endif // !PRODUCT
 290   for (int i = 0; i < chunk->length(); i++) {
 291     compiledVFrame* cvf = chunk->at(i);
 292     assert (cvf->scope() != NULL,"expect only compiled java frames");
 293     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 294     if (monitors->is_nonempty()) {
 295       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
 296                                                      exec_mode, realloc_failures);
 297       deoptimized_objects = deoptimized_objects || relocked;
 298 #ifndef PRODUCT
 299       if (PrintDeoptimizationDetails) {
 300         ResourceMark rm;
 301         stringStream st;
 302         for (int j = 0; j < monitors->length(); j++) {
 303           MonitorInfo* mi = monitors->at(j);
 304           if (mi->eliminated()) {
 305             if (first) {
 306               first = false;
 307               st.print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread));
 308             }
 309             if (exec_mode == Deoptimization::Unpack_none) {
 310               ObjectMonitor* monitor = deoptee_thread->current_waiting_monitor();
 311               if (monitor != NULL && monitor->object() == mi->owner()) {
 312                 st.print_cr("     object <" INTPTR_FORMAT "> DEFERRED relocking after wait", p2i(mi->owner()));
 313                 continue;
 314               }
 315             }
 316             if (mi->owner_is_scalar_replaced()) {
 317               Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
 318               st.print_cr("     failed reallocation for klass %s", k->external_name());
 319             } else {
 320               st.print_cr("     object <" INTPTR_FORMAT "> locked", p2i(mi->owner()));
 321             }
 322           }
 323         }
 324         tty->print_raw(st.freeze());
 325       }
 326 #endif // !PRODUCT
 327     }
 328   }
 329 }
 330 
 331 // Deoptimize objects, that is reallocate and relock them, just before they escape through JVMTI.
 332 // The given vframes cover one physical frame.
 333 bool Deoptimization::deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
 334                                                  bool& realloc_failures) {
 335   frame deoptee = chunk->at(0)->fr();
 336   JavaThread* deoptee_thread = chunk->at(0)->thread();
 337   CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
 338   RegisterMap map(chunk->at(0)->register_map());
 339   bool deoptimized_objects = false;
 340 
 341   bool const jvmci_enabled = JVMCI_ONLY(UseJVMCICompiler) NOT_JVMCI(false);
 342 
 343   // Reallocate the non-escaping objects and restore their fields.
 344   if (jvmci_enabled COMPILER2_PRESENT(|| (DoEscapeAnalysis && EliminateAllocations)
 345                                       || EliminateAutoBox || EnableVectorAggressiveReboxing)) {
 346     realloc_failures = rematerialize_objects(thread, Unpack_none, cm, deoptee, map, chunk, deoptimized_objects);
 347   }
 348 
 349   // MonitorInfo structures used in eliminate_locks are not GC safe.
 350   NoSafepointVerifier no_safepoint;
 351 
 352   // Now relock objects if synchronization on them was eliminated.
 353   if (jvmci_enabled COMPILER2_PRESENT(|| ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks))) {
 354     restore_eliminated_locks(thread, chunk, realloc_failures, deoptee, Unpack_none, deoptimized_objects);
 355   }
 356   return deoptimized_objects;
 357 }
 358 #endif // COMPILER2_OR_JVMCI
 359 
 360 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
 361 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* current, int exec_mode) {
 362   // When we get here we are about to unwind the deoptee frame. In order to
 363   // catch not yet safe to use frames, the following stack watermark barrier
 364   // poll will make such frames safe to use.
 365   StackWatermarkSet::before_unwind(current);
 366 
 367   // Note: there is a safepoint safety issue here. No matter whether we enter
 368   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
 369   // the vframeArray is created.
 370   //
 371 
 372   // Allocate our special deoptimization ResourceMark
 373   DeoptResourceMark* dmark = new DeoptResourceMark(current);
 374   assert(current->deopt_mark() == NULL, "Pending deopt!");
 375   current->set_deopt_mark(dmark);
 376 
 377   frame stub_frame = current->last_frame(); // Makes stack walkable as side effect
 378   RegisterMap map(current,
 379                   RegisterMap::UpdateMap::include,
 380                   RegisterMap::ProcessFrames::include,
 381                   RegisterMap::WalkContinuation::skip);
 382   RegisterMap dummy_map(current,
 383                         RegisterMap::UpdateMap::skip,
 384                         RegisterMap::ProcessFrames::include,
 385                         RegisterMap::WalkContinuation::skip);
 386   // Now get the deoptee with a valid map
 387   frame deoptee = stub_frame.sender(&map);
 388   // Set the deoptee nmethod
 389   assert(current->deopt_compiled_method() == NULL, "Pending deopt!");
 390   CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
 391   current->set_deopt_compiled_method(cm);
 392 
 393   if (VerifyStack) {
 394     current->validate_frame_layout();
 395   }
 396 
 397   // Create a growable array of VFrames where each VFrame represents an inlined
 398   // Java frame.  This storage is allocated with the usual system arena.
 399   assert(deoptee.is_compiled_frame(), "Wrong frame type");
 400   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
 401   vframe* vf = vframe::new_vframe(&deoptee, &map, current);
 402   while (!vf->is_top()) {
 403     assert(vf->is_compiled_frame(), "Wrong frame type");
 404     chunk->push(compiledVFrame::cast(vf));
 405     vf = vf->sender();
 406   }
 407   assert(vf->is_compiled_frame(), "Wrong frame type");
 408   chunk->push(compiledVFrame::cast(vf));
 409 
 410   bool realloc_failures = false;
 411 
 412 #if COMPILER2_OR_JVMCI
 413   bool const jvmci_enabled = JVMCI_ONLY(EnableJVMCI) NOT_JVMCI(false);
 414 
 415   // Reallocate the non-escaping objects and restore their fields. Then
 416   // relock objects if synchronization on them was eliminated.
 417   if (jvmci_enabled COMPILER2_PRESENT( || (DoEscapeAnalysis && EliminateAllocations)
 418                                        || EliminateAutoBox || EnableVectorAggressiveReboxing )) {
 419     bool unused;
 420     realloc_failures = rematerialize_objects(current, exec_mode, cm, deoptee, map, chunk, unused);
 421   }
 422 #endif // COMPILER2_OR_JVMCI
 423 
 424   // Ensure that no safepoint is taken after pointers have been stored
 425   // in fields of rematerialized objects.  If a safepoint occurs from here on
 426   // out the java state residing in the vframeArray will be missed.
  427   // Locks may be rebiased in a safepoint.
 428   NoSafepointVerifier no_safepoint;
 429 
 430 #if COMPILER2_OR_JVMCI
 431   if ((jvmci_enabled COMPILER2_PRESENT( || ((DoEscapeAnalysis || EliminateNestedLocks) && EliminateLocks) ))
 432       && !EscapeBarrier::objs_are_deoptimized(current, deoptee.id())) {
 433     bool unused;
 434     restore_eliminated_locks(current, chunk, realloc_failures, deoptee, exec_mode, unused);
 435   }
 436 #endif // COMPILER2_OR_JVMCI
 437 
 438   ScopeDesc* trap_scope = chunk->at(0)->scope();
 439   Handle exceptionObject;
 440   if (trap_scope->rethrow_exception()) {
 441 #ifndef PRODUCT
 442     if (PrintDeoptimizationDetails) {
 443       tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci());
 444     }
 445 #endif // !PRODUCT
 446 
 447     GrowableArray<ScopeValue*>* expressions = trap_scope->expressions();
 448     guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw");
 449     ScopeValue* topOfStack = expressions->top();
 450     exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj();
 451     guarantee(exceptionObject() != NULL, "exception oop can not be null");
 452   }
 453 
 454   vframeArray* array = create_vframeArray(current, deoptee, &map, chunk, realloc_failures);
 455 #if COMPILER2_OR_JVMCI
 456   if (realloc_failures) {
 457     // FIXME: This very crudely destroys all ExtentLocal bindings. This
 458     // is better than a bound value escaping, but far from ideal.
 459     oop java_thread = current->threadObj();
 460     current->set_extentLocalCache(NULL);
 461     java_lang_Thread::clear_extentLocalBindings(java_thread);
 462     pop_frames_failed_reallocs(current, array);
 463   }
 464 #endif
 465 
 466   assert(current->vframe_array_head() == NULL, "Pending deopt!");
 467   current->set_vframe_array_head(array);
 468 
 469   // Now that the vframeArray has been created if we have any deferred local writes
 470   // added by jvmti then we can free up that structure as the data is now in the
 471   // vframeArray
 472 
 473   JvmtiDeferredUpdates::delete_updates_for_frame(current, array->original().id());
 474 
 475   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
 476   CodeBlob* cb = stub_frame.cb();
 477   // Verify we have the right vframeArray
 478   assert(cb->frame_size() >= 0, "Unexpected frame size");
 479   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
 480 
 481   // If the deopt call site is a MethodHandle invoke call site we have
 482   // to adjust the unpack_sp.
 483   nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
 484   if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
 485     unpack_sp = deoptee.unextended_sp();
 486 
 487 #ifdef ASSERT
 488   assert(cb->is_deoptimization_stub() ||
 489          cb->is_uncommon_trap_stub() ||
 490          strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 ||
 491          strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0,
 492          "unexpected code blob: %s", cb->name());
 493 #endif
 494 
 495   // This is a guarantee instead of an assert because if vframe doesn't match
 496   // we will unpack the wrong deoptimized frame and wind up in strange places
 497   // where it will be very difficult to figure out what went wrong. Better
 498   // to die an early death here than some very obscure death later when the
 499   // trail is cold.
 500   // Note: on ia64 this guarantee can be fooled by frames with no memory stack
 501   // in that it will fail to detect a problem when there is one. This needs
 502   // more work in tiger timeframe.
 503   guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
 504 
 505   int number_of_frames = array->frames();
 506 
 507   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
 508   // virtual activation, which is the reverse of the elements in the vframes array.
 509   intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
 510   // +1 because we always have an interpreter return address for the final slot.
 511   address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
 512   int popframe_extra_args = 0;
 513   // Create an interpreter return address for the stub to use as its return
 514   // address so the skeletal frames are perfectly walkable
 515   frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
 516 
 517   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
 518   // activation be put back on the expression stack of the caller for reexecution
 519   if (JvmtiExport::can_pop_frame() && current->popframe_forcing_deopt_reexecution()) {
 520     popframe_extra_args = in_words(current->popframe_preserved_args_size_in_words());
 521   }
 522 
  523   // Find the current pc for the sender of the deoptee. Since the sender may have been deoptimized
  524   // itself since the deoptee vframeArray was created, we must get a fresh value of the pc rather
  525   // than simply use array->sender.pc(). This requires us to walk the current set of frames.
 526   //
 527   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
 528   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
 529 
 530   // It's possible that the number of parameters at the call site is
 531   // different than number of arguments in the callee when method
 532   // handles are used.  If the caller is interpreted get the real
  533   // value so that the proper amount of space can be added to its
 534   // frame.
 535   bool caller_was_method_handle = false;
 536   if (deopt_sender.is_interpreted_frame()) {
 537     methodHandle method(current, deopt_sender.interpreter_frame_method());
 538     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
 539     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
 540       // Method handle invokes may involve fairly arbitrary chains of
 541       // calls so it's impossible to know how much actual space the
 542       // caller has for locals.
 543       caller_was_method_handle = true;
 544     }
 545   }
 546 
 547   //
 548   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
 549   // frame_sizes/frame_pcs[1] next oldest frame (int)
 550   // frame_sizes/frame_pcs[n] youngest frame (int)
 551   //
 552   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
  553   // owns the space for the return address to its caller).  Confusing, ain't it?
 554   //
 555   // The vframe array can address vframes with indices running from
  556   // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
  557   // When we create the skeletal frames we need the oldest frame to be in the zero slot
  558   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk,
  559   // which is why the indexing in this loop looks a little strange.
 560   //
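        // Illustration (hypothetical 3-frame chunk): the loop below fills
        // frame_sizes/frame_pcs[2] from vframe index 0 (youngest),
        // frame_sizes/frame_pcs[1] from index 1, and frame_sizes/frame_pcs[0]
        // from index 2 (the oldest/root frame), while frame_pcs[3] keeps the
        // interpreter deopt entry stored above as the final return-address slot.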
 561   int callee_parameters = 0;
 562   int callee_locals = 0;
 563   for (int index = 0; index < array->frames(); index++ ) {
 564     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
 565     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
 566     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
 567     frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
 568                                                                                                     callee_locals,
 569                                                                                                     index == 0,
 570                                                                                                     popframe_extra_args);
 571     // This pc doesn't have to be perfect just good enough to identify the frame
 572     // as interpreted so the skeleton frame will be walkable
 573     // The correct pc will be set when the skeleton frame is completely filled out
 574     // The final pc we store in the loop is wrong and will be overwritten below
 575     frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
 576 
 577     callee_parameters = array->element(index)->method()->size_of_parameters();
 578     callee_locals = array->element(index)->method()->max_locals();
 579     popframe_extra_args = 0;
 580   }
 581 
 582   // Compute whether the root vframe returns a float or double value.
 583   BasicType return_type;
 584   {
 585     methodHandle method(current, array->element(0)->method());
 586     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
 587     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
 588   }
 589 
 590   // Compute information for handling adapters and adjusting the frame size of the caller.
 591   int caller_adjustment = 0;
 592 
 593   // Compute the amount the oldest interpreter frame will have to adjust
 594   // its caller's stack by. If the caller is a compiled frame then
 595   // we pretend that the callee has no parameters so that the
 596   // extension counts for the full amount of locals and not just
 597   // locals-parms. This is because without a c2i adapter the parm
 598   // area as created by the compiled frame will not be usable by
 599   // the interpreter. (Depending on the calling convention there
 600   // may not even be enough space).
 601 
 602   // QQQ I'd rather see this pushed down into last_frame_adjust
 603   // and have it take the sender (aka caller).
 604 
 605   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 606     caller_adjustment = last_frame_adjust(0, callee_locals);
 607   } else if (callee_locals > callee_parameters) {
 608     // The caller frame may need extending to accommodate
 609     // non-parameter locals of the first unpacked interpreted frame.
 610     // Compute that adjustment.
 611     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 612   }
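
        // Example (hypothetical numbers): if the oldest unpacked method has 5
        // locals of which 2 are parameters, an interpreted caller needs
        // last_frame_adjust(2, 5) of extra space, while a compiled (or method
        // handle) caller is charged last_frame_adjust(0, 5), i.e. room for all
        // of the locals.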
 613 
  614   // If the sender is deoptimized then we must retrieve the address of the handler
 615   // since the frame will "magically" show the original pc before the deopt
 616   // and we'd undo the deopt.
 617 
 618   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 619   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 620     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 621   }
 622 
 623   assert(CodeCache::find_blob(frame_pcs[0]) != NULL, "bad pc");
 624 
 625 #if INCLUDE_JVMCI
 626   if (exceptionObject() != NULL) {
 627     current->set_exception_oop(exceptionObject());
 628     exec_mode = Unpack_exception;
 629   }
 630 #endif
 631 
 632   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 633     assert(current->has_pending_exception(), "should have thrown OOME");
 634     current->set_exception_oop(current->pending_exception());
 635     current->clear_pending_exception();
 636     exec_mode = Unpack_exception;
 637   }
 638 
 639 #if INCLUDE_JVMCI
 640   if (current->frames_to_pop_failed_realloc() > 0) {
 641     current->set_pending_monitorenter(false);
 642   }
 643 #endif
 644 
 645   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
 646                                       caller_adjustment * BytesPerWord,
 647                                       caller_was_method_handle ? 0 : callee_parameters,
 648                                       number_of_frames,
 649                                       frame_sizes,
 650                                       frame_pcs,
 651                                       return_type,
 652                                       exec_mode);
 653   // On some platforms, we need a way to pass some platform dependent
 654   // information to the unpacking code so the skeletal frames come out
 655   // correct (initial fp value, unextended sp, ...)
 656   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
 657 
 658   if (array->frames() > 1) {
 659     if (VerifyStack && TraceDeoptimization) {
 660       tty->print_cr("Deoptimizing method containing inlining");
 661     }
 662   }
 663 
 664   array->set_unroll_block(info);
 665   return info;
 666 }
 667 
 668 // Called to cleanup deoptimization data structures in normal case
 669 // after unpacking to stack and when stack overflow error occurs
 670 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
 671                                         vframeArray *array) {
 672 
 673   // Get array if coming from exception
 674   if (array == NULL) {
 675     array = thread->vframe_array_head();
 676   }
 677   thread->set_vframe_array_head(NULL);
 678 
 679   // Free the previous UnrollBlock
 680   vframeArray* old_array = thread->vframe_array_last();
 681   thread->set_vframe_array_last(array);
 682 
 683   if (old_array != NULL) {
 684     UnrollBlock* old_info = old_array->unroll_block();
 685     old_array->set_unroll_block(NULL);
 686     delete old_info;
 687     delete old_array;
 688   }
 689 
  690   // Deallocate any resources created in this routine and any ResourceObjs allocated
 691   // inside the vframeArray (StackValueCollections)
 692 
 693   delete thread->deopt_mark();
 694   thread->set_deopt_mark(NULL);
 695   thread->set_deopt_compiled_method(NULL);
 696 
 697 
 698   if (JvmtiExport::can_pop_frame()) {
 699     // Regardless of whether we entered this routine with the pending
 700     // popframe condition bit set, we should always clear it now
 701     thread->clear_popframe_condition();
 702   }
 703 
 704   // unpack_frames() is called at the end of the deoptimization handler
 705   // and (in C2) at the end of the uncommon trap handler. Note this fact
 706   // so that an asynchronous stack walker can work again. This counter is
 707   // incremented at the beginning of fetch_unroll_info() and (in C2) at
 708   // the beginning of uncommon_trap().
 709   thread->dec_in_deopt_handler();
 710 }
 711 
 712 // Moved from cpu directories because none of the cpus has callee save values.
 713 // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp.
 714 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
 715 
 716   // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
  717   // the days we had adapter frames. When we deoptimize a situation where a
  718   // compiled caller calls a compiled callee, the caller will have registers it expects
  719   // to survive the call to the callee. If we deoptimize the callee, the only
 720   // way we can restore these registers is to have the oldest interpreter
 721   // frame that we create restore these values. That is what this routine
 722   // will accomplish.
 723 
 724   // At the moment we have modified c2 to not have any callee save registers
 725   // so this problem does not exist and this routine is just a place holder.
 726 
 727   assert(f->is_interpreted_frame(), "must be interpreted");
 728 }
 729 
 730 #ifndef PRODUCT
 731 static bool falls_through(Bytecodes::Code bc) {
 732   switch (bc) {
 733     // List may be incomplete.  Here we really only care about bytecodes where compiled code
 734     // can deoptimize.
 735     case Bytecodes::_goto:
 736     case Bytecodes::_goto_w:
 737     case Bytecodes::_athrow:
 738       return false;
 739     default:
 740       return true;
 741   }
 742 }
 743 #endif
 744 
 745 // Return BasicType of value being returned
 746 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
 747 
  748   // We are already active in the special DeoptResourceMark; any ResourceObj's we
  749   // allocate will be freed at the end of the routine.
 750 
 751   // JRT_LEAF methods don't normally allocate handles and there is a
 752   // NoHandleMark to enforce that. It is actually safe to use Handles
 753   // in a JRT_LEAF method, and sometimes desirable, but to do so we
 754   // must use ResetNoHandleMark to bypass the NoHandleMark, and
 755   // then use a HandleMark to ensure any Handles we do create are
 756   // cleaned up in this scope.
 757   ResetNoHandleMark rnhm;
 758   HandleMark hm(thread);
 759 
 760   frame stub_frame = thread->last_frame();
 761 
 762   Continuation::notify_deopt(thread, stub_frame.sp());
 763 
 764   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
 765   // must point to the vframeArray for the unpack frame.
 766   vframeArray* array = thread->vframe_array_head();
 767   UnrollBlock* info = array->unroll_block();
 768 
  769   // We set the last_Java frame. But the stack isn't really parsable here. So we
  770   // clear it to make sure JFR understands not to try to walk stacks from events
  771   // in here.
 772   intptr_t* sp = thread->frame_anchor()->last_Java_sp();
 773   thread->frame_anchor()->set_last_Java_sp(NULL);
 774 
 775   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
 776   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
 777 
 778   thread->frame_anchor()->set_last_Java_sp(sp);
 779 
 780   BasicType bt = info->return_type();
 781 
 782   // If we have an exception pending, claim that the return type is an oop
 783   // so the deopt_blob does not overwrite the exception_oop.
 784 
 785   if (exec_mode == Unpack_exception)
 786     bt = T_OBJECT;
 787 
 788   // Cleanup thread deopt data
 789   cleanup_deopt_info(thread, array);
 790 
 791 #ifndef PRODUCT
 792   if (VerifyStack) {
 793     ResourceMark res_mark;
 794     // Clear pending exception to not break verification code (restored afterwards)
 795     PreserveExceptionMark pm(thread);
 796 
 797     thread->validate_frame_layout();
 798 
 799     // Verify that the just-unpacked frames match the interpreter's
 800     // notions of expression stack and locals
 801     vframeArray* cur_array = thread->vframe_array_last();
 802     RegisterMap rm(thread,
 803                    RegisterMap::UpdateMap::skip,
 804                    RegisterMap::ProcessFrames::include,
 805                    RegisterMap::WalkContinuation::skip);
 806     rm.set_include_argument_oops(false);
 807     bool is_top_frame = true;
 808     int callee_size_of_parameters = 0;
 809     int callee_max_locals = 0;
 810     for (int i = 0; i < cur_array->frames(); i++) {
 811       vframeArrayElement* el = cur_array->element(i);
 812       frame* iframe = el->iframe();
 813       guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
 814 
 815       // Get the oop map for this bci
 816       InterpreterOopMap mask;
 817       int cur_invoke_parameter_size = 0;
 818       bool try_next_mask = false;
 819       int next_mask_expression_stack_size = -1;
 820       int top_frame_expression_stack_adjustment = 0;
 821       methodHandle mh(thread, iframe->interpreter_frame_method());
 822       OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
 823       BytecodeStream str(mh, iframe->interpreter_frame_bci());
 824       int max_bci = mh->code_size();
 825       // Get to the next bytecode if possible
 826       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
 827       // Check to see if we can grab the number of outgoing arguments
 828       // at an uncommon trap for an invoke (where the compiler
 829       // generates debug info before the invoke has executed)
 830       Bytecodes::Code cur_code = str.next();
 831       Bytecodes::Code next_code = Bytecodes::_shouldnotreachhere;
 832       if (Bytecodes::is_invoke(cur_code)) {
 833         Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
 834         cur_invoke_parameter_size = invoke.size_of_parameters();
 835         if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
 836           callee_size_of_parameters++;
 837         }
 838       }
 839       if (str.bci() < max_bci) {
 840         next_code = str.next();
 841         if (next_code >= 0) {
 842           // The interpreter oop map generator reports results before
 843           // the current bytecode has executed except in the case of
 844           // calls. It seems to be hard to tell whether the compiler
 845           // has emitted debug information matching the "state before"
 846           // a given bytecode or the state after, so we try both
 847           if (!Bytecodes::is_invoke(cur_code) && falls_through(cur_code)) {
 848             // Get expression stack size for the next bytecode
 849             InterpreterOopMap next_mask;
 850             OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
 851             next_mask_expression_stack_size = next_mask.expression_stack_size();
 852             if (Bytecodes::is_invoke(next_code)) {
 853               Bytecode_invoke invoke(mh, str.bci());
 854               next_mask_expression_stack_size += invoke.size_of_parameters();
 855             }
 856             // Need to subtract off the size of the result type of
 857             // the bytecode because this is not described in the
 858             // debug info but returned to the interpreter in the TOS
 859             // caching register
 860             BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
 861             if (bytecode_result_type != T_ILLEGAL) {
 862               top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
 863             }
  864             assert(top_frame_expression_stack_adjustment >= 0, "stack adjustment must not be negative");
 865             try_next_mask = true;
 866           }
 867         }
 868       }
 869 
 870       // Verify stack depth and oops in frame
 871       // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
 872       if (!(
 873             /* SPARC */
 874             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
 875             /* x86 */
 876             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
 877             (try_next_mask &&
 878              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
 879                                                                     top_frame_expression_stack_adjustment))) ||
 880             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
 881             (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
 882              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
 883             )) {
 884         {
 885           // Print out some information that will help us debug the problem
 886           tty->print_cr("Wrong number of expression stack elements during deoptimization");
 887           tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
 888           tty->print_cr("  Current code %s", Bytecodes::name(cur_code));
 889           if (try_next_mask) {
 890             tty->print_cr("  Next code %s", Bytecodes::name(next_code));
 891           }
 892           tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
 893                         iframe->interpreter_frame_expression_stack_size());
 894           tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
 895           tty->print_cr("  try_next_mask = %d", try_next_mask);
 896           tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
 897           tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
 898           tty->print_cr("  callee_max_locals = %d", callee_max_locals);
 899           tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
 900           tty->print_cr("  exec_mode = %d", exec_mode);
 901           tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
 902           tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id());
 903           tty->print_cr("  Interpreted frames:");
 904           for (int k = 0; k < cur_array->frames(); k++) {
 905             vframeArrayElement* el = cur_array->element(k);
 906             tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
 907           }
 908           cur_array->print_on_2(tty);
 909         }
 910         guarantee(false, "wrong number of expression stack elements during deopt");
 911       }
 912       VerifyOopClosure verify;
 913       iframe->oops_interpreted_do(&verify, &rm, false);
 914       callee_size_of_parameters = mh->size_of_parameters();
 915       callee_max_locals = mh->max_locals();
 916       is_top_frame = false;
 917     }
 918   }
 919 #endif // !PRODUCT
 920 
 921   return bt;
 922 JRT_END
 923 
 924 class DeoptimizeMarkedClosure : public HandshakeClosure {
 925  public:
 926   DeoptimizeMarkedClosure() : HandshakeClosure("Deoptimize") {}
 927   void do_thread(Thread* thread) {
 928     JavaThread* jt = JavaThread::cast(thread);
 929     jt->deoptimize_marked_methods();
 930   }
 931 };
 932 
 933 void Deoptimization::deoptimize_all_marked(nmethod* nmethod_only) {
 934   ResourceMark rm;
 935   DeoptimizationMarker dm;
 936 
 937   // Make the dependent methods not entrant
 938   if (nmethod_only != NULL) {
 939     nmethod_only->mark_for_deoptimization();
 940     nmethod_only->make_not_entrant();
 941     CodeCache::make_nmethod_deoptimized(nmethod_only);
 942   } else {
 943     CodeCache::make_marked_nmethods_deoptimized();
 944   }
 945 
 946   DeoptimizeMarkedClosure deopt;
 947   if (SafepointSynchronize::is_at_safepoint()) {
 948     Threads::java_threads_do(&deopt);
 949   } else {
 950     Handshake::execute(&deopt);
 951   }
 952 }
 953 
 954 Deoptimization::DeoptAction Deoptimization::_unloaded_action
 955   = Deoptimization::Action_reinterpret;
 956 
 957 #if INCLUDE_JVMCI
 958 template<typename CacheType>
 959 class BoxCacheBase : public CHeapObj<mtCompiler> {
 960 protected:
 961   static InstanceKlass* find_cache_klass(Thread* thread, Symbol* klass_name) {
 962     ResourceMark rm(thread);
 963     char* klass_name_str = klass_name->as_C_string();
 964     InstanceKlass* ik = SystemDictionary::find_instance_klass(thread, klass_name, Handle(), Handle());
 965     guarantee(ik != NULL, "%s must be loaded", klass_name_str);
 966     guarantee(ik->is_initialized(), "%s must be initialized", klass_name_str);
 967     CacheType::compute_offsets(ik);
 968     return ik;
 969   }
 970 };
 971 
 972 template<typename PrimitiveType, typename CacheType, typename BoxType> class BoxCache  : public BoxCacheBase<CacheType> {
 973   PrimitiveType _low;
 974   PrimitiveType _high;
 975   jobject _cache;
 976 protected:
 977   static BoxCache<PrimitiveType, CacheType, BoxType> *_singleton;
 978   BoxCache(Thread* thread) {
 979     InstanceKlass* ik = BoxCacheBase<CacheType>::find_cache_klass(thread, CacheType::symbol());
 980     objArrayOop cache = CacheType::cache(ik);
 981     assert(cache->length() > 0, "Empty cache");
 982     _low = BoxType::value(cache->obj_at(0));
 983     _high = _low + cache->length() - 1;
 984     _cache = JNIHandles::make_global(Handle(thread, cache));
 985   }
 986   ~BoxCache() {
 987     JNIHandles::destroy_global(_cache);
 988   }
 989 public:
 990   static BoxCache<PrimitiveType, CacheType, BoxType>* singleton(Thread* thread) {
 991     if (_singleton == NULL) {
 992       BoxCache<PrimitiveType, CacheType, BoxType>* s = new BoxCache<PrimitiveType, CacheType, BoxType>(thread);
 993       if (!Atomic::replace_if_null(&_singleton, s)) {
 994         delete s;
 995       }
 996     }
 997     return _singleton;
 998   }
 999   oop lookup(PrimitiveType value) {
1000     if (_low <= value && value <= _high) {
1001       int offset = value - _low;
1002       return objArrayOop(JNIHandles::resolve_non_null(_cache))->obj_at(offset);
1003     }
1004     return NULL;
1005   }
1006   oop lookup_raw(intptr_t raw_value) {
1007     // Have to cast to avoid little/big-endian problems.
1008     if (sizeof(PrimitiveType) > sizeof(jint)) {
1009       jlong value = (jlong)raw_value;
1010       return lookup(value);
1011     }
1012     PrimitiveType value = (PrimitiveType)*((jint*)&raw_value);
1013     return lookup(value);
1014   }
1015 };
1016 
1017 typedef BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer> IntegerBoxCache;
1018 typedef BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long> LongBoxCache;
1019 typedef BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character> CharacterBoxCache;
1020 typedef BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short> ShortBoxCache;
1021 typedef BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte> ByteBoxCache;
1022 
1023 template<> BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>* BoxCache<jint, java_lang_Integer_IntegerCache, java_lang_Integer>::_singleton = NULL;
1024 template<> BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>* BoxCache<jlong, java_lang_Long_LongCache, java_lang_Long>::_singleton = NULL;
1025 template<> BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>* BoxCache<jchar, java_lang_Character_CharacterCache, java_lang_Character>::_singleton = NULL;
1026 template<> BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>* BoxCache<jshort, java_lang_Short_ShortCache, java_lang_Short>::_singleton = NULL;
1027 template<> BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>* BoxCache<jbyte, java_lang_Byte_ByteCache, java_lang_Byte>::_singleton = NULL;
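
      // Usage sketch (mirrors get_cached_box() below): for an in-range value,
      // e.g. IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_int()),
      // lookup() returns the canonical cached box oop; out-of-range values
      // return NULL and fall back to a regular allocation in realloc_objects().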
1028 
1029 class BooleanBoxCache : public BoxCacheBase<java_lang_Boolean> {
1030   jobject _true_cache;
1031   jobject _false_cache;
1032 protected:
1033   static BooleanBoxCache *_singleton;
1034   BooleanBoxCache(Thread *thread) {
1035     InstanceKlass* ik = find_cache_klass(thread, java_lang_Boolean::symbol());
1036     _true_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_TRUE(ik)));
1037     _false_cache = JNIHandles::make_global(Handle(thread, java_lang_Boolean::get_FALSE(ik)));
1038   }
1039   ~BooleanBoxCache() {
1040     JNIHandles::destroy_global(_true_cache);
1041     JNIHandles::destroy_global(_false_cache);
1042   }
1043 public:
1044   static BooleanBoxCache* singleton(Thread* thread) {
1045     if (_singleton == NULL) {
1046       BooleanBoxCache* s = new BooleanBoxCache(thread);
1047       if (!Atomic::replace_if_null(&_singleton, s)) {
1048         delete s;
1049       }
1050     }
1051     return _singleton;
1052   }
1053   oop lookup_raw(intptr_t raw_value) {
1054     // Have to cast to avoid little/big-endian problems.
1055     jboolean value = (jboolean)*((jint*)&raw_value);
1056     return lookup(value);
1057   }
1058   oop lookup(jboolean value) {
1059     if (value != 0) {
1060       return JNIHandles::resolve_non_null(_true_cache);
1061     }
1062     return JNIHandles::resolve_non_null(_false_cache);
1063   }
1064 };
1065 
1066 BooleanBoxCache* BooleanBoxCache::_singleton = NULL;
1067 
1068 oop Deoptimization::get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, TRAPS) {
1069    Klass* k = java_lang_Class::as_Klass(bv->klass()->as_ConstantOopReadValue()->value()());
1070    BasicType box_type = vmClasses::box_klass_type(k);
1071    if (box_type != T_OBJECT) {
1072      StackValue* value = StackValue::create_stack_value(fr, reg_map, bv->field_at(box_type == T_LONG ? 1 : 0));
1073      switch(box_type) {
1074        case T_INT:     return IntegerBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1075        case T_CHAR:    return CharacterBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1076        case T_SHORT:   return ShortBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1077        case T_BYTE:    return ByteBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1078        case T_BOOLEAN: return BooleanBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1079        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_int());
1080        default:;
1081      }
1082    }
1083    return NULL;
1084 }
1085 #endif // INCLUDE_JVMCI
1086 
1087 #if COMPILER2_OR_JVMCI
1088 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1089   Handle pending_exception(THREAD, thread->pending_exception());
1090   const char* exception_file = thread->exception_file();
1091   int exception_line = thread->exception_line();
1092   thread->clear_pending_exception();
1093 
1094   bool failures = false;
1095 
1096   for (int i = 0; i < objects->length(); i++) {
1097     assert(objects->at(i)->is_object(), "invalid debug information");
1098     ObjectValue* sv = (ObjectValue*) objects->at(i);
1099 
1100     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1101     oop obj = NULL;
1102 
1103     if (k->is_instance_klass()) {
1104 #if INCLUDE_JVMCI
1105       CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
1106       if (cm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1107         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1108         obj = get_cached_box(abv, fr, reg_map, THREAD);
1109         if (obj != NULL) {
1110           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1111           abv->set_cached(true);
1112         }
1113       }
1114 #endif // INCLUDE_JVMCI
1115 
1116       InstanceKlass* ik = InstanceKlass::cast(k);
1117       if (obj == NULL) {
1118 #ifdef COMPILER2
1119         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1120           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1121         } else {
1122           obj = ik->allocate_instance(THREAD);
1123         }
1124 #else
1125         obj = ik->allocate_instance(THREAD);
1126 #endif // COMPILER2
1127       }
1128     } else if (k->is_typeArray_klass()) {
1129       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1130       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1131       int len = sv->field_size() / type2size[ak->element_type()];
1132       obj = ak->allocate(len, THREAD);
1133     } else if (k->is_objArray_klass()) {
1134       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1135       obj = ak->allocate(sv->field_size(), THREAD);
1136     }
1137 
1138     if (obj == NULL) {
1139       failures = true;
1140     }
1141 
1142     assert(sv->value().is_null(), "redundant reallocation");
1143     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
1144     CLEAR_PENDING_EXCEPTION;
1145     sv->set_value(obj);
1146   }
1147 
1148   if (failures) {
1149     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1150   } else if (pending_exception.not_null()) {
1151     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1152   }
1153 
1154   return failures;
1155 }
1156 
1157 #if INCLUDE_JVMCI
1158 /**
1159  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1160  * we need to somehow be able to recover the actual kind to be able to write the correct
1161  * amount of bytes.
1162  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
 1163  * the entries at index i + 1 to i + n - 1 are 'markers'.
1164  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1165  * expected form of the array would be:
1166  *
1167  * {b0, b1, b2, b3, INT, marker, b6, b7}
1168  *
1169  * Thus, in order to get back the size of the entry, we simply count the entry itself
1170  * plus the marker entries that follow it.
1171  *
1172  * @param virtualArray the virtualized byte array
1173  * @param i index of the virtual entry we are recovering
1174  * @return The number of bytes the entry spans
1175  */
1176 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {
1177   int index = i;
1178   while (++index < virtualArray->field_size() &&
1179            virtualArray->field_at(index)->is_marker()) {}
1180   return index - i;
1181 }
1182 
1183 /**
1184  * If byte arrays were guaranteed to always start long-aligned, a simple check on the
1185  * parity of the index would suffice. Unfortunately, that is not always the case, so we
1186  * check the alignment of the actual address we are writing to.
1187  * If, for example, the address of index 0 happened to be offset by 5 bytes from a long
1188  * boundary, it would then be legal to write a long at index 3.
1189  */
1190 static jbyte* check_alignment_get_addr(typeArrayOop obj, int index, int expected_alignment) {
1191     jbyte* res = obj->byte_at_addr(index);
1192     assert((((intptr_t) res) % expected_alignment) == 0, "Non-aligned write");
1193     return res;
1194 }
1195 
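     // Store 'val' into the virtualized byte array 'obj' at byte index 'index', using a single
     // store of 'byte_count' bytes (1, 2, 4 or 8). Multi-byte stores assert that the target
     // address is naturally aligned for the store width.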
1196 static void byte_array_put(typeArrayOop obj, intptr_t val, int index, int byte_count) {
1197   switch (byte_count) {
1198     case 1:
1199       obj->byte_at_put(index, (jbyte) *((jint *) &val));
1200       break;
1201     case 2:
1202       *((jshort *) check_alignment_get_addr(obj, index, 2)) = (jshort) *((jint *) &val);
1203       break;
1204     case 4:
1205       *((jint *) check_alignment_get_addr(obj, index, 4)) = (jint) *((jint *) &val);
1206       break;
1207     case 8:
1208       *((jlong *) check_alignment_get_addr(obj, index, 8)) = (jlong) *((jlong *) &val);
1209       break;
1210     default:
1211       ShouldNotReachHere();
1212   }
1213 }
1214 #endif // INCLUDE_JVMCI
1215 
1216 
1217 // restore elements of an eliminated type array
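     // Element kinds narrower than an int arrive erased to stack ints; longs and doubles are
     // rebuilt from two adjacent 32-bit fields, and (when built with JVMCI) byte arrays use
     // marker entries to recover the original store width.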
1218 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
1219   int index = 0;
1220   intptr_t val;
1221 
1222   for (int i = 0; i < sv->field_size(); i++) {
1223     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1224     switch(type) {
1225     case T_LONG: case T_DOUBLE: {
1226       assert(value->type() == T_INT, "Agreement.");
1227       StackValue* low =
1228         StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1229 #ifdef _LP64
1230       jlong res = (jlong)low->get_int();
1231 #else
1232       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1233 #endif
1234       obj->long_at_put(index, res);
1235       break;
1236     }
1237 
1238     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1239     case T_INT: case T_FLOAT: { // 4 bytes.
1240       assert(value->type() == T_INT, "Agreement.");
1241       bool big_value = false;
1242       if (i + 1 < sv->field_size() && type == T_INT) {
1243         if (sv->field_at(i)->is_location()) {
1244           Location::Type type = ((LocationValue*) sv->field_at(i))->location().type();
1245           if (type == Location::dbl || type == Location::lng) {
1246             big_value = true;
1247           }
1248         } else if (sv->field_at(i)->is_constant_int()) {
1249           ScopeValue* next_scope_field = sv->field_at(i + 1);
1250           if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1251             big_value = true;
1252           }
1253         }
1254       }
1255 
1256       if (big_value) {
1257         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
1258 #ifdef _LP64
1259         jlong res = (jlong)low->get_int();
1260 #else
1261         jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1262 #endif
1263         obj->int_at_put(index, (jint)*((jint*)&res));
1264         obj->int_at_put(++index, (jint)*(((jint*)&res) + 1));
1265       } else {
1266         val = value->get_int();
1267         obj->int_at_put(index, (jint)*((jint*)&val));
1268       }
1269       break;
1270     }
1271 
1272     case T_SHORT:
1273       assert(value->type() == T_INT, "Agreement.");
1274       val = value->get_int();
1275       obj->short_at_put(index, (jshort)*((jint*)&val));
1276       break;
1277 
1278     case T_CHAR:
1279       assert(value->type() == T_INT, "Agreement.");
1280       val = value->get_int();
1281       obj->char_at_put(index, (jchar)*((jint*)&val));
1282       break;
1283 
1284     case T_BYTE: {
1285       assert(value->type() == T_INT, "Agreement.");
1286       // The value we get is erased as a regular int. We will need to find its actual byte count 'by hand'.
1287       val = value->get_int();
1288 #if INCLUDE_JVMCI
1289       int byte_count = count_number_of_bytes_for_entry(sv, i);
1290       byte_array_put(obj, val, index, byte_count);
1291       // Per the byte_count contract, the entries at i + 1 to i + byte_count - 1 are markers. Skip them.
1292       i += byte_count - 1; // Balance the loop counter.
1293       index += byte_count;
1294       // index has been updated so continue at top of loop
1295       continue;
1296 #else
1297       obj->byte_at_put(index, (jbyte)*((jint*)&val));
1298       break;
1299 #endif // INCLUDE_JVMCI
1300     }
1301 
1302     case T_BOOLEAN: {
1303       assert(value->type() == T_INT, "Agreement.");
1304       val = value->get_int();
1305       obj->bool_at_put(index, (jboolean)*((jint*)&val));
1306       break;
1307     }
1308 
1309       default:
1310         ShouldNotReachHere();
1311     }
1312     index++;
1313   }
1314 }
1315 
1316 // restore fields of an eliminated object array
1317 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1318   for (int i = 0; i < sv->field_size(); i++) {
1319     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1320     assert(value->type() == T_OBJECT, "object element expected");
1321     obj->obj_at_put(i, value->get_obj()());
1322   }
1323 }
1324 
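     // Pairs an instance field's offset within the object with its BasicType so the
     // scope-debug values can be replayed into a reallocated instance in offset order.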
1325 class ReassignedField {
1326 public:
1327   int _offset;
1328   BasicType _type;
1329 public:
1330   ReassignedField() {
1331     _offset = 0;
1332     _type = T_ILLEGAL;
1333   }
1334 };
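     // Comparator for GrowableArray::sort(): orders ReassignedFields by ascending offset.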
1335 
1336 int compare(ReassignedField* left, ReassignedField* right) {
1337   return left->_offset - right->_offset;
1338 }
1339 
1340 // Restore fields of an eliminated instance object using the same field order
1341 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true)
1342 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal) {
1343   GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>();
1344   InstanceKlass* ik = klass;
1345   while (ik != NULL) {
1346     for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
1347       if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
1348         ReassignedField field;
1349         field._offset = fs.offset();
1350         field._type = Signature::basic_type(fs.signature());
1351         fields->append(field);
1352       }
1353     }
1354     ik = ik->superklass();
1355   }
1356   fields->sort(compare);
1357   for (int i = 0; i < fields->length(); i++) {
1358     intptr_t val;
1359     ScopeValue* scope_field = sv->field_at(svIndex);
1360     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1361     int offset = fields->at(i)._offset;
1362     BasicType type = fields->at(i)._type;
1363     switch (type) {
1364       case T_OBJECT: case T_ARRAY:
1365         assert(value->type() == T_OBJECT, "Agreement.");
1366         obj->obj_field_put(offset, value->get_obj()());
1367         break;
1368 
1369       // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
1370       case T_INT: case T_FLOAT: { // 4 bytes.
1371         assert(value->type() == T_INT, "Agreement.");
1372         bool big_value = false;
1373         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1374           if (scope_field->is_location()) {
1375             Location::Type type = ((LocationValue*) scope_field)->location().type();
1376             if (type == Location::dbl || type == Location::lng) {
1377               big_value = true;
1378             }
1379           }
1380           if (scope_field->is_constant_int()) {
1381             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1382             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1383               big_value = true;
1384             }
1385           }
1386         }
1387 
1388         if (big_value) {
1389           i++;
1390           assert(i < fields->length(), "second T_INT field needed");
1391           assert(fields->at(i)._type == T_INT, "T_INT field needed");
1392         } else {
1393           val = value->get_int();
1394           obj->int_field_put(offset, (jint)*((jint*)&val));
1395           break;
1396         }
1397       }
1398         /* no break */
1399 
1400       case T_LONG: case T_DOUBLE: {
1401         assert(value->type() == T_INT, "Agreement.");
1402         StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex));
1403 #ifdef _LP64
1404         jlong res = (jlong)low->get_int();
1405 #else
1406         jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
1407 #endif
1408         obj->long_field_put(offset, res);
1409         break;
1410       }
1411 
1412       case T_SHORT:
1413         assert(value->type() == T_INT, "Agreement.");
1414         val = value->get_int();
1415         obj->short_field_put(offset, (jshort)*((jint*)&val));
1416         break;
1417 
1418       case T_CHAR:
1419         assert(value->type() == T_INT, "Agreement.");
1420         val = value->get_int();
1421         obj->char_field_put(offset, (jchar)*((jint*)&val));
1422         break;
1423 
1424       case T_BYTE:
1425         assert(value->type() == T_INT, "Agreement.");
1426         val = value->get_int();
1427         obj->byte_field_put(offset, (jbyte)*((jint*)&val));
1428         break;
1429 
1430       case T_BOOLEAN:
1431         assert(value->type() == T_INT, "Agreement.");
1432         val = value->get_int();
1433         obj->bool_field_put(offset, (jboolean)*((jint*)&val));
1434         break;
1435 
1436       default:
1437         ShouldNotReachHere();
1438     }
1439     svIndex++;
1440   }
1441   return svIndex;
1442 }
1443 
1444 // restore fields of all eliminated objects and arrays
1445 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal) {
1446   for (int i = 0; i < objects->length(); i++) {
1447     ObjectValue* sv = (ObjectValue*) objects->at(i);
1448     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1449     Handle obj = sv->value();
1450     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1451 #ifndef PRODUCT
1452     if (PrintDeoptimizationDetails) {
1453       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1454     }
1455 #endif // !PRODUCT
1456 
1457     if (obj.is_null()) {
1458       continue;
1459     }
1460 
1461 #if INCLUDE_JVMCI
1462     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1463     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1464       continue;
1465     }
1466 #endif // INCLUDE_JVMCI
1467 #ifdef COMPILER2
1468     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1469       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1470       ScopeValue* payload = sv->field_at(0);
1471       if (payload->is_location() &&
1472           payload->as_LocationValue()->location().type() == Location::vector) {
1473 #ifndef PRODUCT
1474         if (PrintDeoptimizationDetails) {
1475           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1476           if (Verbose) {
1477             Handle obj = sv->value();
1478             k->oop_print_on(obj(), tty);
1479           }
1480         }
1481 #endif // !PRODUCT
1482         continue; // Such vector's value was already restored in VectorSupport::allocate_vector().
1483       }
1484       // Else fall-through to do assignment for scalar-replaced boxed vector representation
1485       // which could be restored after vector object allocation.
1486     }
1487 #endif // COMPILER2
1488     if (k->is_instance_klass()) {
1489       InstanceKlass* ik = InstanceKlass::cast(k);
1490       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal);
1491     } else if (k->is_typeArray_klass()) {
1492       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1493       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1494     } else if (k->is_objArray_klass()) {
1495       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1496     }
1497   }
1498 }
1499 
1500 
1501 // relock objects for which synchronization was eliminated
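     // Monitors whose owner was scalar replaced may only be skipped when reallocation failed;
     // otherwise the object is locked again on behalf of the deoptee thread. In the Unpack_none
     // case, relocking a monitor the deoptee is currently waiting on is deferred until the
     // wait finishes.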
1502 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1503                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1504   bool relocked_objects = false;
1505   for (int i = 0; i < monitors->length(); i++) {
1506     MonitorInfo* mon_info = monitors->at(i);
1507     if (mon_info->eliminated()) {
1508       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
1509       relocked_objects = true;
1510       if (!mon_info->owner_is_scalar_replaced()) {
1511         Handle obj(thread, mon_info->owner());
1512         markWord mark = obj->mark();
1513         if (exec_mode == Unpack_none && mark.has_monitor()) {
1514           // defer relocking if the deoptee thread is currently waiting for obj
1515           ObjectMonitor* waiting_monitor = deoptee_thread->current_waiting_monitor();
1516           if (waiting_monitor != NULL && waiting_monitor->object() == obj()) {
1517             assert(fr.is_deoptimized_frame(), "frame must be scheduled for deoptimization");
1518             JvmtiDeferredUpdates::inc_relock_count_after_wait(deoptee_thread);
1519             continue;
1520           }
1521         }
1522         ObjectSynchronizer::enter(obj, deoptee_thread);
1523         assert(mon_info->owner()->is_locked(), "object must be locked now");
1524       }
1525     }
1526   }
1527   return relocked_objects;
1528 }
1529 #endif // COMPILER2_OR_JVMCI
1530 
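     // Pack the state of the compiled frame 'fr', described by the compiledVFrames in 'chunk'
     // (innermost first), into a C-heap allocated vframeArray that unpack_frames() later uses
     // to rebuild the corresponding interpreter frames.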
1531 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
1532   Events::log_deopt_message(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp()));
1533 
1534   // Register map for next frame (used for stack crawl).  We capture
1535   // the state of the deopt'ing frame's caller.  Thus if we need to
1536   // stuff a C2I adapter we can properly fill in the callee-save
1537   // register locations.
1538   frame caller = fr.sender(reg_map);
1539   int frame_size = caller.sp() - fr.sp();
1540 
1541   frame sender = caller;
1542 
1543   // Since the Java thread being deoptimized will eventually adjust its own stack,
1544   // the vframeArray containing the unpacking information is allocated in the C heap.
1545   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
1546   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
1547 
1548   // Compare the vframeArray to the collected vframes
1549   assert(array->structural_compare(thread, chunk), "just checking");
1550 
1551   if (TraceDeoptimization) {
1552     ResourceMark rm;
1553     stringStream st;
1554     st.print_cr("DEOPT PACKING thread=" INTPTR_FORMAT " vframeArray=" INTPTR_FORMAT, p2i(thread), p2i(array));
1555     st.print("   ");
1556     fr.print_on(&st);
1557     st.print_cr("   Virtual frames (innermost/newest first):");
1558     for (int index = 0; index < chunk->length(); index++) {
1559       compiledVFrame* vf = chunk->at(index);
1560       int bci = vf->raw_bci();
1561       const char* code_name;
1562       if (bci == SynchronizationEntryBCI) {
1563         code_name = "sync entry";
1564       } else {
1565         Bytecodes::Code code = vf->method()->code_at(bci);
1566         code_name = Bytecodes::name(code);
1567       }
1568 
1569       st.print("      VFrame %d (" INTPTR_FORMAT ")", index, p2i(vf));
1570       st.print(" - %s", vf->method()->name_and_sig_as_C_string());
1571       st.print(" - %s", code_name);
1572       st.print_cr(" @ bci=%d ", bci);
1573     }
1574     tty->print_raw(st.freeze());
1575     tty->cr();
1576   }
1577 
1578   return array;
1579 }
1580 
1581 #if COMPILER2_OR_JVMCI
1582 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
1583   // Reallocation of some scalar replaced objects failed. Record
1584   // that we need to pop all the interpreter frames for the
1585   // deoptimized compiled frame.
1586   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
1587   thread->set_frames_to_pop_failed_realloc(array->frames());
1588   // Unlock all monitors here otherwise the interpreter will see a
1589   // mix of locked and unlocked monitors (because of failed
1590   // reallocations of synchronized objects) and be confused.
1591   for (int i = 0; i < array->frames(); i++) {
1592     MonitorChunk* monitors = array->element(i)->monitors();
1593     if (monitors != NULL) {
1594       for (int j = 0; j < monitors->number_of_monitors(); j++) {
1595         BasicObjectLock* src = monitors->at(j);
1596         if (src->obj() != NULL) {
1597           ObjectSynchronizer::exit(src->obj(), thread);
1598         }
1599       }
1600       array->element(i)->free_monitors(thread);
1601 #ifdef ASSERT
1602       array->element(i)->set_removed_monitors();
1603 #endif
1604     }
1605   }
1606 }
1607 #endif
1608 
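     // Mark a single compiled frame for deoptimization: record statistics, emit optional
     // compilation logging, notify the continuation code, and patch the frame so that
     // returning to it enters the deopt handler instead of the compiled code.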
1609 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1610   assert(fr.can_be_deoptimized(), "checking frame type");
1611 
1612   gather_statistics(reason, Action_none, Bytecodes::_illegal);
1613 
1614   if (LogCompilation && xtty != NULL) {
1615     CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
1616     assert(cm != NULL, "only compiled methods can deopt");
1617 
1618     ttyLocker ttyl;
1619     xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1620     cm->log_identity(xtty);
1621     xtty->end_head();
1622     for (ScopeDesc* sd = cm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1623       xtty->begin_elem("jvms bci='%d'", sd->bci());
1624       xtty->method(sd->method());
1625       xtty->end_elem();
1626       if (sd->is_top())  break;
1627     }
1628     xtty->tail("deoptimized");
1629   }
1630 
1631   Continuation::notify_deopt(thread, fr.sp());
1632 
1633   // Patch the compiled method so that when execution returns to it we will
1634   // deopt the execution state and return to the interpreter.
1635   fr.deoptimize(thread);
1636 }
1637 
1638 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1639   // Deoptimize only if the frame comes from compiled code.
1640   // Do not deoptimize the frame which is already patched
1641   // during the execution of the loops below.
1642   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1643     return;
1644   }
1645   ResourceMark rm;
1646   DeoptimizationMarker dm;
1647   deoptimize_single_frame(thread, fr, reason);
1648 }
1649 
1650 #if INCLUDE_JVMCI
1651 address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
1652   // there is no exception handler for this pc => deoptimize
1653   cm->make_not_entrant();
1654 
1655   // Use Deoptimization::deoptimize for all of its side-effects:
1656   // gathering traps statistics, logging...
1657   // it also patches the return pc but we do not care about that
1658   // since we return a continuation to the deopt_blob below.
1659   JavaThread* thread = JavaThread::current();
1660   RegisterMap reg_map(thread,
1661                       RegisterMap::UpdateMap::skip,
1662                       RegisterMap::ProcessFrames::include,
1663                       RegisterMap::WalkContinuation::skip);
1664   frame runtime_frame = thread->last_frame();
1665   frame caller_frame = runtime_frame.sender(&reg_map);
1666   assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
1667   vframe* vf = vframe::new_vframe(&caller_frame, &reg_map, thread);
1668   compiledVFrame* cvf = compiledVFrame::cast(vf);
1669   ScopeDesc* imm_scope = cvf->scope();
1670   MethodData* imm_mdo = get_method_data(thread, methodHandle(thread, imm_scope->method()), true);
1671   if (imm_mdo != NULL) {
1672     ProfileData* pdata = imm_mdo->allocate_bci_to_data(imm_scope->bci(), NULL);
1673     if (pdata != NULL && pdata->is_BitData()) {
1674       BitData* bit_data = (BitData*) pdata;
1675       bit_data->set_exception_seen();
1676     }
1677   }
1678 
1679   Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
1680 
1681   MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true);
1682   if (trap_mdo != NULL) {
1683     trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
1684   }
1685 
1686   return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
1687 }
1688 #endif
1689 
1690 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1691   assert(thread == Thread::current() ||
1692          thread->is_handshake_safe_for(Thread::current()) ||
1693          SafepointSynchronize::is_at_safepoint(),
1694          "can only deoptimize other thread at a safepoint/handshake");
1695   // Compute frame and register map based on thread and sp.
1696   RegisterMap reg_map(thread,
1697                       RegisterMap::UpdateMap::skip,
1698                       RegisterMap::ProcessFrames::include,
1699                       RegisterMap::WalkContinuation::skip);
1700   frame fr = thread->last_frame();
1701   while (fr.id() != id) {
1702     fr = fr.sender(&reg_map);
1703   }
1704   deoptimize(thread, fr, reason);
1705 }
1706 
1707 
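     // Deoptimize the frame identified by 'id': directly when the target thread is the current
     // thread or handshake-safe for it, otherwise via a VM_DeoptimizeFrame VM operation.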
1708 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) {
1709   Thread* current = Thread::current();
1710   if (thread == current || thread->is_handshake_safe_for(current)) {
1711     Deoptimization::deoptimize_frame_internal(thread, id, reason);
1712   } else {
1713     VM_DeoptimizeFrame deopt(thread, id, reason);
1714     VMThread::execute(&deopt);
1715   }
1716 }
1717 
1718 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
1719   deoptimize_frame(thread, id, Reason_constraint);
1720 }
1721 
1722 // JVMTI PopFrame support
1723 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
1724 {
1725   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
1726 }
1727 JRT_END
1728 
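     // Return the MethodData* for 'm', building one on demand when requested; a metaspace
     // OutOfMemoryError raised while building it is cleared and simply leaves 'm' without an MDO.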
1729 MethodData*
1730 Deoptimization::get_method_data(JavaThread* thread, const methodHandle& m,
1731                                 bool create_if_missing) {
1732   JavaThread* THREAD = thread; // For exception macros.
1733   MethodData* mdo = m()->method_data();
1734   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
1735     // Build an MDO.  Ignore errors like OutOfMemory;
1736     // that simply means we won't have an MDO to update.
1737     Method::build_profiling_method_data(m, THREAD);
1738     if (HAS_PENDING_EXCEPTION) {
1739       // Only metaspace OOM is expected. No Java code executed.
1740       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1741       CLEAR_PENDING_EXCEPTION;
1742     }
1743     mdo = m()->method_data();
1744   }
1745   return mdo;
1746 }
1747 
1748 #if COMPILER2_OR_JVMCI
1749 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
1750   // In case of an unresolved klass entry, load the class.
1751   // This path is exercised from case _ldc in Parse::do_one_bytecode,
1752   // and probably nowhere else.
1753   // Even that case would benefit from simply re-interpreting the
1754   // bytecode, without paying special attention to the class index.
1755   // So this whole "class index" feature should probably be removed.
1756 
1757   if (constant_pool->tag_at(index).is_unresolved_klass()) {
1758     Klass* tk = constant_pool->klass_at(index, THREAD);
1759     if (HAS_PENDING_EXCEPTION) {
1760       // Exception happened during classloading. We ignore the exception here, since it
1761       // is going to be rethrown since the current activation is going to be deoptimized and
1762       // the interpreter will re-execute the bytecode.
1763       // Do not clear probable Async Exceptions.
1764       CLEAR_PENDING_NONASYNC_EXCEPTION;
1765       // Class loading called java code which may have caused a stack
1766       // overflow. If the exception was thrown right before the return
1767       // to the runtime the stack is no longer guarded. Reguard the
1768       // stack otherwise if we return to the uncommon trap blob and the
1769       // stack bang causes a stack overflow we crash.
1770       JavaThread* jt = THREAD;
1771       bool guard_pages_enabled = jt->stack_overflow_state()->reguard_stack_if_needed();
1772       assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
1773     }
1774     return;
1775   }
1776 
1777   assert(!constant_pool->tag_at(index).is_symbol(),
1778          "no symbolic names here, please");
1779 }
1780 
1781 #if INCLUDE_JFR
1782 
1783 class DeoptReasonSerializer : public JfrSerializer {
1784  public:
1785   void serialize(JfrCheckpointWriter& writer) {
1786     writer.write_count((u4)(Deoptimization::Reason_LIMIT + 1)); // + Reason::many (-1)
1787     for (int i = -1; i < Deoptimization::Reason_LIMIT; ++i) {
1788       writer.write_key((u8)i);
1789       writer.write(Deoptimization::trap_reason_name(i));
1790     }
1791   }
1792 };
1793 
1794 class DeoptActionSerializer : public JfrSerializer {
1795  public:
1796   void serialize(JfrCheckpointWriter& writer) {
1797     static const u4 nof_actions = Deoptimization::Action_LIMIT;
1798     writer.write_count(nof_actions);
1799     for (u4 i = 0; i < Deoptimization::Action_LIMIT; ++i) {
1800       writer.write_key(i);
1801       writer.write(Deoptimization::trap_action_name((int)i));
1802     }
1803   }
1804 };
1805 
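     // Register the JFR type serializers exactly once; the cmpxchg lets only the first caller
     // through and turns every later call into a no-op.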
1806 static void register_serializers() {
1807   static int critical_section = 0;
1808   if (1 == critical_section || Atomic::cmpxchg(&critical_section, 0, 1) == 1) {
1809     return;
1810   }
1811   JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONREASON, true, new DeoptReasonSerializer());
1812   JfrSerializer::register_serializer(TYPE_DEOPTIMIZATIONACTION, true, new DeoptActionSerializer());
1813 }
1814 
1815 static void post_deoptimization_event(CompiledMethod* nm,
1816                                       const Method* method,
1817                                       int trap_bci,
1818                                       int instruction,
1819                                       Deoptimization::DeoptReason reason,
1820                                       Deoptimization::DeoptAction action) {
1821   assert(nm != NULL, "invariant");
1822   assert(method != NULL, "invariant");
1823   if (EventDeoptimization::is_enabled()) {
1824     static bool serializers_registered = false;
1825     if (!serializers_registered) {
1826       register_serializers();
1827       serializers_registered = true;
1828     }
1829     EventDeoptimization event;
1830     event.set_compileId(nm->compile_id());
1831     event.set_compiler(nm->compiler_type());
1832     event.set_method(method);
1833     event.set_lineNumber(method->line_number_from_bci(trap_bci));
1834     event.set_bci(trap_bci);
1835     event.set_instruction(instruction);
1836     event.set_reason(reason);
1837     event.set_action(action);
1838     event.commit();
1839   }
1840 }
1841 
1842 #endif // INCLUDE_JFR
1843 
1844 static void log_deopt(CompiledMethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci,
1845                               const char* reason_name, const char* reason_action) {
1846   LogTarget(Debug, deoptimization) lt;
1847   if (lt.is_enabled()) {
1848     LogStream ls(lt);
1849     bool is_osr = nm->is_osr_method();
1850     ls.print("cid=%4d %s level=%d",
1851              nm->compile_id(), (is_osr ? "osr" : "   "), nm->comp_level());
1852     ls.print(" %s", tm->name_and_sig_as_C_string());
1853     ls.print(" trap_bci=%d ", trap_bci);
1854     if (is_osr) {
1855       ls.print("osr_bci=%d ", nm->osr_entry_bci());
1856     }
1857     ls.print("%s ", reason_name);
1858     ls.print("%s ", reason_action);
1859     ls.print_cr("pc=" INTPTR_FORMAT " relative_pc=" INTPTR_FORMAT,
1860              pc, fr.pc() - nm->code_begin());
1861   }
1862 }
1863 
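     // Runtime entry for an uncommon trap: decode the trap request, record and log the event,
     // update the trapping method's MDO, and decide whether the compiled code is kept as is,
     // made not entrant, reprofiled, or abandoned as not compilable.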
1864 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint trap_request)) {
1865   HandleMark hm(current);
1866 
1867   // uncommon_trap() is called at the beginning of the uncommon trap
1868   // handler. Note this fact before we start generating temporary frames
1869   // that can confuse an asynchronous stack walker. This counter is
1870   // decremented at the end of unpack_frames().
1871 
1872   current->inc_in_deopt_handler();
1873 
1874 #if INCLUDE_JVMCI
1875   // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
1876   RegisterMap reg_map(current,
1877                       RegisterMap::UpdateMap::include,
1878                       RegisterMap::ProcessFrames::include,
1879                       RegisterMap::WalkContinuation::skip);
1880 #else
1881   RegisterMap reg_map(current,
1882                       RegisterMap::UpdateMap::skip,
1883                       RegisterMap::ProcessFrames::include,
1884                       RegisterMap::WalkContinuation::skip);
1885 #endif
1886   frame stub_frame = current->last_frame();
1887   frame fr = stub_frame.sender(&reg_map);
1888 
1889   // Log a message
1890   Events::log_deopt_message(current, "Uncommon trap: trap_request=" INT32_FORMAT_X_0 " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT,
1891               trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin());
1892 
1893   {
1894     ResourceMark rm;
1895 
1896     DeoptReason reason = trap_request_reason(trap_request);
1897     DeoptAction action = trap_request_action(trap_request);
1898 #if INCLUDE_JVMCI
1899     int debug_id = trap_request_debug_id(trap_request);
1900 #endif
1901     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
1902 
1903     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, current);
1904     compiledVFrame* cvf = compiledVFrame::cast(vf);
1905 
1906     CompiledMethod* nm = cvf->code();
1907 
1908     ScopeDesc*      trap_scope  = cvf->scope();
1909 
1910     bool is_receiver_constraint_failure = COMPILER2_PRESENT(VerifyReceiverTypes &&) (reason == Deoptimization::Reason_receiver_constraint);
1911 
1912     if (is_receiver_constraint_failure) {
1913       tty->print_cr("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"),
1914                     trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string()
1915                     JVMCI_ONLY(COMMA debug_id));
1916     }
1917 
1918     methodHandle    trap_method(current, trap_scope->method());
1919     int             trap_bci    = trap_scope->bci();
1920 #if INCLUDE_JVMCI
1921     jlong           speculation = current->pending_failed_speculation();
1922     if (nm->is_compiled_by_jvmci()) {
1923       nm->as_nmethod()->update_speculation(current);
1924     } else {
1925       assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
1926     }
1927 
1928     if (trap_bci == SynchronizationEntryBCI) {
1929       trap_bci = 0;
1930       current->set_pending_monitorenter(true);
1931     }
1932 
1933     if (reason == Deoptimization::Reason_transfer_to_interpreter) {
1934       current->set_pending_transfer_to_interpreter(true);
1935     }
1936 #endif
1937 
1938     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
1939     // Record this event in the histogram.
1940     gather_statistics(reason, action, trap_bc);
1941 
1942     // Ensure that we can record deopt. history:
1943     // Need MDO to record RTM code generation state.
1944     bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking );
1945 
1946     methodHandle profiled_method;
1947 #if INCLUDE_JVMCI
1948     if (nm->is_compiled_by_jvmci()) {
1949       profiled_method = methodHandle(current, nm->method());
1950     } else {
1951       profiled_method = trap_method;
1952     }
1953 #else
1954     profiled_method = trap_method;
1955 #endif
1956 
1957     MethodData* trap_mdo =
1958       get_method_data(current, profiled_method, create_if_missing);
1959 
1960     { // Log Deoptimization event for JFR, UL and event system
1961       Method* tm = trap_method();
1962       const char* reason_name = trap_reason_name(reason);
1963       const char* reason_action = trap_action_name(action);
1964       intptr_t pc = p2i(fr.pc());
1965 
1966       JFR_ONLY(post_deoptimization_event(nm, tm, trap_bci, trap_bc, reason, action);)
1967       log_deopt(nm, tm, pc, fr, trap_bci, reason_name, reason_action);
1968       Events::log_deopt_message(current, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d %s",
1969                                 reason_name, reason_action, pc,
1970                                 tm->name_and_sig_as_C_string(), trap_bci, nm->compiler_name());
1971     }
1972 
1973     // Print a bunch of diagnostics, if requested.
1974     if (TraceDeoptimization || LogCompilation || is_receiver_constraint_failure) {
1975       ResourceMark rm;
1976       ttyLocker ttyl;
1977       char buf[100];
1978       if (xtty != NULL) {
1979         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s",
1980                          os::current_thread_id(),
1981                          format_trap_request(buf, sizeof(buf), trap_request));
1982 #if INCLUDE_JVMCI
1983         if (speculation != 0) {
1984           xtty->print(" speculation='" JLONG_FORMAT "'", speculation);
1985         }
1986 #endif
1987         nm->log_identity(xtty);
1988       }
1989       Symbol* class_name = NULL;
1990       bool unresolved = false;
1991       if (unloaded_class_index >= 0) {
1992         constantPoolHandle constants (current, trap_method->constants());
1993         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
1994           class_name = constants->klass_name_at(unloaded_class_index);
1995           unresolved = true;
1996           if (xtty != NULL)
1997             xtty->print(" unresolved='1'");
1998         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
1999           class_name = constants->symbol_at(unloaded_class_index);
2000         }
2001         if (xtty != NULL)
2002           xtty->name(class_name);
2003       }
2004       if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) {
2005         // Dump the relevant MDO state.
2006         // This is the deopt count for the current reason, any previous
2007         // reasons or recompiles seen at this point.
2008         int dcnt = trap_mdo->trap_count(reason);
2009         if (dcnt != 0)
2010           xtty->print(" count='%d'", dcnt);
2011         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
2012         int dos = (pdata == NULL)? 0: pdata->trap_state();
2013         if (dos != 0) {
2014           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
2015           if (trap_state_is_recompiled(dos)) {
2016             int recnt2 = trap_mdo->overflow_recompile_count();
2017             if (recnt2 != 0)
2018               xtty->print(" recompiles2='%d'", recnt2);
2019           }
2020         }
2021       }
2022       if (xtty != NULL) {
2023         xtty->stamp();
2024         xtty->end_head();
2025       }
2026       if (TraceDeoptimization) {  // make noise on the tty
2027         stringStream st;
2028         st.print("UNCOMMON TRAP method=%s", trap_scope->method()->name_and_sig_as_C_string());
2029         st.print("  bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT JVMCI_ONLY(", debug_id=%d"),
2030                  trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin() JVMCI_ONLY(COMMA debug_id));
2031         st.print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
2032 #if INCLUDE_JVMCI
2033         if (nm->is_nmethod()) {
2034           const char* installed_code_name = nm->as_nmethod()->jvmci_name();
2035           if (installed_code_name != NULL) {
2036             st.print(" (JVMCI: installed code name=%s) ", installed_code_name);
2037           }
2038         }
2039 #endif
2040         st.print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"),
2041                    p2i(fr.pc()),
2042                    os::current_thread_id(),
2043                    trap_reason_name(reason),
2044                    trap_action_name(action),
2045                    unloaded_class_index
2046 #if INCLUDE_JVMCI
2047                    , debug_id
2048 #endif
2049                    );
2050         if (class_name != NULL) {
2051           st.print(unresolved ? " unresolved class: " : " symbol: ");
2052           class_name->print_symbol_on(&st);
2053         }
2054         st.cr();
2055         tty->print_raw(st.freeze());
2056       }
2057       if (xtty != NULL) {
2058         // Log the precise location of the trap.
2059         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
2060           xtty->begin_elem("jvms bci='%d'", sd->bci());
2061           xtty->method(sd->method());
2062           xtty->end_elem();
2063           if (sd->is_top())  break;
2064         }
2065         xtty->tail("uncommon_trap");
2066       }
2067     }
2068     // (End diagnostic printout.)
2069 
2070     if (is_receiver_constraint_failure) {
2071       fatal("missing receiver type check");
2072     }
2073 
2074     // Load class if necessary
2075     if (unloaded_class_index >= 0) {
2076       constantPoolHandle constants(current, trap_method->constants());
2077       load_class_by_index(constants, unloaded_class_index, THREAD);
2078     }
2079 
2080     // Flush the nmethod if necessary and desirable.
2081     //
2082     // We need to avoid situations where we are re-flushing the nmethod
2083     // because of a hot deoptimization site.  Repeated flushes at the same
2084     // point need to be detected by the compiler and avoided.  If the compiler
2085     // cannot avoid them (or has a bug and "refuses" to avoid them), this
2086     // module must take measures to avoid an infinite cycle of recompilation
2087     // and deoptimization.  There are several such measures:
2088     //
2089     //   1. If a recompilation is ordered a second time at some site X
2090     //   and for the same reason R, the action is adjusted to 'reinterpret',
2091     //   to give the interpreter time to exercise the method more thoroughly.
2092     //   If this happens, the method's overflow_recompile_count is incremented.
2093     //
2094     //   2. If the compiler fails to reduce the deoptimization rate, then
2095     //   the method's overflow_recompile_count will begin to exceed the set
2096     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
2097     //   is adjusted to 'make_not_compilable', and the method is abandoned
2098     //   to the interpreter.  This is a performance hit for hot methods,
2099     //   but is better than a disastrous infinite cycle of recompilations.
2100     //   (Actually, only the method containing the site X is abandoned.)
2101     //
2102     //   3. In parallel with the previous measures, if the total number of
2103     //   recompilations of a method exceeds the much larger set limit
2104     //   PerMethodRecompilationCutoff, the method is abandoned.
2105     //   This should only happen if the method is very large and has
2106     //   many "lukewarm" deoptimizations.  The code which enforces this
2107     //   limit is elsewhere (class nmethod, class Method).
2108     //
2109     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
2110     // to recompile at each bytecode independently of the per-BCI cutoff.
2111     //
2112     // The decision to update code is up to the compiler, and is encoded
2113     // in the Action_xxx code.  If the compiler requests Action_none
2114     // no trap state is changed, no compiled code is changed, and the
2115     // computation suffers along in the interpreter.
2116     //
2117     // The other action codes specify various tactics for decompilation
2118     // and recompilation.  Action_maybe_recompile is the loosest, and
2119     // allows the compiled code to stay around until enough traps are seen,
2120     // and until the compiler gets around to recompiling the trapping method.
2121     //
2122     // The other actions cause immediate removal of the present code.
2123 
2124     // Traps caused by injected profile shouldn't pollute trap counts.
2125     bool injected_profile_trap = trap_method->has_injected_profile() &&
2126                                  (reason == Reason_intrinsic || reason == Reason_unreached);
2127 
2128     bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap;
2129     bool make_not_entrant = false;
2130     bool make_not_compilable = false;
2131     bool reprofile = false;
2132     switch (action) {
2133     case Action_none:
2134       // Keep the old code.
2135       update_trap_state = false;
2136       break;
2137     case Action_maybe_recompile:
2138       // Do not need to invalidate the present code, but we can
2139       // initiate another compilation:
2140       // start the compiler without (necessarily) invalidating the nmethod.
2141       // The system will tolerate the old code, but new code should be
2142       // generated when possible.
2143       break;
2144     case Action_reinterpret:
2145       // Go back into the interpreter for a while, and then consider
2146       // recompiling from scratch.
2147       make_not_entrant = true;
2148       // Reset invocation counter for outer most method.
2149       // This will allow the interpreter to exercise the bytecodes
2150       // for a while before recompiling.
2151       // By contrast, Action_make_not_entrant is immediate.
2152       //
2153       // Note that the compiler will track null_check, null_assert,
2154       // range_check, and class_check events and log them as if they
2155       // had been traps taken from compiled code.  This will update
2156       // the MDO trap history so that the next compilation will
2157       // properly detect hot trap sites.
2158       reprofile = true;
2159       break;
2160     case Action_make_not_entrant:
2161       // Request immediate recompilation, and get rid of the old code.
2162       // Make them not entrant, so next time they are called they get
2163       // recompiled.  Unloaded classes are loaded now so recompile before next
2164       // time they are called.  Same for uninitialized.  The interpreter will
2165       // link the missing class, if any.
2166       make_not_entrant = true;
2167       break;
2168     case Action_make_not_compilable:
2169       // Give up on compiling this method at all.
2170       make_not_entrant = true;
2171       make_not_compilable = true;
2172       break;
2173     default:
2174       ShouldNotReachHere();
2175     }
2176 
2177     // Setting +ProfileTraps fixes the following, on all platforms:
2178     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
2179     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
2180     // recompile relies on a MethodData* to record heroic opt failures.
2181 
2182     // Whether the interpreter is producing MDO data or not, we also need
2183     // to use the MDO to detect hot deoptimization points and control
2184     // aggressive optimization.
2185     bool inc_recompile_count = false;
2186     ProfileData* pdata = NULL;
2187     if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != NULL) {
2188       assert(trap_mdo == get_method_data(current, profiled_method, false), "sanity");
2189       uint this_trap_count = 0;
2190       bool maybe_prior_trap = false;
2191       bool maybe_prior_recompile = false;
2192       pdata = query_update_method_data(trap_mdo, trap_bci, reason, true,
2193 #if INCLUDE_JVMCI
2194                                    nm->is_compiled_by_jvmci() && nm->is_osr_method(),
2195 #endif
2196                                    nm->method(),
2197                                    //outputs:
2198                                    this_trap_count,
2199                                    maybe_prior_trap,
2200                                    maybe_prior_recompile);
2201       // Because the interpreter also counts null, div0, range, and class
2202       // checks, these traps from compiled code are double-counted.
2203       // This is harmless; it just means that the PerXTrapLimit values
2204       // are in effect a little smaller than they look.
2205 
2206       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2207       if (per_bc_reason != Reason_none) {
2208         // Now take action based on the partially known per-BCI history.
2209         if (maybe_prior_trap
2210             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
2211           // If there are too many traps at this BCI, force a recompile.
2212           // This will allow the compiler to see the limit overflow, and
2213           // take corrective action, if possible.  The compiler generally
2214           // does not use the exact PerBytecodeTrapLimit value, but instead
2215           // changes its tactics if it sees any traps at all.  This provides
2216           // a little hysteresis, delaying a recompile until a trap happens
2217           // several times.
2218           //
2219           // Actually, since there is only one bit of counter per BCI,
2220           // the possible per-BCI counts are {0,1,(per-method count)}.
2221           // This produces accurate results if in fact there is only
2222           // one hot trap site, but begins to get fuzzy if there are
2223           // many sites.  For example, if there are ten sites each
2224           // trapping two or more times, they each get the blame for
2225           // all of their traps.
2226           make_not_entrant = true;
2227         }
2228 
2229         // Detect repeated recompilation at the same BCI, and enforce a limit.
2230         if (make_not_entrant && maybe_prior_recompile) {
2231           // More than one recompile at this point.
2232           inc_recompile_count = maybe_prior_trap;
2233         }
2234       } else {
2235         // For reasons which are not recorded per-bytecode, we simply
2236         // force recompiles unconditionally.
2237         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
2238         make_not_entrant = true;
2239       }
2240 
2241       // Go back to the compiler if there are too many traps in this method.
2242       if (this_trap_count >= per_method_trap_limit(reason)) {
2243         // If there are too many traps in this method, force a recompile.
2244         // This will allow the compiler to see the limit overflow, and
2245         // take corrective action, if possible.
2246         // (This condition is an unlikely backstop only, because the
2247         // PerBytecodeTrapLimit is more likely to take effect first,
2248         // if it is applicable.)
2249         make_not_entrant = true;
2250       }
2251 
2252       // Here's more hysteresis:  If there has been a recompile at
2253       // this trap point already, run the method in the interpreter
2254       // for a while to exercise it more thoroughly.
2255       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
2256         reprofile = true;
2257       }
2258     }
2259 
2260     // Take requested actions on the method:
2261 
2262     // Recompile
2263     if (make_not_entrant) {
2264       if (!nm->make_not_entrant()) {
2265         return; // the call did not change nmethod's state
2266       }
2267 
2268       if (pdata != NULL) {
2269         // Record the recompilation event, if any.
2270         int tstate0 = pdata->trap_state();
2271         int tstate1 = trap_state_set_recompiled(tstate0, true);
2272         if (tstate1 != tstate0)
2273           pdata->set_trap_state(tstate1);
2274       }
2275 
2276 #if INCLUDE_RTM_OPT
2277       // Restart collecting RTM locking abort statistic if the method
2278       // is recompiled for a reason other than RTM state change.
2279       // Assume that in new recompiled code the statistic could be different,
2280       // for example, due to different inlining.
2281       if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
2282           UseRTMDeopt && (nm->as_nmethod()->rtm_state() != ProfileRTM)) {
2283         trap_mdo->atomic_set_rtm_state(ProfileRTM);
2284       }
2285 #endif
2286       // For code aging we count traps separately here, using make_not_entrant()
2287       // as a guard against simultaneous deopts in multiple threads.
2288       if (reason == Reason_tenured && trap_mdo != NULL) {
2289         trap_mdo->inc_tenure_traps();
2290       }
2291     }
2292 
2293     if (inc_recompile_count) {
2294       trap_mdo->inc_overflow_recompile_count();
2295       if ((uint)trap_mdo->overflow_recompile_count() >
2296           (uint)PerBytecodeRecompilationCutoff) {
2297         // Give up on the method containing the bad BCI.
2298         if (trap_method() == nm->method()) {
2299           make_not_compilable = true;
2300         } else {
2301           trap_method->set_not_compilable("overflow_recompile_count > PerBytecodeRecompilationCutoff", CompLevel_full_optimization);
2302           // But give grace to the enclosing nm->method().
2303         }
2304       }
2305     }
2306 
2307     // Reprofile
2308     if (reprofile) {
2309       CompilationPolicy::reprofile(trap_scope, nm->is_osr_method());
2310     }
2311 
2312     // Give up compiling
2313     if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
2314       assert(make_not_entrant, "consistent");
2315       nm->method()->set_not_compilable("give up compiling", CompLevel_full_optimization);
2316     }
2317 
2318   } // Free marked resources
2319 
2320 }
2321 JRT_END
2322 
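     // Update trap bookkeeping in 'trap_mdo' for a trap at 'trap_bci': bump the per-reason trap
     // counter, allocate or update the per-BCI ProfileData for reasons recorded per bytecode,
     // and report through the output parameters whether this site has probably trapped or been
     // recompiled before.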
2323 ProfileData*
2324 Deoptimization::query_update_method_data(MethodData* trap_mdo,
2325                                          int trap_bci,
2326                                          Deoptimization::DeoptReason reason,
2327                                          bool update_total_trap_count,
2328 #if INCLUDE_JVMCI
2329                                          bool is_osr,
2330 #endif
2331                                          Method* compiled_method,
2332                                          //outputs:
2333                                          uint& ret_this_trap_count,
2334                                          bool& ret_maybe_prior_trap,
2335                                          bool& ret_maybe_prior_recompile) {
2336   bool maybe_prior_trap = false;
2337   bool maybe_prior_recompile = false;
2338   uint this_trap_count = 0;
2339   if (update_total_trap_count) {
2340     uint idx = reason;
2341 #if INCLUDE_JVMCI
2342     if (is_osr) {
2343       // Upper half of history array used for traps in OSR compilations
2344       idx += Reason_TRAP_HISTORY_LENGTH;
2345     }
2346 #endif
2347     uint prior_trap_count = trap_mdo->trap_count(idx);
2348     this_trap_count  = trap_mdo->inc_trap_count(idx);
2349 
2350     // If the runtime cannot find a place to store trap history,
2351     // it is estimated based on the general condition of the method.
2352     // If the method has ever been recompiled, or has ever incurred
2353     // a trap with the present reason, then this BCI is assumed
2354     // (pessimistically) to be the culprit.
2355     maybe_prior_trap      = (prior_trap_count != 0);
2356     maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
2357   }
2358   ProfileData* pdata = NULL;
2359 
2360 
2361   // For reasons which are recorded per bytecode, we check per-BCI data.
2362   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
2363   assert(per_bc_reason != Reason_none || update_total_trap_count, "must be");
2364   if (per_bc_reason != Reason_none) {
2365     // Find the profile data for this BCI.  If there isn't one,
2366     // try to allocate one from the MDO's set of spares.
2367     // This will let us detect a repeated trap at this point.
2368     pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
2369 
2370     if (pdata != NULL) {
2371       if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
2372         if (LogCompilation && xtty != NULL) {
2373           ttyLocker ttyl;
2374           // no more room for speculative traps in this MDO
2375           xtty->elem("speculative_traps_oom");
2376         }
2377       }
2378       // Query the trap state of this profile datum.
2379       int tstate0 = pdata->trap_state();
2380       if (!trap_state_has_reason(tstate0, per_bc_reason))
2381         maybe_prior_trap = false;
2382       if (!trap_state_is_recompiled(tstate0))
2383         maybe_prior_recompile = false;
2384 
2385       // Update the trap state of this profile datum.
2386       int tstate1 = tstate0;
2387       // Record the reason.
2388       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
2389       // Store the updated state on the MDO, for next time.
2390       if (tstate1 != tstate0)
2391         pdata->set_trap_state(tstate1);
2392     } else {
2393       if (LogCompilation && xtty != NULL) {
2394         ttyLocker ttyl;
2395         // Missing MDP?  Leave a small complaint in the log.
2396         xtty->elem("missing_mdp bci='%d'", trap_bci);
2397       }
2398     }
2399   }
2400 
2401   // Return results:
2402   ret_this_trap_count = this_trap_count;
2403   ret_maybe_prior_trap = maybe_prior_trap;
2404   ret_maybe_prior_recompile = maybe_prior_recompile;
2405   return pdata;
2406 }
2407 
2408 void
2409 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2410   ResourceMark rm;
2411   // Ignored outputs:
2412   uint ignore_this_trap_count;
2413   bool ignore_maybe_prior_trap;
2414   bool ignore_maybe_prior_recompile;
2415   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2416   // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2417   bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2418   query_update_method_data(trap_mdo, trap_bci,
2419                            (DeoptReason)reason,
2420                            update_total_counts,
2421 #if INCLUDE_JVMCI
2422                            false,
2423 #endif
2424                            NULL,
2425                            ignore_this_trap_count,
2426                            ignore_maybe_prior_trap,
2427                            ignore_maybe_prior_recompile);
2428 }
2429 
2430 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* current, jint trap_request, jint exec_mode) {
2431   // Enable WXWrite: current function is called from methods compiled by C2 directly
2432   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
2433 
2434   // Still executing in Java; no safepoint has been reached yet
2435   {
2436     // This enters VM and may safepoint
2437     uncommon_trap_inner(current, trap_request);
2438   }
2439   HandleMark hm(current);
2440   return fetch_unroll_info_helper(current, exec_mode);
2441 }
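// Illustrative note (not part of the VM sources): a trap_request jint packs the
// deopt reason, the requested action and an optional unloaded-class index, and is
// unpacked with the trap_request_* accessors used by format_trap_request() below.
// A hypothetical caller-side decode might look like:
//
//   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
//   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
//   jint index = Deoptimization::trap_request_index(trap_request);  // < 0 if no unloaded class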
2442 
2443 // Local derived constants.
2444 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2445 const int DS_REASON_MASK   = ((uint)DataLayout::trap_mask) >> 1;
2446 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
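// Worked example (illustrative only; the real value of DataLayout::trap_mask is
// whatever DataLayout defines): if trap_mask were 0xFF, then
//   DS_REASON_MASK   = 0xFF >> 1   = 0x7F  (low bits hold the recorded reason)
//   DS_RECOMPILE_BIT = 0xFF - 0x7F = 0x80  (the remaining high bit flags a recompile)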
2447 
2448 //---------------------------trap_state_reason---------------------------------
2449 Deoptimization::DeoptReason
2450 Deoptimization::trap_state_reason(int trap_state) {
2451   // This assert provides the link between the width of DataLayout::trap_bits
2452   // and the encoding of "recorded" reasons.  It ensures there are enough
2453   // bits to store all needed reasons in the per-BCI MDO profile.
2454   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2455   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2456   trap_state -= recompile_bit;
2457   if (trap_state == DS_REASON_MASK) {
2458     return Reason_many;
2459   } else {
2460     assert((int)Reason_none == 0, "state=0 => Reason_none");
2461     return (DeoptReason)trap_state;
2462   }
2463 }
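// Illustrative decode, using the example 0x7F/0x80 split sketched above:
//   trap_state == 0                                      -> Reason_none
//   trap_state == (Reason_null_check | DS_RECOMPILE_BIT) -> Reason_null_check
//   trap_state == (DS_REASON_MASK | DS_RECOMPILE_BIT)    -> Reason_many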
2464 //-------------------------trap_state_has_reason-------------------------------
2465 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2466   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
2467   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2468   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2469   trap_state -= recompile_bit;
2470   if (trap_state == DS_REASON_MASK) {
2471     return -1;  // true, unspecifically (bottom of state lattice)
2472   } else if (trap_state == reason) {
2473     return 1;   // true, definitely
2474   } else if (trap_state == 0) {
2475     return 0;   // false, definitely (top of state lattice)
2476   } else {
2477     return 0;   // false, definitely
2478   }
2479 }
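// Illustrative return values (querying Reason_null_check, a per-bytecode reason):
//   trap_state_has_reason(Reason_null_check, Reason_null_check) ==  1  (definitely seen)
//   trap_state_has_reason(DS_REASON_MASK,    Reason_null_check) == -1  ("many" subsumes it)
//   trap_state_has_reason(0,                 Reason_null_check) ==  0  (definitely not seen)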
2480 //-------------------------trap_state_add_reason-------------------------------
2481 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
2482   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
2483   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2484   trap_state -= recompile_bit;
2485   if (trap_state == DS_REASON_MASK) {
2486     return trap_state + recompile_bit;     // already at state lattice bottom
2487   } else if (trap_state == reason) {
2488     return trap_state + recompile_bit;     // the condition is already true
2489   } else if (trap_state == 0) {
2490     return reason + recompile_bit;          // no condition has yet been true
2491   } else {
2492     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
2493   }
2494 }
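// Illustrative walk down the state lattice (the recompile bit is preserved):
//   add_reason(0,                 Reason_null_check)  -> Reason_null_check
//   add_reason(Reason_null_check, Reason_null_check)  -> Reason_null_check  (already true)
//   add_reason(Reason_null_check, Reason_range_check) -> DS_REASON_MASK     (i.e. Reason_many)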
2495 //-----------------------trap_state_is_recompiled------------------------------
2496 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2497   return (trap_state & DS_RECOMPILE_BIT) != 0;
2498 }
2499 //-----------------------trap_state_set_recompiled-----------------------------
2500 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
2501   if (z)  return trap_state |  DS_RECOMPILE_BIT;
2502   else    return trap_state & ~DS_RECOMPILE_BIT;
2503 }
2504 //---------------------------format_trap_state---------------------------------
2505 // This is used for debugging and diagnostics, including LogFile output.
2506 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2507                                               int trap_state) {
2508   assert(buflen > 0, "sanity");
2509   DeoptReason reason      = trap_state_reason(trap_state);
2510   bool        recomp_flag = trap_state_is_recompiled(trap_state);
2511   // Re-encode the state from its decoded components.
2512   int decoded_state = 0;
2513   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
2514     decoded_state = trap_state_add_reason(decoded_state, reason);
2515   if (recomp_flag)
2516     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
2517   // If the state re-encodes properly, format it symbolically.
2518   // Because this routine is used for debugging and diagnostics,
2519   // be robust even if the state is a strange value.
2520   size_t len;
2521   if (decoded_state != trap_state) {
2522     // Random buggy state that doesn't decode??
2523     len = jio_snprintf(buf, buflen, "#%d", trap_state);
2524   } else {
2525     len = jio_snprintf(buf, buflen, "%s%s",
2526                        trap_reason_name(reason),
2527                        recomp_flag ? " recompiled" : "");
2528   }
2529   return buf;
2530 }
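// Example outputs (illustrative): "null_check", "class_check recompiled", or,
// for a state value that does not re-encode cleanly, the raw form "#42".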
2531 
2532 
2533 //--------------------------------statics--------------------------------------
2534 const char* Deoptimization::_trap_reason_name[] = {
2535   // Note:  Keep this in sync. with enum DeoptReason.
2536   "none",
2537   "null_check",
2538   "null_assert" JVMCI_ONLY("_or_unreached0"),
2539   "range_check",
2540   "class_check",
2541   "array_check",
2542   "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"),
2543   "bimorphic" JVMCI_ONLY("_or_optimized_type_check"),
2544   "profile_predicate",
2545   "unloaded",
2546   "uninitialized",
2547   "initialized",
2548   "unreached",
2549   "unhandled",
2550   "constraint",
2551   "div0_check",
2552   "age",
2553   "predicate",
2554   "loop_limit_check",
2555   "speculate_class_check",
2556   "speculate_null_check",
2557   "speculate_null_assert",
2558   "rtm_state_change",
2559   "unstable_if",
2560   "unstable_fused_if",
2561   "receiver_constraint",
2562 #if INCLUDE_JVMCI
2563   "aliasing",
2564   "transfer_to_interpreter",
2565   "not_compiled_exception_handler",
2566   "unresolved",
2567   "jsr_mismatch",
2568 #endif
2569   "tenured"
2570 };
2571 const char* Deoptimization::_trap_action_name[] = {
2572   // Note:  Keep this in sync. with enum DeoptAction.
2573   "none",
2574   "maybe_recompile",
2575   "reinterpret",
2576   "make_not_entrant",
2577   "make_not_compilable"
2578 };
2579 
2580 const char* Deoptimization::trap_reason_name(int reason) {
2581   // Check that every reason has a name
2582   STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT);
2583 
2584   if (reason == Reason_many)  return "many";
2585   if ((uint)reason < Reason_LIMIT)
2586     return _trap_reason_name[reason];
2587   static char buf[20];
2588   jio_snprintf(buf, sizeof(buf), "reason%d", reason);
2589   return buf;
2590 }
2591 const char* Deoptimization::trap_action_name(int action) {
2592   // Check that every action has a name
2593   STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT);
2594 
2595   if ((uint)action < Action_LIMIT)
2596     return _trap_action_name[action];
2597   static char buf[20];
2598   jio_snprintf(buf, sizeof(buf), "action%d", action);
2599   return buf;
2600 }
2601 
2602 // This is used for debugging and diagnostics, including LogFile output.
2603 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
2604                                                 int trap_request) {
2605   jint unloaded_class_index = trap_request_index(trap_request);
2606   const char* reason = trap_reason_name(trap_request_reason(trap_request));
2607   const char* action = trap_action_name(trap_request_action(trap_request));
2608 #if INCLUDE_JVMCI
2609   int debug_id = trap_request_debug_id(trap_request);
2610 #endif
2611   size_t len;
2612   if (unloaded_class_index < 0) {
2613     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2614                        reason, action
2615 #if INCLUDE_JVMCI
2616                        ,debug_id
2617 #endif
2618                        );
2619   } else {
2620     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2621                        reason, action, unloaded_class_index
2622 #if INCLUDE_JVMCI
2623                        ,debug_id
2624 #endif
2625                        );
2626   }
2627   return buf;
2628 }
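// Example output (illustrative): "reason='unloaded' action='reinterpret' index='17'"
// (with an additional " debug_id='0'" in JVMCI builds); the index attribute is
// omitted when no unloaded class is involved.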
2629 
2630 juint Deoptimization::_deoptimization_hist
2631         [Deoptimization::Reason_LIMIT]
2632         [1 + Deoptimization::Action_LIMIT]
2633         [Deoptimization::BC_CASE_LIMIT]
2634   = {0};
2635 
2636 enum {
2637   LSB_BITS = 8,
2638   LSB_MASK = right_n_bits(LSB_BITS)
2639 };
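// Histogram entry layout (illustrative): each 32-bit counter keeps the bytecode in
// its low LSB_BITS (8) bits and the event count in the remaining high bits, so one
// recorded trap adds (1 << LSB_BITS). For example, three traps at Bytecodes::_getfield
// (opcode 0xb4) would leave the entry as (3 << 8) | 0xb4 == 0x3b4.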
2640 
2641 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2642                                        Bytecodes::Code bc) {
2643   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2644   assert(action >= 0 && action < Action_LIMIT, "oob");
2645   _deoptimization_hist[Reason_none][0][0] += 1;  // total
2646   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
2647   juint* cases = _deoptimization_hist[reason][1+action];
2648   juint* bc_counter_addr = NULL;
2649   juint  bc_counter      = 0;
2650   // Look for an unused counter, or an exact match to this BC.
2651   if (bc != Bytecodes::_illegal) {
2652     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2653       juint* counter_addr = &cases[bc_case];
2654       juint  counter = *counter_addr;
2655       if ((counter == 0 && bc_counter_addr == NULL)
2656           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2657         // this counter is either free or is already devoted to this BC
2658         bc_counter_addr = counter_addr;
2659         bc_counter = counter | bc;
2660       }
2661     }
2662   }
2663   if (bc_counter_addr == NULL) {
2664     // Overflow, or no given bytecode.
2665     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
2666     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
2667   }
2668   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2669 }
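// Hypothetical call site (illustrative only), recording an unstable_if trap that
// reinterprets at an ifeq bytecode; this bumps the grand total, the per-reason
// total and the matching per-reason/per-action/per-bytecode cell:
//
//   Deoptimization::gather_statistics(Deoptimization::Reason_unstable_if,
//                                     Deoptimization::Action_reinterpret,
//                                     Bytecodes::_ifeq);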
2670 
2671 jint Deoptimization::total_deoptimization_count() {
2672   return _deoptimization_hist[Reason_none][0][0];
2673 }
2674 
2675 // Get the deopt count for a specific reason and a specific action. If either
2676 // 'reason_str' or 'action_str' is NULL, the method returns the sum of all
2677 // deoptimizations matching the argument that was supplied, summed over all
2678 // actions or all reasons respectively. If both are NULL, it returns the total deopt count.
2679 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
2680   if (reason_str == NULL && action_str == NULL) {
2681     return total_deoptimization_count();
2682   }
2683   juint counter = 0;
2684   for (int reason = 0; reason < Reason_LIMIT; reason++) {
2685     if (reason_str == NULL || !strcmp(reason_str, trap_reason_name(reason))) {
2686       for (int action = 0; action < Action_LIMIT; action++) {
2687         if (action_str == NULL || !strcmp(action_str, trap_action_name(action))) {
2688           juint* cases = _deoptimization_hist[reason][1+action];
2689           for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2690             counter += cases[bc_case] >> LSB_BITS;
2691           }
2692         }
2693       }
2694     }
2695   }
2696   return counter;
2697 }
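// Usage sketch (illustrative): summing over all actions for one reason, over all
// reasons for one action, or over everything:
//
//   jint a = Deoptimization::deoptimization_count("unstable_if", NULL);
//   jint b = Deoptimization::deoptimization_count(NULL, "make_not_entrant");
//   jint c = Deoptimization::deoptimization_count(NULL, NULL);  // == total_deoptimization_count()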
2698 
2699 void Deoptimization::print_statistics() {
2700   juint total = total_deoptimization_count();
2701   juint account = total;
2702   if (total != 0) {
2703     ttyLocker ttyl;
2704     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
2705     tty->print_cr("Deoptimization traps recorded:");
2706     #define PRINT_STAT_LINE(name, r) \
2707       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
2708     PRINT_STAT_LINE("total", total);
2709     // For each non-zero entry in the histogram, print the reason,
2710     // the action, and (if specifically known) the type of bytecode.
2711     for (int reason = 0; reason < Reason_LIMIT; reason++) {
2712       for (int action = 0; action < Action_LIMIT; action++) {
2713         juint* cases = _deoptimization_hist[reason][1+action];
2714         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2715           juint counter = cases[bc_case];
2716           if (counter != 0) {
2717             char name[1*K];
2718             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2719             if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)  // the overflow/"no bytecode" slot (see gather_statistics)
2720               bc = Bytecodes::_illegal;
2721             jio_snprintf(name, sizeof(name), "%s/%s/%s",
2722                          trap_reason_name(reason),
2723                          trap_action_name(action),
2724                          Bytecodes::is_defined(bc) ? Bytecodes::name(bc) : "other");
2725             juint r = counter >> LSB_BITS;
2726             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2727             account -= r;
2728           }
2729         }
2730       }
2731     }
2732     if (account != 0) {
2733       PRINT_STAT_LINE("unaccounted", account);
2734     }
2735     #undef PRINT_STAT_LINE
2736     if (xtty != NULL)  xtty->tail("statistics");
2737   }
2738 }
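// Example of the emitted report (illustrative; counts and percentages are made up):
//   Deoptimization traps recorded:
//     123 (100.0%) total
//       unstable_if/reinterpret/ifeq: 57 (46.3%)
//       ...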
2739 
2740 #else // COMPILER2_OR_JVMCI
2741 
2742 
2743 // Stubs for a C1-only build.
2744 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2745   return false;
2746 }
2747 
2748 const char* Deoptimization::trap_reason_name(int reason) {
2749   return "unknown";
2750 }
2751 
2752 jint Deoptimization::total_deoptimization_count() {
2753   return 0;
2754 }
2755 
2756 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
2757   return 0;
2758 }
2759 
2760 void Deoptimization::print_statistics() {
2761   // no output
2762 }
2763 
2764 void
2765 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2766   // no update
2767 }
2768 
2769 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2770   return 0;
2771 }
2772 
2773 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2774                                        Bytecodes::Code bc) {
2775   // no update
2776 }
2777 
2778 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2779                                               int trap_state) {
2780   jio_snprintf(buf, buflen, "#%d", trap_state);
2781   return buf;
2782 }
2783 
2784 #endif // COMPILER2_OR_JVMCI