/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/icBuffer.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "metaprogramming/primitiveConversions.hpp"
#include "oops/access.hpp"
#include "oops/compiledICHolder.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/klass.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Shared stub locations
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_wrong_method_abstract_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_vectors_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

nmethod*            SharedRuntime::_cont_doYield_stub;

//----------------------------generate_stubs-----------------------------------
void SharedRuntime::generate_stubs() {
  _wrong_method_blob                   = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),          "wrong_method_stub");
  _wrong_method_abstract_blob          = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract), "wrong_method_abstract_stub");
  _ic_miss_blob                        = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),  "ic_miss_stub");
  _resolve_opt_virtual_call_blob       = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),   "resolve_opt_virtual_call");
  _resolve_virtual_call_blob           = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),       "resolve_virtual_call");
  _resolve_static_call_blob            = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),        "resolve_static_call");

  AdapterHandlerLibrary::initialize();

#if COMPILER2_OR_JVMCI
  // Vectors are generated only by C2 and JVMCI.
  bool support_wide = is_wide_vector(MaxVectorSize);
  if (support_wide) {
    _polling_page_vectors_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_VECTOR_LOOP);
  }
#endif // COMPILER2_OR_JVMCI
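  // Handlers for ordinary polls at loop back-edges and for polls at returns.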
  _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_LOOP);
  _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), POLL_AT_RETURN);

  generate_deopt_blob();

#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}

#include <math.h>

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;

int64_t SharedRuntime::_nof_normal_calls = 0;
int64_t SharedRuntime::_nof_inlined_calls = 0;
int64_t SharedRuntime::_nof_megamorphic_calls = 0;
int64_t SharedRuntime::_nof_static_calls = 0;
int64_t SharedRuntime::_nof_inlined_static_calls = 0;
int64_t SharedRuntime::_nof_interface_calls = 0;
int64_t SharedRuntime::_nof_inlined_interface_calls = 0;

int SharedRuntime::_new_instance_ctr=0;
int SharedRuntime::_new_array_ctr=0;
int SharedRuntime::_multi2_ctr=0;
int SharedRuntime::_multi3_ctr=0;
int SharedRuntime::_multi4_ctr=0;
int SharedRuntime::_multi5_ctr=0;
int SharedRuntime::_mon_enter_stub_ctr=0;
int SharedRuntime::_mon_exit_stub_ctr=0;
int SharedRuntime::_mon_enter_ctr=0;
int SharedRuntime::_mon_exit_ctr=0;
int SharedRuntime::_partial_subtype_ctr=0;
int SharedRuntime::_jbyte_array_copy_ctr=0;
int SharedRuntime::_jshort_array_copy_ctr=0;
int SharedRuntime::_jint_array_copy_ctr=0;
int SharedRuntime::_jlong_array_copy_ctr=0;
int SharedRuntime::_oop_array_copy_ctr=0;
int SharedRuntime::_checkcast_array_copy_ctr=0;
int SharedRuntime::_unsafe_array_copy_ctr=0;
int SharedRuntime::_generic_array_copy_ctr=0;
int SharedRuntime::_slow_array_copy_ctr=0;
int SharedRuntime::_find_handler_ctr=0;
int SharedRuntime::_rethrow_ctr=0;

int     SharedRuntime::_ICmiss_index                    = 0;
int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];


void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
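  // Not seen before: claim a new slot. Once the table is full, keep reusing
  // the last slot so the index never runs past the arrays.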
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT


JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END

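// Java long division must not trap on its one overflowing case:
// min_jlong / -1 wraps around to min_jlong (JLS 15.17.2), whereas the
// hardware divide instruction would trap on some platforms, so that pair
// of operands is special-cased here.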
JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END

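// Similarly, min_jlong % -1 is defined to be 0 (JLS 15.17.3); computing
// x % y directly could trap on the same overflowing operand pair.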
JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END


#ifdef _WIN64
const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);
#endif

#if !defined(X86) || !defined(TARGET_COMPILER_gcc) || defined(_WIN64)
JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  juint xbits = PrimitiveConversions::cast<juint>(x);
  juint ybits = PrimitiveConversions::cast<juint>(y);
  // x Mod Infinity == x unless x is infinity
  if (((xbits & float_sign_mask) != float_infinity) &&
       ((ybits & float_sign_mask) == float_infinity) ) {
    return x;
  }
  return ((jfloat)fmod_winx64((double)x, (double)y));
#else
  return ((jfloat)fmod((double)x,(double)y));
#endif
JRT_END

JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  julong xbits = PrimitiveConversions::cast<julong>(x);
  julong ybits = PrimitiveConversions::cast<julong>(y);
  // x Mod Infinity == x unless x is infinity
  if (((xbits & double_sign_mask) != double_infinity) &&
       ((ybits & double_sign_mask) == double_infinity) ) {
    return x;
  }
  return ((jdouble)fmod_winx64((double)x, (double)y));
#else
  return ((jdouble)fmod((double)x,(double)y));
#endif
JRT_END
#endif // !X86 || !TARGET_COMPILER_gcc || _WIN64

JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
  return (jfloat)x;
JRT_END

#ifdef __SOFTFP__
JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
  return x + y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
  return x - y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
  return x * y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
  return x + y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
  return x - y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
  return x * y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
  return (jdouble)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
  return (jdouble)x;
JRT_END

JRT_LEAF(int,  SharedRuntime::fcmpl(float x, float y))
  return x>y ? 1 : (x==y ? 0 : -1);  /* x<y or is_nan */
JRT_END

JRT_LEAF(int,  SharedRuntime::fcmpg(float x, float y))
  return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
JRT_END

JRT_LEAF(int,  SharedRuntime::dcmpl(double x, double y))
  return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
JRT_END

JRT_LEAF(int,  SharedRuntime::dcmpg(double x, double y))
  return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
JRT_END

// Functions to return the opposite of the aeabi functions for nan.
JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

// Intrinsics make gcc generate code for these.
float  SharedRuntime::fneg(float f)   {
  return -f;
}

double SharedRuntime::dneg(double f)  {
  return -f;
}

#endif // __SOFTFP__

#if defined(__SOFTFP__) || defined(E500V2)
// Intrinsics make gcc generate code for these.
double SharedRuntime::dabs(double f)  {
  return (f <= (double)0.0) ? (double)0.0 - f : f;
}

#endif

#if defined(__SOFTFP__) || defined(PPC)
double SharedRuntime::dsqrt(double f) {
  return sqrt(f);
}
#endif

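// Java's floating-point to integral conversions saturate (JLS 5.1.3):
// NaN converts to 0, and out-of-range values clamp to the target type's
// min/max, e.g. f2i(1e20f) == max_jint and f2i(-1e20f) == min_jint.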
JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jint)
    return max_jint;
  if (x <= (jfloat) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jlong)
    return max_jlong;
  if (x <= (jfloat) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jint)
    return max_jint;
  if (x <= (jdouble) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jlong)
    return max_jlong;
  if (x <= (jdouble) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END


// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
  // Note: This is called when we have unwound the frame of the callee that did
  // throw an exception. So far, no check has been performed by the StackWatermarkSet.
  // Notably, the stack is not walkable at this point, and hence the check must
  // be deferred until later. Specifically, any of the handlers returned here in
  // this function, will get dispatched to, and call deferred checks to
  // StackWatermarkSet::after_unwind at a point where the stack is walkable.
  assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
  assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");

  // Reset method handle flag.
  current->set_is_method_handle_return(false);

#if INCLUDE_JVMCI
  // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
  // and other exception handler continuations do not read it
  current->set_exception_pc(nullptr);
#endif // INCLUDE_JVMCI

  if (Continuation::is_return_barrier_entry(return_address)) {
    return StubRoutines::cont_returnBarrierExc();
  }

  // The fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  CompiledMethod* nm = (blob != nullptr) ? blob->as_compiled_method_or_null() : nullptr;
  if (nm != nullptr) {
    // Set flag if return address is a method handle call site.
    current->set_is_method_handle_return(nm->is_method_handle_return(return_address));
    // native nmethods don't have exception handlers
    assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
    if (nm->is_deopt_pc(return_address)) {
      // If we come here because of a stack overflow, the stack may be
      // unguarded. Reguard the stack otherwise if we return to the
      // deopt blob and the stack bang causes a stack overflow we
      // crash.
      StackOverflow* overflow_state = current->stack_overflow_state();
      bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
      if (overflow_state->reserved_stack_activation() != current->stack_base()) {
        overflow_state->set_reserved_stack_activation(current->stack_base());
      }
      assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
      // The deferred StackWatermarkSet::after_unwind check will be performed in
      // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      // The deferred StackWatermarkSet::after_unwind check will be performed in
      // * OptoRuntime::handle_exception_C_helper for C2 code
      // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
      return nm->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    // The deferred StackWatermarkSet::after_unwind check will be performed in
    // JavaCallWrapper::~JavaCallWrapper
    return StubRoutines::catch_exception_entry();
  }
  if (blob != nullptr && blob->is_upcall_stub()) {
    return ((UpcallStub*)blob)->exception_handler();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    // The deferred StackWatermarkSet::after_unwind check will be performed in
    // InterpreterRuntime::exception_handler_for_exception
    return Interpreter::rethrow_exception_entry();
  }

  guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
  guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");

#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
    os::print_location(tty, (intptr_t)return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT

  ShouldNotReachHere();
  return nullptr;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
  return raw_exception_handler_for_return_address(current, return_address);
JRT_END


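// Returns the entry point of the safepoint handler stub that matches the kind
// of poll found at pc: a poll at a return, a poll in code with wide vectors
// in use, or an ordinary poll at a loop back-edge.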
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  guarantee(cb != nullptr && cb->is_compiled(), "safepoint polling: pc must refer to an nmethod");

  // Look up the relocation information
  assert(((CompiledMethod*)cb)->is_at_poll_or_poll_return(pc),
      "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));

#ifdef ASSERT
  if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
    tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
    Disassembler::decode(cb);
    fatal("Only polling locations are used for safepoint");
  }
#endif

  bool at_poll_return = ((CompiledMethod*)cb)->is_at_poll_return(pc);
  bool has_wide_vectors = ((CompiledMethod*)cb)->has_wide_vectors();
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  } else if (has_wide_vectors) {
    assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
           "polling page vectors safepoint stub not created yet");
    stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
  }
  log_debug(safepoint)("... found polling page %s exception at pc = "
                       INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                       at_poll_return ? "return" : "loop",
                       (intptr_t)pc, (intptr_t)stub);
  return stub;
}

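// Throws h_exception in the current thread, first posting a JVMTI exception
// event if enabled and, under JVMCI, recording exception_seen in the MDO of
// the invoke bytecode that threw.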
void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
  if (JvmtiExport::can_post_on_exceptions()) {
    vframeStream vfst(current, true);
    methodHandle method = methodHandle(current, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
  }

#if INCLUDE_JVMCI
  if (EnableJVMCI && UseJVMCICompiler) {
    vframeStream vfst(current, true);
    methodHandle method = methodHandle(current, vfst.method());
    int bci = vfst.bci();
    MethodData* trap_mdo = method->method_data();
    if (trap_mdo != nullptr) {
      // Set exception_seen if the exceptional bytecode is an invoke
      Bytecode_invoke call = Bytecode_invoke_check(method, bci);
      if (call.is_valid()) {
        ResourceMark rm(current);
        ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
        if (pdata != nullptr && pdata->is_BitData()) {
          BitData* bit_data = (BitData*) pdata;
          bit_data->set_exception_seen();
        }
      }
    }
  }
#endif

  Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
  Handle h_exception = Exceptions::new_exception(current, name, message);
  throw_and_post_jvmti_exception(current, h_exception);
}

#if INCLUDE_JVMTI
JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current))
  assert(hide == JNI_FALSE, "must be VTMS transition finish");
  jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
  JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread);
  JNIHandles::destroy_local(vthread);
JRT_END

JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current))
  assert(hide == JNI_TRUE, "must be VTMS transition start");
  jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
  JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread);
  JNIHandles::destroy_local(vthread);
JRT_END

JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current))
  jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
  JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide);
  JNIHandles::destroy_local(vthread);
JRT_END

JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current))
  jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
  JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
  JNIHandles::destroy_local(vthread);
JRT_END
#endif // INCLUDE_JVMTI

// The interpreter code that calls this tracing function is only generated
// when Unified Logging is enabled for the redefine, class and obsolete tags
// at trace level. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
    JavaThread* thread, Method* method))
  if (method->is_obsolete()) {
    // We are calling an obsolete method, but this is not necessarily
    // an error. Our method could have been redefined just after we
    // fetched the Method* from the constant pool.
    ResourceMark rm;
    log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
  }
  return 0;
JRT_END

// ret_pc points into caller; we are returning caller's exception handler
// for given exception
address SharedRuntime::compute_compiled_exc_handler(CompiledMethod* cm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
  assert(cm != nullptr, "must exist");
  ResourceMark rm;

#if INCLUDE_JVMCI
  if (cm->is_compiled_by_jvmci()) {
    // lookup exception handler for this pc
    int catch_pco = ret_pc - cm->code_begin();
    ExceptionHandlerTable table(cm);
    HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
    if (t != nullptr) {
      return cm->code_begin() + t->pco();
    } else {
      return Deoptimization::deoptimize_for_missing_exception_handler(cm);
    }
  }
#endif // INCLUDE_JVMCI

  nmethod* nm = cm->as_nmethod();
  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    bool recursive_exception = false;
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      Klass* ek = exception->klass();
      methodHandle mh(THREAD, sd->method());
      handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        recursive_exception = true;
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        recursive_exception_occurred = true;
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      else {
        recursive_exception = false;
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != nullptr) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->code_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables.  The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions.  In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (t == nullptr && nm->is_compiled_by_c1()) {
    assert(nm->unwind_handler_begin() != nullptr, "");
    return nm->unwind_handler_begin();
  }
#endif

  if (t == nullptr) {
    ttyLocker ttyl;
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return nullptr;
  }

  return nm->code_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
  throw_StackOverflowError_common(current, false);
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
  throw_StackOverflowError_common(current, true);
JRT_END

void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  JavaThread* THREAD = current; // For exception macros.
  Klass* k = vmClasses::StackOverflowError_klass();
  oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
  if (delayed) {
    java_lang_Throwable::set_message(exception_oop,
                                     Universe::delayed_stack_overflow_error_message());
  }
  Handle exception (current, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  // Remove the ScopedValue bindings in case we got a
  // StackOverflowError while we were trying to remove ScopedValue
  // bindings.
  current->clear_scopedValueBindings();
  // Increment counter for hs_err file reporting
  Atomic::inc(&Exceptions::_stack_overflow_errors);
  throw_and_post_jvmti_exception(current, exception);
}

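// Computes the continuation address for an implicit exception (null check,
// division by zero, or stack overflow) signalled at pc. Returns nullptr when
// the fault is not one we recognize, so the signal handler reports it as a
// genuine crash.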
address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
                                                           address pc,
                                                           ImplicitExceptionKind exception_kind)
{
  address target_pc = nullptr;

  if (Interpreter::contains(pc)) {
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // Stack overflow should never occur during deoptimization:
        // the compiled method bangs the stack by as much as the
        // interpreter would need in case of a deoptimization. The
        // deoptimization blob and uncommon trap blob bang the stack
        // in a debug VM to verify the correctness of the compiled
        // method stack banging.
        assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
        Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);

          // If vt_stub is null, then return null to signal handler to report the SEGV error.
          if (vt_stub == nullptr) return nullptr;

          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
            // Instead of throwing the abstract method error here directly, we re-resolve
            // and will throw the AbstractMethodError during resolve. As a result, we'll
            // get a more detailed error message.
            return SharedRuntime::get_handle_wrong_method_stub();
          } else {
            Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
            // Assert that the signal comes from the expected location in stub code.
            assert(vt_stub->is_null_pointer_exception(pc),
                   "obtained signal from unexpected location in stub code");
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);

          // If code blob is null, then return null to signal handler to report the SEGV error.
          if (cb == nullptr) return nullptr;

          // Exception happened in CodeCache. Must be either:
          // 1. Inline-cache check in C2I handler blob,
          // 2. Inline-cache check in nmethod, or
          // 3. Implicit null exception in nmethod

          if (!cb->is_compiled()) {
            bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
            if (!is_in_blob) {
              // Allow normal crash reporting to handle this
              return nullptr;
            }
            Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's a compiled method.  Consult its exception handlers.
          CompiledMethod* cm = (CompiledMethod*)cb;
          if (cm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          if (cm->method()->is_method_handle_intrinsic()) {
            // exception happened inside MH dispatch code, similar to a vtable stub
            Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = cm->continuation_for_implicit_null_exception(pc);
          // If there's an unexpected fault, target_pc might be null,
          // in which case we want to fall through into the normal
          // error handling code.
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        CompiledMethod* cm = CodeCache::find_compiled(pc);
        guarantee(cm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = cm->continuation_for_implicit_div0_exception(pc);
        // If there's an unexpected fault, target_pc might be null,
        // in which case we want to fall through into the normal
        // error handling code.
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    if (exception_kind == IMPLICIT_NULL) {
#ifndef PRODUCT
      // for AbortVMOnException flag
      Exceptions::debug_check_abort("java.lang.NullPointerException");
#endif //PRODUCT
      Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
    } else {
#ifndef PRODUCT
      // for AbortVMOnException flag
      Exceptions::debug_check_abort("java.lang.ArithmeticException");
#endif //PRODUCT
      Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return nullptr;
}


/**
 * Throws a java/lang/UnsatisfiedLinkError.  The address of this method is
 * installed in the native function entry of all native Java methods before
 * they get linked to their actual native methods.
 *
 * \note
 * This method actually never gets called!  The reason is that
 * the interpreter's native entries call NativeLookup::lookup() which
 * throws the exception when the lookup fails.  The exception is then
 * caught and forwarded on the return from the NativeLookup::lookup() call
 * before the call to the native function.  This might change in the future.
 */
JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  // We return a bad value here to make sure that the exception is
  // forwarded before we look at the return value.
  THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
}
JNI_END

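// Note: this entry is also what Method::clear_native_function installs as the
// default native target until linking supplies the real function.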
address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}

JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
#if INCLUDE_JVMCI
  if (!obj->klass()->has_finalizer()) {
    return;
  }
#endif // INCLUDE_JVMCI
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

jlong SharedRuntime::get_java_tid(JavaThread* thread) {
  assert(thread != nullptr, "No thread");
  if (thread == nullptr) {
    return 0;
  }
  guarantee(Thread::current() != thread || thread->is_oop_safe(),
            "current cannot touch oops after its GC barrier is detached.");
  oop obj = thread->threadObj();
  return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741.  Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc(JavaThread::current(), o, o->size());
}

int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
  return dtrace_object_alloc(thread, o, o->size());
}

int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->klass();
  Symbol* name = klass->name();
  HOTSPOT_OBJECT_ALLOC(
                   get_java_tid(thread),
                   (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* current, Method* method))
  assert(current == JavaThread::current(), "pre-condition");

  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HOTSPOT_METHOD_ENTRY(
      get_java_tid(current),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* current, Method* method))
  assert(current == JavaThread::current(), "pre-condition");
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HOTSPOT_METHOD_RETURN(
      get_java_tid(current),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds the receiver, the CallInfo (i.e. the receiver method), and the calling
// bytecode for a call currently in progress, i.e., whose arguments have been
// pushed on the stack but whose callee has not been invoked yet.  Used by:
// resolve virtual/static, vtable updates, etc.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(current, true);  // Do not skip any javaCalls

  return find_callee_info_helper(vfst, bc, callinfo, THREAD);
}

Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
  CompiledMethod* caller = vfst.nm();

  address pc = vfst.frame_pc();
  { // Get call instruction under lock because another thread may be busy patching it.
    CompiledICLocker ic_locker(caller);
    return caller->attached_method_before_pc(pc);
  }
  return nullptr;
}

// Finds the receiver, the CallInfo (i.e. the receiver method), and the calling
// bytecode for a call currently in progress, i.e., whose arguments have been
// pushed on the stack but whose callee has not been invoked yet.  Caller frame
// must be compiled.
Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns
  JavaThread* current = THREAD;

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(current, vfst.method());
  int          bci   = vfst.bci();

  if (caller->is_continuation_enter_intrinsic()) {
    bc = Bytecodes::_invokestatic;
    LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
    return receiver;
  }

  // Substitutability test implementation piggy backs on static call resolution
  Bytecodes::Code code = caller->java_code_at(bci);
  if (code == Bytecodes::_if_acmpeq || code == Bytecodes::_if_acmpne) {
    bc = Bytecodes::_invokestatic;
    methodHandle attached_method(THREAD, extract_attached_method(vfst));
    assert(attached_method.not_null(), "must have attached method");
    vmClasses::ValueObjectMethods_klass()->initialize(CHECK_NH);
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, false, CHECK_NH);
#ifdef ASSERT
    Method* is_subst = vmClasses::ValueObjectMethods_klass()->find_method(vmSymbols::isSubstitutable_name(), vmSymbols::object_object_boolean_signature());
    assert(callinfo.selected_method() == is_subst, "must be isSubstitutable method");
#endif
    return receiver;
  }

  Bytecode_invoke bytecode(caller, bci);
  int bytecode_index = bytecode.index();
  bc = bytecode.invoke_code();

  methodHandle attached_method(current, extract_attached_method(vfst));
  if (attached_method.not_null()) {
    Method* callee = bytecode.static_target(CHECK_NH);
    vmIntrinsics::ID id = callee->intrinsic_id();
    // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
    // it attaches statically resolved method to the call site.
    if (MethodHandles::is_signature_polymorphic(id) &&
        MethodHandles::is_signature_polymorphic_intrinsic(id)) {
      bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);

      // Adjust invocation mode according to the attached method.
      switch (bc) {
        case Bytecodes::_invokevirtual:
          if (attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokeinterface;
          }
          break;
        case Bytecodes::_invokeinterface:
          if (!attached_method->method_holder()->is_interface()) {
            bc = Bytecodes::_invokevirtual;
          }
          break;
        case Bytecodes::_invokehandle:
          if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
            bc = attached_method->is_static() ? Bytecodes::_invokestatic
                                              : Bytecodes::_invokevirtual;
          }
          break;
        default:
          break;
      }
    } else {
      assert(attached_method->has_scalarized_args(), "invalid use of attached method");
      if (!attached_method->method_holder()->is_inline_klass()) {
        // Ignore the attached method in this case to not confuse below code
        attached_method = methodHandle(current, nullptr);
      }
    }
  }

  assert(bc != Bytecodes::_illegal, "not initialized");

  bool has_receiver = bc != Bytecodes::_invokestatic &&
                      bc != Bytecodes::_invokedynamic &&
                      bc != Bytecodes::_invokehandle;
  bool check_null_and_abstract = true;

  // Find receiver for non-static call
  if (has_receiver) {
    // This register map must be updated since we need to find the receiver
    // for compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(current,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::include,
                         RegisterMap::WalkContinuation::skip);
    frame stubFrame   = current->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    Method* callee = attached_method();
    if (callee == nullptr) {
      callee = bytecode.static_target(CHECK_NH);
      if (callee == nullptr) {
        THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
      }
    }
    bool caller_is_c1 = callerFrame.is_compiled_frame() && callerFrame.cb()->is_compiled_by_c1();
    if (!caller_is_c1 && callee->is_scalarized_arg(0)) {
      // If the receiver is an inline type that is passed as fields, no oop is available
      // Resolve the call without receiver null checking.
      assert(!callee->mismatch(), "calls with inline type receivers should never mismatch");
      assert(attached_method.not_null() && !attached_method->is_abstract(), "must have non-abstract attached method");
      if (bc == Bytecodes::_invokeinterface) {
        bc = Bytecodes::_invokevirtual; // C2 optimistically replaces interface calls by virtual calls
      }
      check_null_and_abstract = false;
    } else {
      // Retrieve from a compiled argument list
      receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
      assert(oopDesc::is_oop_or_null(receiver()), "");
      if (receiver.is_null()) {
        THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
      }
    }
  }

  // Resolve method
  if (attached_method.not_null()) {
    // Parameterized by attached method.
    LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, check_null_and_abstract, CHECK_NH);
  } else {
    // Parameterized by bytecode.
    constantPoolHandle constants(current, caller->constants());
    LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
  }

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (has_receiver && check_null_and_abstract) {
    assert(receiver.not_null(), "should have thrown exception");
    Klass* receiver_klass = receiver->klass();
    Klass* rk = nullptr;
    if (attached_method.not_null()) {
      // In case there's resolved method attached, use its holder during the check.
      rk = attached_method->method_holder();
    } else {
      // Klass is already loaded.
      constantPoolHandle constants(current, caller->constants());
      rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
    }
    Klass* static_receiver_klass = rk;
    assert(receiver_klass->is_subtype_of(static_receiver_klass),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->is_instance_klass()) {
      if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass->print();
      }
      assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

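// Determines the callee of the call currently being resolved: taken from the
// entry-frame call wrapper when no Java frames are on the stack yet, otherwise
// from the call site in the top-most Java frame.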
methodHandle SharedRuntime::find_callee_method(bool is_optimized, bool& caller_is_c1, TRAPS) {
  JavaThread* current = THREAD;
  ResourceMark rm(current);
  // We first need to check whether any Java activations (compiled or
  // interpreted) exist on the stack since the last JavaCall.  If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(current, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame.  We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame fr = current->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
    // Calls via mismatching methods are always non-scalarized
    if (callinfo.resolved_method()->mismatch() && !is_optimized) {
      caller_is_c1 = true;
    }
    callee_method = methodHandle(current, callinfo.selected_method());
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != vmClasses::Object_klass()) {
1323       // If there is a pending exception then there is no need to re-try
1324       // to resolve this method.
1325       // If the method has been redefined, we need to try again.
1326       // Hack: we have no way to update the vtables of arrays, so don't
1327       // require that java.lang.Object has been updated.
1328 
1329       // It is very unlikely that a method is redefined more than 100 times
1330       // in the middle of resolution. If we loop here more than 100 times,
1331       // there is probably a bug.
1332       guarantee((retry_count++ < 100),
1333                 "Could not resolve to latest version of redefined method");
1334       // The method was redefined in the middle of resolution, so re-try.
1335       callee_method = resolve_sub_helper(is_virtual, is_optimized, caller_is_c1, THREAD);
1336     }
1337   }
1338   return callee_method;
1339 }
1340 
1341 // This returns false if resolution required refilling of IC stubs
1342 bool SharedRuntime::resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame,
1343                                                 CompiledMethod* caller_nm, bool is_virtual, bool is_optimized, bool& caller_is_c1,
1344                                                 Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS) {
1345   StaticCallInfo static_call_info;
1346   CompiledICInfo virtual_call_info;
1347 
1348   // Make sure the callee nmethod does not get deoptimized and removed before
1349   // we are done patching the code.
1350   CompiledMethod* callee = callee_method->code();
1351 
1352   if (callee != nullptr) {
1353     assert(callee->is_compiled(), "must be nmethod for patching");
1354   }
1355 
1356   if (callee != nullptr && !callee->is_in_use()) {
1357     // Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
1358     callee = nullptr;
1359   }
1360 #ifdef ASSERT
1361   address dest_entry_point = callee == nullptr ? 0 : callee->entry_point(); // used below
1362 #endif
1363 
1364   bool is_nmethod = caller_nm->is_nmethod();
1365 
1366   if (is_virtual) {
1367     Klass* receiver_klass = nullptr;
1368     if (!caller_is_c1 && callee_method->is_scalarized_arg(0)) {
1369       // If the receiver is an inline type that is passed as fields, no oop is available
1370       receiver_klass = callee_method->method_holder();
1371     } else {
1372       assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
1373       receiver_klass = invoke_code == Bytecodes::_invokehandle ? nullptr : receiver->klass();
1374     }
1375     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
1376     CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
1377                      is_optimized, static_bound, is_nmethod, caller_is_c1, virtual_call_info,
1378                      CHECK_false);
1379   } else {
1380     // static call
1381     CompiledStaticCall::compute_entry(callee_method, caller_nm, static_call_info);
1382   }
1383 
1384   // grab lock, check for deoptimization and potentially patch caller
1385   {
1386     CompiledICLocker ml(caller_nm);
1387 
1388     // Lock blocks for safepoint during which both nmethods can change state.
1389 
1390     // Now that we are ready to patch: if the Method* was redefined then
1391     // don't update the call site and let the caller retry.
1392     // Don't update the call site if the callee nmethod was unloaded or deoptimized.
1393     // Don't update the call site if the callee nmethod was replaced by another
1394     // nmethod, which may happen when multiple simultaneously-alive nmethods
1395     // (tiered compilation) are supported.
1396     if (!callee_method->is_old() &&
1397         (callee == nullptr || (callee->is_in_use() && callee_method->code() == callee))) {
1398       NoSafepointVerifier nsv;
1399 #ifdef ASSERT
1400       // We must not try to patch to jump to an already unloaded method.
1401       if (dest_entry_point != 0) {
1402         CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
1403         assert((cb != nullptr) && cb->is_compiled() && (((CompiledMethod*)cb) == callee),
1404                "should not call unloaded nmethod");
1405       }
1406 #endif
1407       if (is_virtual) {
1408         CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1409         if (inline_cache->is_clean()) {
1410           if (!inline_cache->set_to_monomorphic(virtual_call_info)) {
1411             return false;
1412           }
1413         }
1414       } else {
1415         if (VM_Version::supports_fast_class_init_checks() &&
1416             invoke_code == Bytecodes::_invokestatic &&
1417             callee_method->needs_clinit_barrier() &&
1418             callee != nullptr && callee->is_compiled_by_jvmci()) {
1419           return true; // skip patching for JVMCI
1420         }
1421         CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc());
1422         if (is_nmethod && caller_nm->method()->is_continuation_enter_intrinsic()) {
1423           ssc->compute_entry_for_continuation_entry(callee_method, static_call_info);
1424         }
1425         if (ssc->is_clean()) ssc->set(static_call_info);
1426       }
1427     }
1428   } // unlock CompiledICLocker
1429   return true;
1430 }
1431 
1432 // Resolves a call.  The compilers generate code for calls that go here
1433 // and are patched with the real destination of the call.
1434 methodHandle SharedRuntime::resolve_sub_helper(bool is_virtual, bool is_optimized, bool& caller_is_c1, TRAPS) {
1435   JavaThread* current = THREAD;
1436   ResourceMark rm(current);
1437   RegisterMap cbl_map(current,
1438                       RegisterMap::UpdateMap::skip,
1439                       RegisterMap::ProcessFrames::include,
1440                       RegisterMap::WalkContinuation::skip);
1441   frame caller_frame = current->last_frame().sender(&cbl_map);
1442 
1443   CodeBlob* caller_cb = caller_frame.cb();
1444   guarantee(caller_cb != nullptr && caller_cb->is_compiled(), "must be called from compiled method");
1445   CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
1446 
1447   // determine call info & receiver
1448   // note: a) receiver is null for static calls
1449   //       b) an exception is thrown if receiver is null for non-static calls
1450   CallInfo call_info;
1451   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1452   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1453   methodHandle callee_method(current, call_info.selected_method());
1454   // Calls via mismatching methods are always non-scalarized
1455   if (caller_nm->is_compiled_by_c1() || (call_info.resolved_method()->mismatch() && !is_optimized)) {
1456     caller_is_c1 = true;
1457   }
1458 
1459   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1460          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1461          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1462          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1463          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1464 
1465   assert(!caller_nm->is_unloading(), "It should not be unloading");
1466 
1467 #ifndef PRODUCT
1468   // tracing/debugging/statistics
1469   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1470                 (is_virtual) ? (&_resolve_virtual_ctr) :
1471                                (&_resolve_static_ctr);
1472   Atomic::inc(addr);
1473 
1474   if (TraceCallFixup) {
1475     ResourceMark rm(current);
1476     tty->print("resolving %s%s (%s) call to",
1477                (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1478                Bytecodes::name(invoke_code));
1479     callee_method->print_short_name(tty);
1480     tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1481                   p2i(caller_frame.pc()), p2i(callee_method->code()));
1482   }
1483 #endif
1484 
1485   if (invoke_code == Bytecodes::_invokestatic) {
1486     assert(callee_method->method_holder()->is_initialized() ||
1487            callee_method->method_holder()->is_init_thread(current),
1488            "invalid class initialization state for invoke_static");
1489     if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1490       // In order to keep the class initialization check, do not patch the
1491       // call site for a static call when the class is not fully initialized.
1492       // The proper check is enforced by call site re-resolution on every invocation.
1493       //
1494       // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1495       // an explicit class initialization check is put at the nmethod entry (VEP).
1496       assert(callee_method->method_holder()->is_linked(), "must be");
1497       return callee_method;
1498     }
1499   }
1500 
1501   // JSR 292 key invariant:
1502   // If the resolved method is a MethodHandle invoke target, the call
1503   // site must be a MethodHandle call site, because the lambda form might tail-call
1504   // leaving the stack in a state unknown to either caller or callee
1505   // TODO detune for now but we might need it again
1506 //  assert(!callee_method->is_compiled_lambda_form() ||
1507 //         caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
1508 
1509   // Compute entry points. This might require generation of C2I converter
1510   // frames, so we cannot be holding any locks here. Furthermore, the
1511   // computation of the entry points is independent of patching the call.  We
1512   // always return the entry-point, but we only patch the stub if the call has
1513   // not been deoptimized.  Return values: For a virtual call this is a
1514   // (cached_oop, destination address) pair. For a static call/optimized
1515   // virtual this is just a destination address.
1516 
1517   // Patching IC caches may fail if we run out of transition stubs.
1518   // We then refill the IC stubs and try again.
1519   for (;;) {
1520     ICRefillVerifier ic_refill_verifier;
1521     bool successful = resolve_sub_helper_internal(callee_method, caller_frame, caller_nm,
1522                                                   is_virtual, is_optimized, caller_is_c1, receiver,
1523                                                   call_info, invoke_code, CHECK_(methodHandle()));
1524     if (successful) {
1525       return callee_method;
1526     } else {
1527       InlineCacheBuffer::refill_ic_stubs();
1528     }
1529   }
1530 
1531 }
1532 
1533 
1534 // Inline caches exist only in compiled code
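     // An IC miss means the receiver klass seen at the call site does not match
     // the klass cached in the inline cache; handle_ic_miss_helper (below) then
     // re-resolves the call and transitions the IC towards a monomorphic or
     // megamorphic state as appropriate.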
1535 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1536 #ifdef ASSERT
1537   RegisterMap reg_map(current,
1538                       RegisterMap::UpdateMap::skip,
1539                       RegisterMap::ProcessFrames::include,
1540                       RegisterMap::WalkContinuation::skip);
1541   frame stub_frame = current->last_frame();
1542   assert(stub_frame.is_runtime_frame(), "sanity check");
1543   frame caller_frame = stub_frame.sender(&reg_map);
1544   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1545 #endif /* ASSERT */
1546 
1547   methodHandle callee_method;
1548   bool is_optimized = false;
1549   bool caller_is_c1 = false;
1550   JRT_BLOCK
1551     callee_method = SharedRuntime::handle_ic_miss_helper(is_optimized, caller_is_c1, CHECK_NULL);
1552     // Return Method* through TLS
1553     current->set_vm_result_2(callee_method());
1554   JRT_BLOCK_END
1555   // return compiled code entry point after potential safepoints
1556   return entry_for_handle_wrong_method(callee_method, false, is_optimized, caller_is_c1);
1557 JRT_END
1558 
1559 
1560 // Handle call site that has been made non-entrant
1561 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1562   // 6243940 We might end up in here if the callee is deoptimized
1563   // as we race to call it.  We don't want to take a safepoint if
1564   // the caller was interpreted, because the caller frame will look
1565   // interpreted to the stack walkers while the arguments are now
1566   // "compiled", so it is much better to make this transition
1567   // invisible to the stack walking code. The i2c path will
1568   // place the callee method in the callee_target. It is stashed
1569   // there because if we tried to find the callee by normal means a
1570   // safepoint would be possible and we would have trouble GC'ing the compiled args.
1571   RegisterMap reg_map(current,
1572                       RegisterMap::UpdateMap::skip,
1573                       RegisterMap::ProcessFrames::include,
1574                       RegisterMap::WalkContinuation::skip);
1575   frame stub_frame = current->last_frame();
1576   assert(stub_frame.is_runtime_frame(), "sanity check");
1577   frame caller_frame = stub_frame.sender(&reg_map);
1578 
1579   if (caller_frame.is_interpreted_frame() ||
1580       caller_frame.is_entry_frame() ||
1581       caller_frame.is_upcall_stub_frame()) {
1582     Method* callee = current->callee_target();
1583     guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1584     current->set_vm_result_2(callee);
1585     current->set_callee_target(nullptr);
1586     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1587       // Bypass class initialization checks in c2i when caller is in native.
1588       // JNI calls to static methods don't have class initialization checks.
1589       // Fast class initialization checks are present in c2i adapters and call into
1590       // SharedRuntime::handle_wrong_method() on the slow path.
1591       //
1592       // JVM upcalls may land here as well, but there's a proper check present in
1593       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1594       // so bypassing it in c2i adapter is benign.
1595       return callee->get_c2i_no_clinit_check_entry();
1596     } else {
1597       if (caller_frame.is_interpreted_frame()) {
1598         return callee->get_c2i_inline_entry();
1599       } else {
1600         return callee->get_c2i_entry();
1601       }
1602     }
1603   }
1604 
1605   // Must be the compiled-to-compiled path, which is safe to stackwalk.
1606   methodHandle callee_method;
1607   bool is_static_call = false;
1608   bool is_optimized = false;
1609   bool caller_is_c1 = false;
1610   JRT_BLOCK
1611     // Force resolving of caller (if we called from compiled frame)
1612     callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_NULL);
1613     current->set_vm_result_2(callee_method());
1614   JRT_BLOCK_END
1615   // return compiled code entry point after potential safepoints
1616   return entry_for_handle_wrong_method(callee_method, is_static_call, is_optimized, caller_is_c1);
1617 JRT_END
1618 
1619 // Handle abstract method call
1620 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1621   // Verbose error message for AbstractMethodError.
1622   // Get the called method from the invoke bytecode.
1623   vframeStream vfst(current, true);
1624   assert(!vfst.at_end(), "Java frame must exist");
1625   methodHandle caller(current, vfst.method());
1626   Bytecode_invoke invoke(caller, vfst.bci());
1627   DEBUG_ONLY( invoke.verify(); )
1628 
1629   // Find the compiled caller frame.
1630   RegisterMap reg_map(current,
1631                       RegisterMap::UpdateMap::include,
1632                       RegisterMap::ProcessFrames::include,
1633                       RegisterMap::WalkContinuation::skip);
1634   frame stubFrame = current->last_frame();
1635   assert(stubFrame.is_runtime_frame(), "must be");
1636   frame callerFrame = stubFrame.sender(&reg_map);
1637   assert(callerFrame.is_compiled_frame(), "must be");
1638 
1639   // Install exception and return forward entry.
1640   address res = StubRoutines::throw_AbstractMethodError_entry();
1641   JRT_BLOCK
1642     methodHandle callee(current, invoke.static_target(current));
1643     if (!callee.is_null()) {
1644       oop recv = callerFrame.retrieve_receiver(&reg_map);
1645       Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1646       res = StubRoutines::forward_exception_entry();
1647       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1648     }
1649   JRT_BLOCK_END
1650   return res;
1651 JRT_END
1652 
1653 
1654 // resolve a static call and patch code
1655 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current ))
1656   methodHandle callee_method;
1657   bool caller_is_c1 = false;
1658   bool enter_special = false;
1659   JRT_BLOCK
1660     callee_method = SharedRuntime::resolve_helper(false, false, caller_is_c1, CHECK_NULL);
1661     current->set_vm_result_2(callee_method());
1662 
1663     if (current->is_interp_only_mode()) {
1664       RegisterMap reg_map(current,
1665                           RegisterMap::UpdateMap::skip,
1666                           RegisterMap::ProcessFrames::include,
1667                           RegisterMap::WalkContinuation::skip);
1668       frame stub_frame = current->last_frame();
1669       assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1670       frame caller = stub_frame.sender(&reg_map);
1671       enter_special = caller.cb() != nullptr && caller.cb()->is_compiled()
1672         && caller.cb()->as_compiled_method()->method()->is_continuation_enter_intrinsic();
1673     }
1674   JRT_BLOCK_END
1675 
1676   if (current->is_interp_only_mode() && enter_special) {
1677     // enterSpecial is compiled and calls this method to resolve the call to Continuation::enter,
1678     // but in interp_only_mode we need to go to the interpreted entry.
1679     // The c2i won't patch in this mode -- see fixup_callers_callsite.
1680     //
1681     // This should probably be done in all cases, not just enterSpecial (see JDK-8218403),
1682     // but that's part of a larger fix, and the situation is worse for enterSpecial, as it has no
1683     // interpreted version.
1684     return callee_method->get_c2i_entry();
1685   }
1686 
1687   // return compiled code entry point after potential safepoints
1688   address entry = caller_is_c1 ?
1689     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1690   assert(entry != nullptr, "Jump to zero!");
1691   return entry;
1692 JRT_END
1693 
1694 
1695 // resolve virtual call and update inline cache to monomorphic
1696 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1697   methodHandle callee_method;
1698   bool caller_is_c1 = false;
1699   JRT_BLOCK
1700     callee_method = SharedRuntime::resolve_helper(true, false, caller_is_c1, CHECK_NULL);
1701     current->set_vm_result_2(callee_method());
1702   JRT_BLOCK_END
1703   // return compiled code entry point after potential safepoints
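       // For a virtual call the receiver is always passed as an oop (it is needed
       // for the inline cache check), so C2 callers use the "ro" entry, where only
       // the non-receiver inline-type arguments are scalarized; C1 callers pass
       // all arguments as oops and use the inline entry.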
1704   address entry = caller_is_c1 ?
1705     callee_method->verified_inline_code_entry() : callee_method->verified_inline_ro_code_entry();
1706   assert(entry != nullptr, "Jump to zero!");
1707   return entry;
1708 JRT_END
1709 
1710 
1711 // Resolve a virtual call that can be statically bound (e.g., always
1712 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1713 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1714   methodHandle callee_method;
1715   bool caller_is_c1 = false;
1716   JRT_BLOCK
1717     callee_method = SharedRuntime::resolve_helper(true, true, caller_is_c1, CHECK_NULL);
1718     current->set_vm_result_2(callee_method());
1719   JRT_BLOCK_END
1720   // return compiled code entry point after potential safepoints
1721   address entry = caller_is_c1 ?
1722     callee_method->verified_inline_code_entry() : callee_method->verified_code_entry();
1723   assert(entry != nullptr, "Jump to zero!");
1724   return entry;
1725 JRT_END
1726 
1727 // The handle_ic_miss_helper_internal function returns false if it failed,
1728 // either because it ran out of vtable stubs or because it ran out of IC stubs
1729 // while moving an IC to a transitional state. The needs_ic_stub_refill value is set
1730 // if the failure was due to running out of IC stubs, in which case handle_ic_miss_helper
1731 // refills the IC stubs and tries again.
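     //
     // A caller is expected to retry under an ICRefillVerifier, roughly like
     // this (a sketch mirroring the loop in handle_ic_miss_helper below):
     //   for (;;) {
     //     ICRefillVerifier ic_refill_verifier;
     //     bool needs_ic_stub_refill = false;
     //     bool ok = handle_ic_miss_helper_internal(..., needs_ic_stub_refill, ...);
     //     if (ok || !needs_ic_stub_refill) break;
     //     InlineCacheBuffer::refill_ic_stubs();
     //   }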
1732 bool SharedRuntime::handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm,
1733                                                    const frame& caller_frame, methodHandle callee_method,
1734                                                    Bytecodes::Code bc, CallInfo& call_info,
1735                                                    bool& needs_ic_stub_refill, bool& is_optimized, bool caller_is_c1, TRAPS) {
1736   CompiledICLocker ml(caller_nm);
1737   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1738   bool should_be_mono = false;
1739   if (inline_cache->is_optimized()) {
1740     if (TraceCallFixup) {
1741       ResourceMark rm(THREAD);
1742       tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1743       callee_method->print_short_name(tty);
1744       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1745     }
1746     is_optimized = true;
1747     should_be_mono = true;
1748   } else if (inline_cache->is_icholder_call()) {
1749     CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1750     if (ic_oop != nullptr) {
1751       if (!ic_oop->is_loader_alive()) {
1752         // Deferred IC cleaning due to concurrent class unloading
1753         if (!inline_cache->set_to_clean()) {
1754           needs_ic_stub_refill = true;
1755           return false;
1756         }
1757       } else if (receiver()->klass() == ic_oop->holder_klass()) {
1758         // This isn't a real miss. We must have seen that compiled code
1759         // is now available and we want the call site converted to a
1760         // monomorphic compiled call site.
1761         // We can't assert for callee_method->code() != nullptr because it
1762         // could have been deoptimized in the meantime
1763         if (TraceCallFixup) {
1764           ResourceMark rm(THREAD);
1765           tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1766           callee_method->print_short_name(tty);
1767           tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1768         }
1769         should_be_mono = true;
1770       }
1771     }
1772   }
1773 
1774   if (should_be_mono) {
1775     // We have a path that was monomorphic but was going interpreted
1776     // and now we have (or had) a compiled entry. We correct the IC
1777     // by using a new icBuffer.
1778     CompiledICInfo info;
1779     Klass* receiver_klass = receiver()->klass();
1780     inline_cache->compute_monomorphic_entry(callee_method,
1781                                             receiver_klass,
1782                                             inline_cache->is_optimized(),
1783                                             false, caller_nm->is_nmethod(),
1784                                             caller_is_c1,
1785                                             info, CHECK_false);
1786     if (!inline_cache->set_to_monomorphic(info)) {
1787       needs_ic_stub_refill = true;
1788       return false;
1789     }
1790   } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1791     // Potential change to megamorphic
1792 
1793     bool successful = inline_cache->set_to_megamorphic(&call_info, bc, needs_ic_stub_refill, caller_is_c1, CHECK_false);
1794     if (needs_ic_stub_refill) {
1795       return false;
1796     }
1797     if (!successful) {
1798       if (!inline_cache->set_to_clean()) {
1799         needs_ic_stub_refill = true;
1800         return false;
1801       }
1802     }
1803   } else {
1804     // Either clean or megamorphic
1805   }
1806   return true;
1807 }
1808 
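     // Handles a miss at an inline-cache call site: re-resolves the callee and
     // transitions the IC (clean -> monomorphic -> megamorphic) as appropriate.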
1809 methodHandle SharedRuntime::handle_ic_miss_helper(bool& is_optimized, bool& caller_is_c1, TRAPS) {
1810   JavaThread* current = THREAD;
1811   ResourceMark rm(current);
1812   CallInfo call_info;
1813   Bytecodes::Code bc;
1814 
1815   // receiver is null for static calls. An exception is thrown for null
1816   // receivers for non-static calls
1817   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1818   // Compiler1 can produce virtual call sites that can actually be statically bound.
1819   // If we fell through to below, we would think that the site was going megamorphic
1820   // when in fact the site can never miss. Worse, because we'd think it was megamorphic,
1821   // we'd try to do a vtable dispatch; however, methods that can be statically bound
1822   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1823   // reresolution of the call site (as if we did a handle_wrong_method and not a
1824   // plain ic_miss) and the site will be converted to an optimized virtual call site,
1825   // never to miss again. I don't believe C2 will produce code like this, but if it
1826   // did, this would still be the correct thing to do for it too, hence no ifdef.
1827   //
1828   if (call_info.resolved_method()->can_be_statically_bound()) {
1829     bool is_static_call = false;
1830     methodHandle callee_method = SharedRuntime::reresolve_call_site(is_static_call, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1831     assert(!is_static_call, "IC miss at static call?");
1832     if (TraceCallFixup) {
1833       RegisterMap reg_map(current,
1834                           RegisterMap::UpdateMap::skip,
1835                           RegisterMap::ProcessFrames::include,
1836                           RegisterMap::WalkContinuation::skip);
1837       frame caller_frame = current->last_frame().sender(&reg_map);
1838       ResourceMark rm(current);
1839       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1840       callee_method->print_short_name(tty);
1841       tty->print_cr(" from pc: " INTPTR_FORMAT, p2i(caller_frame.pc()));
1842       tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1843     }
1844     return callee_method;
1845   }
1846 
1847   methodHandle callee_method(current, call_info.selected_method());
1848 
1849 #ifndef PRODUCT
1850   Atomic::inc(&_ic_miss_ctr);
1851 
1852   // Statistics & Tracing
1853   if (TraceCallFixup) {
1854     ResourceMark rm(current);
1855     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1856     callee_method->print_short_name(tty);
1857     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1858   }
1859 
1860   if (ICMissHistogram) {
1861     MutexLocker m(VMStatistic_lock);
1862     RegisterMap reg_map(current,
1863                         RegisterMap::UpdateMap::skip,
1864                         RegisterMap::ProcessFrames::include,
1865                         RegisterMap::WalkContinuation::skip);
1866     frame f = current->last_frame().real_sender(&reg_map);// skip runtime stub
1867     // produce statistics under the lock
1868     trace_ic_miss(f.pc());
1869   }
1870 #endif
1871 
1872   // Install an event collector so that when a vtable stub is created the
1873   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1874   // event can't be posted when the stub is created, as locks are held
1875   // - instead the event will be deferred until the event collector goes
1876   // out of scope.
1877   JvmtiDynamicCodeEventCollector event_collector;
1878 
1879   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1880   // Transitioning IC caches may require transition stubs. If we run out
1881   // of transition stubs, we have to drop locks and perform a safepoint
1882   // that refills them.
1883   RegisterMap reg_map(current,
1884                       RegisterMap::UpdateMap::skip,
1885                       RegisterMap::ProcessFrames::include,
1886                       RegisterMap::WalkContinuation::skip);
1887   frame caller_frame = current->last_frame().sender(&reg_map);
1888   CodeBlob* cb = caller_frame.cb();
1889   CompiledMethod* caller_nm = cb->as_compiled_method();
1890   // Calls via mismatching methods are always non-scalarized
1891   if (caller_nm->is_compiled_by_c1() || call_info.resolved_method()->mismatch()) {
1892     caller_is_c1 = true;
1893   }
1894 
1895   for (;;) {
1896     ICRefillVerifier ic_refill_verifier;
1897     bool needs_ic_stub_refill = false;
1898     bool successful = handle_ic_miss_helper_internal(receiver, caller_nm, caller_frame, callee_method,
1899                                                      bc, call_info, needs_ic_stub_refill, is_optimized, caller_is_c1, CHECK_(methodHandle()));
1900     if (successful || !needs_ic_stub_refill) {
1901       return callee_method;
1902     } else {
1903       InlineCacheBuffer::refill_ic_stubs();
1904     }
1905   }
1906 }
1907 
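     // Sets the call site at call_addr (a static call or an inline cache) back
     // to the clean state so that it re-resolves on the next invocation.
     // Returns false if the transition ran out of IC stubs and the caller must
     // refill them before retrying.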
1908 static bool clear_ic_at_addr(CompiledMethod* caller_nm, address call_addr, bool is_static_call) {
1909   CompiledICLocker ml(caller_nm);
1910   if (is_static_call) {
1911     CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr);
1912     if (!ssc->is_clean()) {
1913       return ssc->set_to_clean();
1914     }
1915   } else {
1916     // compiled, dispatched call (which used to call an interpreted method)
1917     CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1918     if (!inline_cache->is_clean()) {
1919       return inline_cache->set_to_clean();
1920     }
1921   }
1922   return true;
1923 }
1924 
1925 //
1926 // Resets a call site in compiled code so it will get resolved again.
1927 // This routine handles virtual call sites, optimized virtual call
1928 // sites, and static call sites. It is typically used to change a call
1929 // site's destination from compiled to interpreted.
1930 //
1931 methodHandle SharedRuntime::reresolve_call_site(bool& is_static_call, bool& is_optimized, bool& caller_is_c1, TRAPS) {
1932   JavaThread* current = THREAD;
1933   ResourceMark rm(current);
1934   RegisterMap reg_map(current,
1935                       RegisterMap::UpdateMap::skip,
1936                       RegisterMap::ProcessFrames::include,
1937                       RegisterMap::WalkContinuation::skip);
1938   frame stub_frame = current->last_frame();
1939   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1940   frame caller = stub_frame.sender(&reg_map);
1941   if (caller.is_compiled_frame()) {
1942     caller_is_c1 = caller.cb()->is_compiled_by_c1();
1943   }
1944 
1945   // Do nothing if the frame isn't a live compiled frame.
1946   // nmethod could be deoptimized by the time we get here
1947   // so no update to the caller is needed.
1948 
1949   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1950 
1951     address pc = caller.pc();
1952 
1953     // Check for static or virtual call
1954     CompiledMethod* caller_nm = CodeCache::find_compiled(pc);
1955 
1956     // Default call_addr is the location of the "basic" call.
1957     // Determine the address of the call we are reresolving. With
1958     // Inline Caches we will always find a recognizable call.
1959     // With Inline Caches disabled we may or may not find a
1960     // recognizable call. We will always find a call for static
1961     // calls and for optimized virtual calls. For vanilla virtual
1962     // calls it depends on the state of the UseInlineCaches switch.
1963     //
1964     // With Inline Caches disabled we can get here for a virtual call
1965     // for two reasons:
1966     //   1 - calling an abstract method. The vtable for abstract methods
1967     //       will run us through handle_wrong_method and we will eventually
1968     //       end up in the interpreter to throw the AbstractMethodError.
1969     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1970     //       call and between the time we fetch the entry address and
1971     //       jump to it the target gets deoptimized. Similar to 1,
1972     //       we will wind up in the interpreter (through a c2i with C2).
1973     //
1974     address call_addr = nullptr;
1975     {
1976       // Get call instruction under lock because another thread may be
1977       // busy patching it.
1978       CompiledICLocker ml(caller_nm);
1979       // Location of call instruction
1980       call_addr = caller_nm->call_instruction_address(pc);
1981     }
1982 
1983     // Check relocations for the matching call to 1) avoid false positives,
1984     // and 2) determine the type.
1985     if (call_addr != nullptr) {
1986       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1987       // bytes back in the instruction stream, so we must also check for reloc info.
1988       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1989       bool ret = iter.next(); // Get item
1990       if (ret) {
1991         is_static_call = false;
1992         is_optimized = false;
1993         switch (iter.type()) {
1994           case relocInfo::static_call_type:
1995             is_static_call = true;
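                 // Fall through: static call sites are cleaned the same way
                 // as (optimized) virtual ones.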
1996 
1997           case relocInfo::virtual_call_type:
1998           case relocInfo::opt_virtual_call_type:
1999             is_optimized = (iter.type() == relocInfo::opt_virtual_call_type);
2000             // Cleaning the inline cache will force a new resolve. This is more robust
2001             // than directly setting it to the new destination, since resolving of calls
2002             // is always done through the same code path. (Experience shows that updating
2003             // an inline cache to a wrong method leads to bugs that are very hard to
2004             // track down.) It should not be performance critical, since the
2005             // resolve is only done once.
2006             guarantee(iter.addr() == call_addr, "must find call");
2007             for (;;) {
2008               ICRefillVerifier ic_refill_verifier;
2009               if (!clear_ic_at_addr(caller_nm, call_addr, is_static_call)) {
2010                 InlineCacheBuffer::refill_ic_stubs();
2011               } else {
2012                 break;
2013               }
2014             }
2015             break;
2016           default:
2017             break;
2018         }
2019       }
2020     }
2021   }
2022 
2023   methodHandle callee_method = find_callee_method(is_optimized, caller_is_c1, CHECK_(methodHandle()));
2024 
2025 #ifndef PRODUCT
2026   Atomic::inc(&_wrong_method_ctr);
2027 
2028   if (TraceCallFixup) {
2029     ResourceMark rm(current);
2030     tty->print("handle_wrong_method reresolving call to");
2031     callee_method->print_short_name(tty);
2032     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
2033   }
2034 #endif
2035 
2036   return callee_method;
2037 }
2038 
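     // A platform signal handler typically calls this after decoding the faulting
     // instruction; roughly (a sketch -- computing next_pc is CPU-specific):
     //   address next_pc = ...; // address of the instruction after the faulting one
     //   address stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
     // Execution then resumes at the returned address.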
2039 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
2040   // The faulting unsafe accesses should be changed to throw the error
2041   // synchronously instead. Meanwhile, the faulting instruction will be
2042   // skipped over (effectively turning it into a no-op) and an
2043   // asynchronous exception will be raised, which the thread will
2044   // handle at a later point. If the instruction is a load, it will
2045   // return garbage.
2046 
2047   // Request an async exception.
2048   thread->set_pending_unsafe_access_error();
2049 
2050   // Return address of next instruction to execute.
2051   return next_pc;
2052 }
2053 
2054 #ifdef ASSERT
2055 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
2056                                                                 const BasicType* sig_bt,
2057                                                                 const VMRegPair* regs) {
2058   ResourceMark rm;
2059   const int total_args_passed = method->size_of_parameters();
2060   const VMRegPair*    regs_with_member_name = regs;
2061         VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
2062 
2063   const int member_arg_pos = total_args_passed - 1;
2064   assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
2065   assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
2066 
2067   int comp_args_on_stack = java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
2068 
2069   for (int i = 0; i < member_arg_pos; i++) {
2070     VMReg a =    regs_with_member_name[i].first();
2071     VMReg b = regs_without_member_name[i].first();
2072     assert(a->value() == b->value(), "register allocation mismatch: a=" INTX_FORMAT ", b=" INTX_FORMAT, a->value(), b->value());
2073   }
2074   assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
2075 }
2076 #endif
2077 
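     // Decides whether the call site at caller_pc, currently pointing at
     // destination, should be re-patched to entry_point. We only patch when the
     // current destination is this blob's own code or an adapter blob; anything
     // else (e.g. a resolve stub) is left alone.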
2078 bool SharedRuntime::should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb) {
2079   if (destination != entry_point) {
2080     CodeBlob* callee = CodeCache::find_blob(destination);
2081     // callee == cb seems weird. It means calling the interpreter through a stub.
2082     if (callee != nullptr && (callee == cb || callee->is_adapter_blob())) {
2083       // static call or optimized virtual
2084       if (TraceCallFixup) {
2085         tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
2086         moop->print_short_name(tty);
2087         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
2088       }
2089       return true;
2090     } else {
2091       if (TraceCallFixup) {
2092         tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
2093         moop->print_short_name(tty);
2094         tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
2095       }
2096       // The assert is too strong: the destination could also be a resolve stub.
2097       // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
2098     }
2099   } else {
2100     if (TraceCallFixup) {
2101       tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc));
2102       moop->print_short_name(tty);
2103       tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point));
2104     }
2105   }
2106   return false;
2107 }
2108 
2109 // ---------------------------------------------------------------------------
2110 // We are calling the interpreter via a c2i. Normally this would mean that
2111 // we were called by a compiled method. However, we could have lost a race
2112 // where we went int -> i2c -> c2i, and so the caller could in fact be
2113 // interpreted. If the caller is compiled, we attempt to patch the caller
2114 // so it no longer calls into the interpreter.
2115 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
2116   Method* moop(method);
2117 
2118   AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
2119 
2120   // It's possible that deoptimization can occur at a call site which hasn't
2121   // been resolved yet, in which case this function will be called from
2122   // an nmethod that has been patched for deopt and we can ignore the
2123   // request for a fixup.
2124   // Also it is possible that we lost a race in that from_compiled_entry
2125   // is now back to the i2c; in that case we don't need to patch, and if
2126   // we did we'd leap into space, because the call site needs to use the
2127   // "to interpreter" stub in order to load up the Method*. Don't
2128   // ask me how I know this...
2129 
2130   // Result from nmethod::is_unloading is not stable across safepoints.
2131   NoSafepointVerifier nsv;
2132 
2133   CompiledMethod* callee = moop->code();
2134   if (callee == nullptr) {
2135     return;
2136   }
2137 
2138   CodeBlob* cb = CodeCache::find_blob(caller_pc);
2139   if (cb == nullptr || !cb->is_compiled() || callee->is_unloading()) {
2140     return;
2141   }
2142 
2143   // The check above makes sure this is an nmethod.
2144   CompiledMethod* nm = cb->as_compiled_method_or_null();
2145   assert(nm, "must be");
2146 
2147   // Get the return PC for the passed caller PC.
2148   address return_pc = caller_pc + frame::pc_return_offset;
2149 
2150   assert(!JavaThread::current()->is_interp_only_mode() || !nm->method()->is_continuation_enter_intrinsic()
2151     || ContinuationEntry::is_interpreted_call(return_pc), "interp_only_mode but not in enterSpecial interpreted entry");
2152 
2153   // There is a benign race here. We could be attempting to patch to a compiled
2154   // entry point at the same time the callee is being deoptimized. If that is
2155   // the case then entry_point may in fact point to a c2i and we'd patch the
2156   // call site with the same old data. clear_code will set code() to null
2157   // at the end of it. If we happen to see that null then we can skip trying
2158   // to patch. If we hit the window where the callee has a c2i in the
2159   // from_compiled_entry and the null isn't present yet then we lose the race
2160   // and patch the code with the same old data. Such is life.
2161 
2162   if (moop->code() == nullptr) return;
2163 
2164   if (nm->is_in_use()) {
2165     // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch).
2166     CompiledICLocker ic_locker(nm);
2167     if (NativeCall::is_call_before(return_pc)) {
2168       ResourceMark mark;
2169       NativeCallWrapper* call = nm->call_wrapper_before(return_pc);
2170       //
2171       // bug 6281185. We might get here after resolving a call site to a vanilla
2172       // virtual call. Because the resolvee uses the verified entry it may then
2173       // see compiled code and attempt to patch the site by calling us. This would
2174       // then incorrectly convert the call site to optimized and it's downhill from
2175       // there. If you're lucky you'll get the assert in the bugid, if not you've
2176       // just made a call site that could be megamorphic into a monomorphic site
2177       // for the rest of its life! Just another racing bug in the life of
2178       // fixup_callers_callsite ...
2179       //
2180       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
2181       iter.next();
2182       assert(iter.has_current(), "must have a reloc at java call site");
2183       relocInfo::relocType typ = iter.reloc()->type();
2184       if (typ != relocInfo::static_call_type &&
2185            typ != relocInfo::opt_virtual_call_type &&
2186            typ != relocInfo::static_stub_type) {
2187         return;
2188       }
2189       if (nm->method()->is_continuation_enter_intrinsic()) {
2190         assert(ContinuationEntry::is_interpreted_call(call->instruction_address()) == JavaThread::current()->is_interp_only_mode(),
2191           "mode: %d", JavaThread::current()->is_interp_only_mode());
2192         if (ContinuationEntry::is_interpreted_call(call->instruction_address())) {
2193           return;
2194         }
2195       }
2196       address destination = call->destination();
2197       address entry_point = cb->is_compiled_by_c1() ? callee->verified_inline_entry_point() : callee->verified_entry_point();
2198       if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) {
2199         call->set_destination_mt_safe(entry_point);
2200       }
2201     }
2202   }
2203 JRT_END
2204 
2205 
2206 // same as JVM_Arraycopy, but called directly from compiled code
2207 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
2208                                                 oopDesc* dest, jint dest_pos,
2209                                                 jint length,
2210                                                 JavaThread* current)) {
2211 #ifndef PRODUCT
2212   _slow_array_copy_ctr++;
2213 #endif
2214   // Check if we have null pointers
2215   if (src == nullptr || dest == nullptr) {
2216     THROW(vmSymbols::java_lang_NullPointerException());
2217   }
2218   // Do the copy.  The casts to arrayOop are necessary for the copy_array API,
2219   // even though the copy_array API also performs dynamic checks to ensure
2220   // that src and dest are truly arrays (and are conformable).
2221   // The copy_array mechanism is awkward and could be removed, but
2222   // the compilers don't call this function except as a last resort,
2223   // so it probably doesn't matter.
2224   src->klass()->copy_array((arrayOopDesc*)src, src_pos,
2225                                         (arrayOopDesc*)dest, dest_pos,
2226                                         length, current);
2227 }
2228 JRT_END
2229 
2230 // The caller of generate_class_cast_message() (or one of its callers)
2231 // must use a ResourceMark in order to correctly free the result.
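     // For example (a sketch):
     //   ResourceMark rm(thread);
     //   char* msg = SharedRuntime::generate_class_cast_message(thread, caster_klass);
     //   // msg lives in the resource area and is freed when rm goes out of scope.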
2232 char* SharedRuntime::generate_class_cast_message(
2233     JavaThread* thread, Klass* caster_klass) {
2234 
2235   // Get target class name from the checkcast instruction
2236   vframeStream vfst(thread, true);
2237   assert(!vfst.at_end(), "Java frame must exist");
2238   Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
2239   constantPoolHandle cpool(thread, vfst.method()->constants());
2240   Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
2241   Symbol* target_klass_name = nullptr;
2242   if (target_klass == nullptr) {
2243     // This klass should be resolved, but just in case, get the name in the klass slot.
2244     target_klass_name = cpool->klass_name_at(cc.index());
2245   }
2246   return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
2247 }
2248 
2249 
2250 // The caller of generate_class_cast_message() (or one of its callers)
2251 // must use a ResourceMark in order to correctly free the result.
2252 char* SharedRuntime::generate_class_cast_message(
2253     Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
2254   const char* caster_name = caster_klass->external_name();
2255 
2256   assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
2257   const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
2258                                                    target_klass->external_name();
2259 
2260   size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
2261 
2262   const char* caster_klass_description = "";
2263   const char* target_klass_description = "";
2264   const char* klass_separator = "";
2265   if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
2266     caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
2267   } else {
2268     caster_klass_description = caster_klass->class_in_module_of_loader();
2269     target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
2270     klass_separator = (target_klass != nullptr) ? "; " : "";
2271   }
2272 
2273   // add 3 for parentheses and preceding space
2274   msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2275 
2276   char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2277   if (message == nullptr) {
2278     // Shouldn't happen, but don't cause even more problems if it does
2279     message = const_cast<char*>(caster_klass->external_name());
2280   } else {
2281     jio_snprintf(message,
2282                  msglen,
2283                  "class %s cannot be cast to class %s (%s%s%s)",
2284                  caster_name,
2285                  target_name,
2286                  caster_klass_description,
2287                  klass_separator,
2288                  target_klass_description
2289                  );
2290   }
2291   return message;
2292 }
2293 
2294 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2295   (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2296 JRT_END
2297 
2298 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2299   if (!SafepointSynchronize::is_synchronizing()) {
2300     // Only try quick_enter() if we're not trying to reach a safepoint
2301     // so that the calling thread reaches the safepoint more quickly.
2302     if (ObjectSynchronizer::quick_enter(obj, current, lock)) {
2303       return;
2304     }
2305   }
2306   // NO_ASYNC required because an async exception on the state transition destructor
2307   // would leave you with the lock held and it would never be released.
2308   // The normal monitorenter NullPointerException is thrown without acquiring a lock
2309   // and the model is that an exception implies the method failed.
2310   JRT_BLOCK_NO_ASYNC
2311   Handle h_obj(THREAD, obj);
2312   ObjectSynchronizer::enter(h_obj, lock, current);
2313   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2314   JRT_BLOCK_END
2315 }
2316 
2317 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2318 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2319   SharedRuntime::monitor_enter_helper(obj, lock, current);
2320 JRT_END
2321 
2322 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2323   assert(JavaThread::current() == current, "invariant");
2324   // Exit must be non-blocking, and therefore no exceptions can be thrown.
2325   ExceptionMark em(current);
2326   // The object could become unlocked through a JNI call, which we have no other checks for.
2327   // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2328   if (obj->is_unlocked()) {
2329     if (CheckJNICalls) {
2330       fatal("Object has been unlocked by JNI");
2331     }
2332     return;
2333   }
2334   ObjectSynchronizer::exit(obj, lock, current);
2335 }
2336 
2337 // Handles the uncommon cases of monitor unlocking in compiled code
2338 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2339   assert(current == JavaThread::current(), "pre-condition");
2340   SharedRuntime::monitor_exit_helper(obj, lock, current);
2341 JRT_END
2342 
2343 #ifndef PRODUCT
2344 
2345 void SharedRuntime::print_statistics() {
2346   ttyLocker ttyl;
2347   if (xtty != nullptr)  xtty->head("statistics type='SharedRuntime'");
2348 
2349   SharedRuntime::print_ic_miss_histogram();
2350 
2351   // Dump the JRT_ENTRY counters
2352   if (_new_instance_ctr) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
2353   if (_new_array_ctr) tty->print_cr("%5d new array requires GC", _new_array_ctr);
2354   if (_multi2_ctr) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
2355   if (_multi3_ctr) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
2356   if (_multi4_ctr) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
2357   if (_multi5_ctr) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
2358 
2359   tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr);
2360   tty->print_cr("%5d wrong method", _wrong_method_ctr);
2361   tty->print_cr("%5d unresolved static call site", _resolve_static_ctr);
2362   tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr);
2363   tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2364 
2365   if (_mon_enter_stub_ctr) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr);
2366   if (_mon_exit_stub_ctr) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr);
2367   if (_mon_enter_ctr) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr);
2368   if (_mon_exit_ctr) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr);
2369   if (_partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr);
2370   if (_jbyte_array_copy_ctr) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr);
2371   if (_jshort_array_copy_ctr) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr);
2372   if (_jint_array_copy_ctr) tty->print_cr("%5d int array copies", _jint_array_copy_ctr);
2373   if (_jlong_array_copy_ctr) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr);
2374   if (_oop_array_copy_ctr) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr);
2375   if (_checkcast_array_copy_ctr) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr);
2376   if (_unsafe_array_copy_ctr) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr);
2377   if (_generic_array_copy_ctr) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr);
2378   if (_slow_array_copy_ctr) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr);
2379   if (_find_handler_ctr) tty->print_cr("%5d find exception handler", _find_handler_ctr);
2380   if (_rethrow_ctr) tty->print_cr("%5d rethrow handler", _rethrow_ctr);
2381 
2382   AdapterHandlerLibrary::print_statistics();
2383 
2384   if (xtty != nullptr)  xtty->tail("statistics");
2385 }
2386 
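     // For example, percent(25, 200) == 12.5; the MAX2 guards against division
     // by zero when the total is 0.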
2387 inline double percent(int64_t x, int64_t y) {
2388   return 100.0 * x / MAX2(y, (int64_t)1);
2389 }
2390 
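     // Collects and prints histograms of call arity and parameter-block size
     // for compiled methods. All the work happens in the constructor, so usage
     // is simply (a sketch):
     //   { MethodArityHistogram h; } // takes the locks, walks nmethods, prints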
2391 class MethodArityHistogram {
2392  public:
2393   enum { MAX_ARITY = 256 };
2394  private:
2395   static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2396   static uint64_t _size_histogram[MAX_ARITY];  // histogram of arg size in words
2397   static uint64_t _total_compiled_calls;
2398   static uint64_t _max_compiled_calls_per_method;
2399   static int _max_arity;                       // max. arity seen
2400   static int _max_size;                        // max. arg size seen
2401 
2402   static void add_method_to_histogram(nmethod* nm) {
2403     Method* method = (nm == nullptr) ? nullptr : nm->method();
2404     if (method != nullptr) {
2405       ArgumentCount args(method->signature());
2406       int arity   = args.size() + (method->is_static() ? 0 : 1);
2407       int argsize = method->size_of_parameters();
2408       arity   = MIN2(arity, MAX_ARITY-1);
2409       argsize = MIN2(argsize, MAX_ARITY-1);
2410       uint64_t count = (uint64_t)method->compiled_invocation_count();
2411       _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2412       _total_compiled_calls    += count;
2413       _arity_histogram[arity]  += count;
2414       _size_histogram[argsize] += count;
2415       _max_arity = MAX2(_max_arity, arity);
2416       _max_size  = MAX2(_max_size, argsize);
2417     }
2418   }
2419 
2420   void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2421     const int N = MIN2(9, n);
2422     double sum = 0;
2423     double weighted_sum = 0;
2424     for (int i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
2425     if (sum >= 1.0) { // prevent divide by zero or divide overflow
2426       double rest = sum;
2427       double percent = sum / 100;
2428       for (int i = 0; i <= N; i++) {
2429         rest -= histo[i];
2430         tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], histo[i] / percent);
2431       }
2432       tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2433       tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2434       tty->print_cr("(total # of compiled calls = " UINT64_FORMAT_W(14) ")", _total_compiled_calls);
2435       tty->print_cr("(max # of compiled calls   = " UINT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2436     } else {
2437       tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2438     }
2439   }
2440 
2441   void print_histogram() {
2442     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2443     print_histogram_helper(_max_arity, _arity_histogram, "arity");
2444     tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2445     print_histogram_helper(_max_size, _size_histogram, "size");
2446     tty->cr();
2447   }
2448 
2449  public:
2450   MethodArityHistogram() {
2451     // Take the Compile_lock to protect against changes in the CodeBlob structures
2452     MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2453     // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2454     MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2455     _max_arity = _max_size = 0;
2456     _total_compiled_calls = 0;
2457     _max_compiled_calls_per_method = 0;
2458     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2459     CodeCache::nmethods_do(add_method_to_histogram);
2460     print_histogram();
2461   }
2462 };
2463 
2464 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2465 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2466 uint64_t MethodArityHistogram::_total_compiled_calls;
2467 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2468 int MethodArityHistogram::_max_arity;
2469 int MethodArityHistogram::_max_size;
2470 
2471 void SharedRuntime::print_call_statistics(uint64_t comp_total) {
2472   tty->print_cr("Calls from compiled code:");
2473   int64_t total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2474   int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2475   int64_t mono_i = _nof_interface_calls;
2476   tty->print_cr("\t" INT64_FORMAT_W(12) " (100%%)  total non-inlined   ", total);
2477   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2478   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2479   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
2480   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2481   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
2482   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2483   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
2484   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2485   tty->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2486   tty->cr();
2487   tty->print_cr("Note 1: counter updates are not MT-safe.");
2488   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2489   tty->print_cr("        %% in nested categories are relative to their category");
2490   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
2491   tty->cr();
2492 
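       // Constructing the histogram object walks the code cache under the proper
       // locks, aggregates per-nmethod invocation counts, and prints both histograms.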
2493   MethodArityHistogram h;
2494 }
2495 #endif
2496 
2497 #ifndef PRODUCT
2498 static int _lookups; // number of calls to lookup
2499 static int _equals;  // number of buckets checked with matching hash
2500 static int _hits;    // number of successful lookups
2501 static int _compact; // number of equals calls with compact signature
2502 #endif
2503 
2504 // A simple wrapper class around the calling convention information
2505 // that allows sharing of adapters for the same calling convention.
2506 class AdapterFingerPrint : public CHeapObj<mtCode> {
2507  private:
2508   enum {
2509     _basic_type_bits = 4,
2510     _basic_type_mask = right_n_bits(_basic_type_bits),
2511     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2512     _compact_int_count = 3
2513   };
2514   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2515   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2516 
2517   union {
2518     int  _compact[_compact_int_count];
2519     int* _fingerprint;
2520   } _value;
2521   int _length; // A negative length indicates the fingerprint is in the compact form;
2522                // otherwise _value._fingerprint is the array.
2523 
2524   // Remap BasicTypes that are handled equivalently by the adapters.
2525   // These are correct for the current system but someday it might be
2526   // necessary to make this mapping platform dependent.
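       // Consequence (illustrative): on LP64, signatures such as (BS)V and (ZC)V
       // both encode as [T_INT, T_INT], and (Ljava/lang/String;)V and ([I)V both
       // encode as [T_LONG], so each pair can share a single adapter.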
2527   static BasicType adapter_encoding(BasicType in) {
2528     switch (in) {
2529       case T_BOOLEAN:
2530       case T_BYTE:
2531       case T_SHORT:
2532       case T_CHAR:
2533         // They are all promoted to T_INT in the calling convention
2534         return T_INT;
2535 
2536       case T_OBJECT:
2537       case T_ARRAY:
2538         // In other words, we assume that any register good enough for
2539         // an int or long is good enough for a managed pointer.
2540 #ifdef _LP64
2541         return T_LONG;
2542 #else
2543         return T_INT;
2544 #endif
2545 
2546       case T_INT:
2547       case T_LONG:
2548       case T_FLOAT:
2549       case T_DOUBLE:
2550       case T_VOID:
2551         return in;
2552 
2553       default:
2554         ShouldNotReachHere();
2555         return T_CONFLICT;
2556     }
2557   }
2558 
2559  public:
2560   AdapterFingerPrint(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2561     // The fingerprint is based on the BasicType signature encoded
2562     // into an array of ints with eight entries per int.
2563     int total_args_passed = (sig != nullptr) ? sig->length() : 0;
2564     int* ptr;
2565     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2566     if (len <= _compact_int_count) {
2567       assert(_compact_int_count == 3, "else change next line");
2568       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2569       // Storing the signature in the compact (up to 3 ints) form succeeds
2570       // about 98% of the time.
2571       _length = -len;
2572       ptr = _value._compact;
2573     } else {
2574       _length = len;
2575       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2576       ptr = _value._fingerprint;
2577     }
2578 
2579     // Now pack the BasicTypes with 8 per int
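         // Sketch (assuming the usual HotSpot BasicType numbering, T_INT = 0xa,
         // T_LONG = 0xb, T_VOID = 0xe): a static (IJ)V method has the SigEntry
         // sequence [T_INT, T_LONG, T_VOID], which packs into the single int
         // 0xabe00000 stored in _value._compact[0], with _length == -1.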
2580     int sig_index = 0;
2581     BasicType prev_bt = T_ILLEGAL;
2582     int vt_count = 0;
2583     for (int index = 0; index < len; index++) {
2584       int value = 0;
2585       for (int byte = 0; byte < _basic_types_per_int; byte++) {
2586         BasicType bt = T_ILLEGAL;
2587         if (sig_index < total_args_passed) {
2588           bt = sig->at(sig_index++)._bt;
2589           if (bt == T_PRIMITIVE_OBJECT) {
2590             // Found start of inline type in signature
2591             assert(InlineTypePassFieldsAsArgs, "unexpected start of inline type");
2592             if (sig_index == 1 && has_ro_adapter) {
2593               // With a ro_adapter, replace receiver inline type delimiter by T_VOID to prevent matching
2594               // with other adapters that have the same inline type as first argument and no receiver.
2595               bt = T_VOID;
2596             }
2597             vt_count++;
2598           } else if (bt == T_VOID && prev_bt != T_LONG && prev_bt != T_DOUBLE) {
2599             // Found end of inline type in signature
2600             assert(InlineTypePassFieldsAsArgs, "unexpected end of inline type");
2601             vt_count--;
2602             assert(vt_count >= 0, "invalid vt_count");
2603           } else if (vt_count == 0) {
2604             // Widen fields that are not part of a scalarized inline type argument
2605             bt = adapter_encoding(bt);
2606           }
2607           prev_bt = bt;
2608         }
2609         int bt_val = (bt == T_ILLEGAL) ? 0 : bt;
2610         assert((bt_val & _basic_type_mask) == bt_val, "must fit in 4 bits");
2611         value = (value << _basic_type_bits) | bt_val;
2612       }
2613       ptr[index] = value;
2614     }
2615     assert(vt_count == 0, "invalid vt_count");
2616   }
2617 
2618   ~AdapterFingerPrint() {
2619     if (_length > 0) {
2620       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
2621     }
2622   }
2623 
2624   int value(int index) {
2625     if (_length < 0) {
2626       return _value._compact[index];
2627     }
2628     return _value._fingerprint[index];
2629   }
2630   int length() {
2631     if (_length < 0) return -_length;
2632     return _length;
2633   }
2634 
2635   bool is_compact() {
2636     return _length <= 0;
2637   }
2638 
2639   unsigned int compute_hash() {
2640     int hash = 0;
2641     for (int i = 0; i < length(); i++) {
2642       int v = value(i);
2643       hash = (hash << 8) ^ v ^ (hash >> 5);
2644     }
2645     return (unsigned int)hash;
2646   }
2647 
2648   const char* as_string() {
2649     stringStream st;
2650     st.print("0x");
2651     for (int i = 0; i < length(); i++) {
2652       st.print("%x", value(i));
2653     }
2654     return st.as_string();
2655   }
2656 
2657 #ifndef PRODUCT
2658   // Reconstitutes the basic type arguments from the fingerprint,
2659   // producing strings like LIJDF
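       // (e.g. the compact fingerprint 0xabe00000 from the (IJ)V sketch above
       // renders as "IJ": the T_VOID slot after the long folds into 'J').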
2660   const char* as_basic_args_string() {
2661     stringStream st;
2662     bool long_prev = false;
2663     for (int i = 0; i < length(); i++) {
2664       unsigned val = (unsigned)value(i);
2665       // args are packed so that first/lower arguments are in the highest
2666       // bits of each int value, so iterate from highest to the lowest
2667       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2668         unsigned v = (val >> j) & _basic_type_mask;
2669         if (v == 0) {
2670           assert(i == length() - 1, "Only expect zeroes in the last word");
2671           continue;
2672         }
2673         if (long_prev) {
2674           long_prev = false;
2675           if (v == T_VOID) {
2676             st.print("J");
2677           } else {
2678             st.print("L");
2679           }
2680         } else if (v == T_LONG) {
2681           long_prev = true;
2682         } else if (v != T_VOID) {
2683           st.print("%c", type2char((BasicType)v));
2684         }
2685       }
2686     }
2687     if (long_prev) {
2688       st.print("L");
2689     }
2690     return st.as_string();
2691   }
2692 #endif // !PRODUCT
2693 
2694   bool equals(AdapterFingerPrint* other) {
2695     if (other->_length != _length) {
2696       return false;
2697     }
2698     if (_length < 0) {
2699       assert(_compact_int_count == 3, "else change next line");
2700       return _value._compact[0] == other->_value._compact[0] &&
2701              _value._compact[1] == other->_value._compact[1] &&
2702              _value._compact[2] == other->_value._compact[2];
2703     } else {
2704       for (int i = 0; i < _length; i++) {
2705         if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
2706           return false;
2707         }
2708       }
2709     }
2710     return true;
2711   }
2712 
2713   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2714     NOT_PRODUCT(_equals++);
2715     return fp1->equals(fp2);
2716   }
2717 
2718   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2719     return fp->compute_hash();
2720   }
2721 };
2722 
2723 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2724 ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2725                   AnyObj::C_HEAP, mtCode,
2726                   AdapterFingerPrint::compute_hash,
2727                   AdapterFingerPrint::equals> _adapter_handler_table;
2728 
2729 // Find an entry with the same fingerprint, if one exists
2730 static AdapterHandlerEntry* lookup(const GrowableArray<SigEntry>* sig, bool has_ro_adapter = false) {
2731   NOT_PRODUCT(_lookups++);
2732   assert_lock_strong(AdapterHandlerLibrary_lock);
2733   AdapterFingerPrint fp(sig, has_ro_adapter);
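       // Note: this probe uses a stack-allocated AdapterFingerPrint; the key that
       // is actually stored in the table is the C-heap copy made in create_adapter().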
2734   AdapterHandlerEntry** entry = _adapter_handler_table.get(&fp);
2735   if (entry != nullptr) {
2736 #ifndef PRODUCT
2737     if (fp.is_compact()) _compact++;
2738     _hits++;
2739 #endif
2740     return *entry;
2741   }
2742   return nullptr;
2743 }
2744 
2745 #ifndef PRODUCT
2746 static void print_table_statistics() {
2747   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2748     return sizeof(*key) + sizeof(*a);
2749   };
2750   TableStatistics ts = _adapter_handler_table.statistics_calculate(size);
2751   ts.print(tty, "AdapterHandlerTable");
2752   tty->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2753                 _adapter_handler_table.table_size(), _adapter_handler_table.number_of_entries());
2754   tty->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d compact %d",
2755                 _lookups, _equals, _hits, _compact);
2756 }
2757 #endif
2758 
2759 // ---------------------------------------------------------------------------
2760 // Implementation of AdapterHandlerLibrary
2761 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2762 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2763 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2764 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2765 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2766 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2767 const int AdapterHandlerLibrary_size = 48*K;
2768 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2769 
2770 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2771   return _buffer;
2772 }
2773 
2774 static void post_adapter_creation(const AdapterBlob* new_adapter,
2775                                   const AdapterHandlerEntry* entry) {
2776   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2777     char blob_id[256];
2778     jio_snprintf(blob_id,
2779                  sizeof(blob_id),
2780                  "%s(%s)",
2781                  new_adapter->name(),
2782                  entry->fingerprint()->as_string());
2783     if (Forte::is_enabled()) {
2784       Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2785     }
2786 
2787     if (JvmtiExport::should_post_dynamic_code_generated()) {
2788       JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2789     }
2790   }
2791 }
2792 
2793 void AdapterHandlerLibrary::initialize() {
2794   ResourceMark rm;
2795   AdapterBlob* no_arg_blob = nullptr;
2796   AdapterBlob* int_arg_blob = nullptr;
2797   AdapterBlob* obj_arg_blob = nullptr;
2798   AdapterBlob* obj_int_arg_blob = nullptr;
2799   AdapterBlob* obj_obj_arg_blob = nullptr;
2800   {
2801     MutexLocker mu(AdapterHandlerLibrary_lock);
2802 
2803     // Create a special handler for abstract methods.  Abstract methods
2804     // are never compiled so an i2c entry is somewhat meaningless, but
2805     // throw AbstractMethodError just in case.
2806     // Pass wrong_method_abstract for the c2i transitions to return
2807     // AbstractMethodError for invalid invocations.
2808     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2809     _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
2810                                                                 StubRoutines::throw_AbstractMethodError_entry(),
2811                                                                 wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
2812                                                                 wrong_method_abstract, wrong_method_abstract);
2813     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2814 
2815     CompiledEntrySignature no_args;
2816     no_args.compute_calling_conventions();
2817     _no_arg_handler = create_adapter(no_arg_blob, no_args, true);
2818 
2819     CompiledEntrySignature obj_args;
2820     SigEntry::add_entry(obj_args.sig(), T_OBJECT, nullptr);
2821     obj_args.compute_calling_conventions();
2822     _obj_arg_handler = create_adapter(obj_arg_blob, obj_args, true);
2823 
2824     CompiledEntrySignature int_args;
2825     SigEntry::add_entry(int_args.sig(), T_INT, nullptr);
2826     int_args.compute_calling_conventions();
2827     _int_arg_handler = create_adapter(int_arg_blob, int_args, true);
2828 
2829     CompiledEntrySignature obj_int_args;
2830     SigEntry::add_entry(obj_int_args.sig(), T_OBJECT, nullptr);
2831     SigEntry::add_entry(obj_int_args.sig(), T_INT, nullptr);
2832     obj_int_args.compute_calling_conventions();
2833     _obj_int_arg_handler = create_adapter(obj_int_arg_blob, obj_int_args, true);
2834 
2835     CompiledEntrySignature obj_obj_args;
2836     SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2837     SigEntry::add_entry(obj_obj_args.sig(), T_OBJECT, nullptr);
2838     obj_obj_args.compute_calling_conventions();
2839     _obj_obj_arg_handler = create_adapter(obj_obj_arg_blob, obj_obj_args, true);
2840 
2841     assert(no_arg_blob != nullptr &&
2842           obj_arg_blob != nullptr &&
2843           int_arg_blob != nullptr &&
2844           obj_int_arg_blob != nullptr &&
2845           obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2846   }
2848 
2849   // Outside of the lock
2850   post_adapter_creation(no_arg_blob, _no_arg_handler);
2851   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2852   post_adapter_creation(int_arg_blob, _int_arg_handler);
2853   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2854   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2855 }
2856 
2857 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2858                                                       address i2c_entry,
2859                                                       address c2i_entry,
2860                                                       address c2i_inline_entry,
2861                                                       address c2i_inline_ro_entry,
2862                                                       address c2i_unverified_entry,
2863                                                       address c2i_unverified_inline_entry,
2864                                                       address c2i_no_clinit_check_entry) {
2865   return new AdapterHandlerEntry(fingerprint, i2c_entry, c2i_entry, c2i_inline_entry, c2i_inline_ro_entry, c2i_unverified_entry,
2866                                  c2i_unverified_inline_entry, c2i_no_clinit_check_entry);
2867 }
2868 
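     // Illustrative examples: a receiver-only method such as Object::hashCode()
     // maps to _obj_arg_handler, a static (I)I method such as Math::abs(int)
     // maps to _int_arg_handler, and String::charAt(int) maps to
     // _obj_int_arg_handler; everything else falls through to the general path.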
2869 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2870   if (method->is_abstract()) {
2871     return nullptr;
2872   }
2873   int total_args_passed = method->size_of_parameters(); // All args on stack
2874   if (total_args_passed == 0) {
2875     return _no_arg_handler;
2876   } else if (total_args_passed == 1) {
2877     if (!method->is_static()) {
2878       if (InlineTypePassFieldsAsArgs && method->method_holder()->is_inline_klass()) {
2879         return nullptr;
2880       }
2881       return _obj_arg_handler;
2882     }
2883     switch (method->signature()->char_at(1)) {
2884       case JVM_SIGNATURE_CLASS: {
2885         if (InlineTypePassFieldsAsArgs) {
2886           SignatureStream ss(method->signature());
2887           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2888           if (vk != nullptr) {
2889             return nullptr;
2890           }
2891         }
2892         return _obj_arg_handler;
2893       }
2894       case JVM_SIGNATURE_ARRAY:
2895         return _obj_arg_handler;
2896       case JVM_SIGNATURE_INT:
2897       case JVM_SIGNATURE_BOOLEAN:
2898       case JVM_SIGNATURE_CHAR:
2899       case JVM_SIGNATURE_BYTE:
2900       case JVM_SIGNATURE_SHORT:
2901         return _int_arg_handler;
2902     }
2903   } else if (total_args_passed == 2 &&
2904              !method->is_static() && (!InlineTypePassFieldsAsArgs || !method->method_holder()->is_inline_klass())) {
2905     switch (method->signature()->char_at(1)) {
2906       case JVM_SIGNATURE_CLASS: {
2907         if (InlineTypePassFieldsAsArgs) {
2908           SignatureStream ss(method->signature());
2909           InlineKlass* vk = ss.as_inline_klass(method->method_holder());
2910           if (vk != nullptr) {
2911             return nullptr;
2912           }
2913         }
2914         return _obj_obj_arg_handler;
2915       }
2916       case JVM_SIGNATURE_ARRAY:
2917         return _obj_obj_arg_handler;
2918       case JVM_SIGNATURE_INT:
2919       case JVM_SIGNATURE_BOOLEAN:
2920       case JVM_SIGNATURE_CHAR:
2921       case JVM_SIGNATURE_BYTE:
2922       case JVM_SIGNATURE_SHORT:
2923         return _obj_int_arg_handler;
2924     }
2925   }
2926   return nullptr;
2927 }
2928 
2929 CompiledEntrySignature::CompiledEntrySignature(Method* method) :
2930   _method(method), _num_inline_args(0), _has_inline_recv(false),
2931   _regs(nullptr), _regs_cc(nullptr), _regs_cc_ro(nullptr),
2932   _args_on_stack(0), _args_on_stack_cc(0), _args_on_stack_cc_ro(0),
2933   _c1_needs_stack_repair(false), _c2_needs_stack_repair(false), _supers(nullptr) {
2934   _sig = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2935   _sig_cc = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2936   _sig_cc_ro = new GrowableArray<SigEntry>((method != nullptr) ? method->size_of_parameters() : 1);
2937 }
2938 
2939 // See if we can save space by sharing the same entry for VIEP and VIEP(RO),
2940 // or the same entry for VEP and VIEP(RO).
2941 CodeOffsets::Entries CompiledEntrySignature::c1_inline_ro_entry_type() const {
2942   if (!has_scalarized_args()) {
2943     // VEP/VIEP/VIEP(RO) all share the same entry. There's no packing.
2944     return CodeOffsets::Verified_Entry;
2945   }
2946   if (_method->is_static()) {
2947     // Static methods don't need VIEP(RO)
2948     return CodeOffsets::Verified_Entry;
2949   }
2950 
2951   if (has_inline_recv()) {
2952     if (num_inline_args() == 1) {
2953       // Share same entry for VIEP and VIEP(RO).
2954       // This is quite common: we have an instance method in an InlineKlass that has
2955       // no inline type args other than <this>.
2956       return CodeOffsets::Verified_Inline_Entry;
2957     } else {
2958       assert(num_inline_args() > 1, "must be");
2959       // No sharing:
2960       //   VIEP(RO) -- <this> is passed as object
2961       //   VEP      -- <this> is passed as fields
2962       return CodeOffsets::Verified_Inline_Entry_RO;
2963     }
2964   }
2965 
2966   // Either a static method, or <this> is not an inline type
2967   if (args_on_stack_cc() != args_on_stack_cc_ro()) {
2968     // No sharing:
2969     // Some arguments are passed on the stack, and we have inserted reserved entries
2970     // into the VEP, but we never insert reserved entries into the VIEP(RO).
2971     return CodeOffsets::Verified_Inline_Entry_RO;
2972   } else {
2973     // Share same entry for VEP and VIEP(RO).
2974     return CodeOffsets::Verified_Entry;
2975   }
2976 }
2977 
2978 // Returns all super methods (transitive) in classes and interfaces that are overridden by the current method.
2979 GrowableArray<Method*>* CompiledEntrySignature::get_supers() {
2980   if (_supers != nullptr) {
2981     return _supers;
2982   }
2983   _supers = new GrowableArray<Method*>();
2984   // Skip private, static, and <init> methods
2985   if (_method->is_private() || _method->is_static() || _method->is_object_constructor()) {
2986     return _supers;
2987   }
2988   Symbol* name = _method->name();
2989   Symbol* signature = _method->signature();
2990   const Klass* holder = _method->method_holder()->super();
2991   Symbol* holder_name = holder->name();
2992   ThreadInVMfromUnknown tiv;
2993   JavaThread* current = JavaThread::current();
2994   HandleMark hm(current);
2995   Handle loader(current, _method->method_holder()->class_loader());
2996 
2997   // Walk up the class hierarchy and search for super methods
2998   while (holder != nullptr) {
2999     Method* super_method = holder->lookup_method(name, signature);
3000     if (super_method == nullptr) {
3001       break;
3002     }
3003     if (!super_method->is_static() && !super_method->is_private() &&
3004         (!super_method->is_package_private() ||
3005          super_method->method_holder()->is_same_class_package(loader(), holder_name))) {
3006       _supers->push(super_method);
3007     }
3008     holder = super_method->method_holder()->super();
3009   }
3010   // Search interfaces for super methods
3011   Array<InstanceKlass*>* interfaces = _method->method_holder()->transitive_interfaces();
3012   for (int i = 0; i < interfaces->length(); ++i) {
3013     Method* m = interfaces->at(i)->lookup_method(name, signature);
3014     if (m != nullptr && !m->is_static() && m->is_public()) {
3015       _supers->push(m);
3016     }
3017   }
3018   return _supers;
3019 }
3020 
3021 // Iterate over arguments and compute scalarized and non-scalarized signatures
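     // Sketch (hypothetical inline class): for 'void m(MyValue v)' where MyValue
     // declares fields (int x, int y), _sig keeps a single T_OBJECT entry for v,
     // while _sig_cc expands v into its extended signature (a delimited field
     // list), plus an extra T_BOOLEAN "IsInit" entry when v is nullable (the
     // T_OBJECT case below).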
3022 void CompiledEntrySignature::compute_calling_conventions(bool init) {
3023   bool has_scalarized = false;
3024   if (_method != nullptr) {
3025     InstanceKlass* holder = _method->method_holder();
3026     int arg_num = 0;
3027     if (!_method->is_static()) {
3028       if (holder->is_inline_klass() && InlineKlass::cast(holder)->can_be_passed_as_fields() &&
3029           (init || _method->is_scalarized_arg(arg_num))) {
3030         _sig_cc->appendAll(InlineKlass::cast(holder)->extended_sig());
3031         has_scalarized = true;
3032         _has_inline_recv = true;
3033         _num_inline_args++;
3034       } else {
3035         SigEntry::add_entry(_sig_cc, T_OBJECT, holder->name());
3036       }
3037       SigEntry::add_entry(_sig, T_OBJECT, holder->name());
3038       SigEntry::add_entry(_sig_cc_ro, T_OBJECT, holder->name());
3039       arg_num++;
3040     }
3041     for (SignatureStream ss(_method->signature()); !ss.at_return_type(); ss.next()) {
3042       BasicType bt = ss.type();
3043       if (bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) {
3044         InlineKlass* vk = ss.as_inline_klass(holder);
3045         if (vk != nullptr && vk->can_be_passed_as_fields() && (init || _method->is_scalarized_arg(arg_num))) {
3046           // Check for a calling convention mismatch with super method(s)
3047           bool scalar_super = false;
3048           bool non_scalar_super = false;
3049           GrowableArray<Method*>* supers = get_supers();
3050           for (int i = 0; i < supers->length(); ++i) {
3051             Method* super_method = supers->at(i);
3052             if (super_method->is_scalarized_arg(arg_num)) {
3053               scalar_super = true;
3054             } else {
3055               non_scalar_super = true;
3056             }
3057           }
3058 #ifdef ASSERT
3059           // Randomly enable below code paths for stress testing
3060           bool stress = init && StressCallingConvention;
3061           if (stress && (os::random() & 1) == 1) {
3062             non_scalar_super = true;
3063             if ((os::random() & 1) == 1) {
3064               scalar_super = true;
3065             }
3066           }
3067 #endif
3068           if (non_scalar_super) {
3069             // Found a super method with a non-scalarized argument. Fall back to the non-scalarized calling convention.
3070             if (scalar_super) {
3071               // Found non-scalar *and* scalar super methods. We can't handle both.
3072               // Mark the scalar method as mismatch and re-compile call sites to use non-scalarized calling convention.
3073               for (int i = 0; i < supers->length(); ++i) {
3074                 Method* super_method = supers->at(i);
3075                 if (super_method->is_scalarized_arg(arg_num) debug_only(|| (stress && (os::random() & 1) == 1))) {
3076                   super_method->set_mismatch();
3077                   MutexLocker ml(Compile_lock, Mutex::_safepoint_check_flag);
3078                   JavaThread* thread = JavaThread::current();
3079                   HandleMark hm(thread);
3080                   methodHandle mh(thread, super_method);
3081                   DeoptimizationScope deopt_scope;
3082                   CodeCache::mark_for_deoptimization(&deopt_scope, mh());
3083                   deopt_scope.deoptimize_marked();
3084                 }
3085               }
3086             }
3087             // Fall back to non-scalarized calling convention
3088             SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3089             SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3090           } else {
3091             _num_inline_args++;
3092             has_scalarized = true;
3093             int last = _sig_cc->length();
3094             int last_ro = _sig_cc_ro->length();
3095             _sig_cc->appendAll(vk->extended_sig());
3096             _sig_cc_ro->appendAll(vk->extended_sig());
3097             if (bt == T_OBJECT) {
3098               // Nullable inline type argument, insert InlineTypeNode::IsInit field right after T_PRIMITIVE_OBJECT
3099               _sig_cc->insert_before(last+1, SigEntry(T_BOOLEAN, -1, nullptr));
3100               _sig_cc_ro->insert_before(last_ro+1, SigEntry(T_BOOLEAN, -1, nullptr));
3101             }
3102           }
3103         } else {
3104           SigEntry::add_entry(_sig_cc, T_OBJECT, ss.as_symbol());
3105           SigEntry::add_entry(_sig_cc_ro, T_OBJECT, ss.as_symbol());
3106         }
3107         bt = T_OBJECT;
3108       } else {
3109         SigEntry::add_entry(_sig_cc, ss.type(), ss.as_symbol());
3110         SigEntry::add_entry(_sig_cc_ro, ss.type(), ss.as_symbol());
3111       }
3112       SigEntry::add_entry(_sig, bt, ss.as_symbol());
3113       if (bt != T_VOID) {
3114         arg_num++;
3115       }
3116     }
3117   }
3118 
3119   // Compute the non-scalarized calling convention
3120   _regs = NEW_RESOURCE_ARRAY(VMRegPair, _sig->length());
3121   _args_on_stack = SharedRuntime::java_calling_convention(_sig, _regs);
3122 
3123   // Compute the scalarized calling conventions if there are scalarized inline types in the signature
3124   if (has_scalarized && !_method->is_native()) {
3125     _regs_cc = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc->length());
3126     _args_on_stack_cc = SharedRuntime::java_calling_convention(_sig_cc, _regs_cc);
3127 
3128     _regs_cc_ro = NEW_RESOURCE_ARRAY(VMRegPair, _sig_cc_ro->length());
3129     _args_on_stack_cc_ro = SharedRuntime::java_calling_convention(_sig_cc_ro, _regs_cc_ro);
3130 
3131     _c1_needs_stack_repair = (_args_on_stack_cc < _args_on_stack) || (_args_on_stack_cc_ro < _args_on_stack);
3132     _c2_needs_stack_repair = (_args_on_stack_cc > _args_on_stack) || (_args_on_stack_cc > _args_on_stack_cc_ro);
3133 
3134     // Upper bound on stack arguments to avoid hitting the argument limit and
3135     // bailing out of compilation ("unsupported incoming calling sequence").
3136     // TODO we need a reasonable limit (flag?) here
3137     if (MAX2(_args_on_stack_cc, _args_on_stack_cc_ro) <= 60) {
3138       return; // Success
3139     }
3140   }
3141 
3142   // No scalarized args
3143   _sig_cc = _sig;
3144   _regs_cc = _regs;
3145   _args_on_stack_cc = _args_on_stack;
3146 
3147   _sig_cc_ro = _sig;
3148   _regs_cc_ro = _regs;
3149   _args_on_stack_cc_ro = _args_on_stack;
3150 }
3151 
3152 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
3153   // Use customized signature handler.  Need to lock around updates to
3154   // the _adapter_handler_table (it is not safe for concurrent readers
3155   // and a single writer: this could be fixed if it becomes a
3156   // problem).
3157 
3158   // Fast-path for trivial adapters
3159   AdapterHandlerEntry* entry = get_simple_adapter(method);
3160   if (entry != nullptr) {
3161     return entry;
3162   }
3163 
3164   ResourceMark rm;
3165   AdapterBlob* new_adapter = nullptr;
3166 
3167   CompiledEntrySignature ces(method());
3168   ces.compute_calling_conventions();
3169   if (ces.has_scalarized_args()) {
3170     method->set_has_scalarized_args();
3171     if (ces.c1_needs_stack_repair()) {
3172       method->set_c1_needs_stack_repair();
3173     }
3174     if (ces.c2_needs_stack_repair()) {
3175       method->set_c2_needs_stack_repair();
3176     }
3177   } else if (method->is_abstract()) {
3178     return _abstract_method_handler;
3179   }
3180 
3181   {
3182     MutexLocker mu(AdapterHandlerLibrary_lock);
3183 
3184     if (ces.has_scalarized_args() && method->is_abstract()) {
3185       // Save a C heap allocated version of the signature for abstract methods with scalarized inline type arguments
3186       address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
3187       entry = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(nullptr),
3188                                                StubRoutines::throw_AbstractMethodError_entry(),
3189                                                wrong_method_abstract, wrong_method_abstract, wrong_method_abstract,
3190                                                wrong_method_abstract, wrong_method_abstract);
3191       GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc_ro()->length(), mtInternal);
3192       heap_sig->appendAll(ces.sig_cc_ro());
3193       entry->set_sig_cc(heap_sig);
3194       return entry;
3195     }
3196 
3197     // Lookup method signature's fingerprint
3198     entry = lookup(ces.sig_cc(), ces.has_inline_recv());
3199 
3200     if (entry != nullptr) {
3201 #ifdef ASSERT
3202       if (VerifyAdapterSharing) {
3203         AdapterBlob* comparison_blob = nullptr;
3204         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, ces, false);
3205         assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
3206         assert(comparison_entry->compare_code(entry), "code must match");
3207         // Release the one just created and return the original
3208         delete comparison_entry;
3209       }
3210 #endif
3211       return entry;
3212     }
3213 
3214     entry = create_adapter(new_adapter, ces, /* allocate_code_blob */ true);
3215   }
3216 
3217   // Outside of the lock
3218   if (new_adapter != nullptr) {
3219     post_adapter_creation(new_adapter, entry);
3220   }
3221   return entry;
3222 }
3223 
3224 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& new_adapter,
3225                                                            CompiledEntrySignature& ces,
3226                                                            bool allocate_code_blob) {
3227 
3228   // StubRoutines::_final_stubs_code is initialized after this function can be called. As a result,
3229   // VerifyAdapterCalls and VerifyAdapterSharing can fail if we re-use code that was generated before
3230   // all of StubRoutines::_final_stubs_code was set. The checks in question are the runtime range checks
3231   // generated in an I2C stub, which ensure that the stub is entered only from an interpreter frame or stubs.
3232   bool contains_all_checks = StubRoutines::final_stubs_code() != nullptr;
3233 
3234   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
3235   CodeBuffer buffer(buf);
3236   short buffer_locs[20];
3237   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
3238                                           sizeof(buffer_locs)/sizeof(relocInfo));
3239 
3240   // Make a C heap allocated version of the fingerprint to store in the adapter
3241   AdapterFingerPrint* fingerprint = new AdapterFingerPrint(ces.sig_cc(), ces.has_inline_recv());
3242   MacroAssembler _masm(&buffer);
3243   AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
3244                                                 ces.args_on_stack(),
3245                                                 ces.sig(),
3246                                                 ces.regs(),
3247                                                 ces.sig_cc(),
3248                                                 ces.regs_cc(),
3249                                                 ces.sig_cc_ro(),
3250                                                 ces.regs_cc_ro(),
3251                                                 fingerprint,
3252                                                 new_adapter,
3253                                                 allocate_code_blob);
3254 
3255   if (ces.has_scalarized_args()) {
3256     // Save a C heap allocated version of the scalarized signature and store it in the adapter
3257     GrowableArray<SigEntry>* heap_sig = new (mtInternal) GrowableArray<SigEntry>(ces.sig_cc()->length(), mtInternal);
3258     heap_sig->appendAll(ces.sig_cc());
3259     entry->set_sig_cc(heap_sig);
3260   }
3261 
3262 #ifdef ASSERT
3263   if (VerifyAdapterSharing) {
3264     entry->save_code(buf->code_begin(), buffer.insts_size());
3265     if (!allocate_code_blob) {
3266       return entry;
3267     }
3268   }
3269 #endif
3270 
3271   NOT_PRODUCT(int insts_size = buffer.insts_size());
3272   if (new_adapter == nullptr) {
3273     // CodeCache is full, disable compilation
3274     // Ought to log this but the compile log is only per compile thread
3275     // and we're some nondescript Java thread.
3276     return nullptr;
3277   }
3278   entry->relocate(new_adapter->content_begin());
3279 #ifndef PRODUCT
3280   // debugging support
3281   if (PrintAdapterHandlers || PrintStubCode) {
3282     ttyLocker ttyl;
3283     entry->print_adapter_on(tty);
3284     tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
3285                   _adapter_handler_table.number_of_entries(), fingerprint->as_basic_args_string(),
3286                   fingerprint->as_string(), insts_size);
3287     tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(entry->get_c2i_entry()));
3288     if (Verbose || PrintStubCode) {
3289       address first_pc = entry->base_address();
3290       if (first_pc != nullptr) {
3291         Disassembler::decode(first_pc, first_pc + insts_size, tty
3292                              NOT_PRODUCT(COMMA &new_adapter->asm_remarks()));
3293         tty->cr();
3294       }
3295     }
3296   }
3297 #endif
3298 
3299   // Add the entry only if the entry contains all required checks (see sharedRuntime_xxx.cpp)
3300   // The checks are inserted only if -XX:+VerifyAdapterCalls is specified.
3301   if (contains_all_checks || !VerifyAdapterCalls) {
3302     assert_lock_strong(AdapterHandlerLibrary_lock);
3303     _adapter_handler_table.put(fingerprint, entry);
3304   }
3305   return entry;
3306 }
3307 
3308 address AdapterHandlerEntry::base_address() {
3309   address base = _i2c_entry;
3310   if (base == nullptr)  base = _c2i_entry;
3311   assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
3312   assert(base <= _c2i_inline_entry || _c2i_inline_entry == nullptr, "");
3313   assert(base <= _c2i_inline_ro_entry || _c2i_inline_ro_entry == nullptr, "");
3314   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
3315   assert(base <= _c2i_unverified_inline_entry || _c2i_unverified_inline_entry == nullptr, "");
3316   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
3317   return base;
3318 }
3319 
3320 void AdapterHandlerEntry::relocate(address new_base) {
3321   address old_base = base_address();
3322   assert(old_base != nullptr, "");
3323   ptrdiff_t delta = new_base - old_base;
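       // Example (sketch with made-up addresses): if the adapter code was
       // generated in a temporary buffer at 0x1000 and copied into its final
       // blob at 0x5000, delta is 0x4000 and every non-null entry below is
       // shifted by that amount.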
3324   if (_i2c_entry != nullptr)
3325     _i2c_entry += delta;
3326   if (_c2i_entry != nullptr)
3327     _c2i_entry += delta;
3328   if (_c2i_inline_entry != nullptr)
3329     _c2i_inline_entry += delta;
3330   if (_c2i_inline_ro_entry != nullptr)
3331     _c2i_inline_ro_entry += delta;
3332   if (_c2i_unverified_entry != nullptr)
3333     _c2i_unverified_entry += delta;
3334   if (_c2i_unverified_inline_entry != nullptr)
3335     _c2i_unverified_inline_entry += delta;
3336   if (_c2i_no_clinit_check_entry != nullptr)
3337     _c2i_no_clinit_check_entry += delta;
3338   assert(base_address() == new_base, "");
3339 }
3340 
3341 
3342 AdapterHandlerEntry::~AdapterHandlerEntry() {
3343   delete _fingerprint;
3344   if (_sig_cc != nullptr) {
3345     delete _sig_cc;
3346   }
3347 #ifdef ASSERT
3348   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3349 #endif
3350 }
3351 
3352 
3353 #ifdef ASSERT
3354 // Capture the code before relocation so that it can be compared
3355 // against other versions.  If the code is captured after relocation
3356 // then relative instructions won't be equivalent.
3357 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3358   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3359   _saved_code_length = length;
3360   memcpy(_saved_code, buffer, length);
3361 }
3362 
3363 
3364 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3365   assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3366 
3367   if (other->_saved_code_length != _saved_code_length) {
3368     return false;
3369   }
3370 
3371   return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3372 }
3373 #endif
3374 
3375 
3376 /**
3377  * Create a native wrapper for this native method.  The wrapper converts the
3378  * Java-compiled calling convention to the native convention, handles
3379  * arguments, and transitions to native.  On return from the native code we
3380  * transition back to Java, blocking if a safepoint is in progress.
3381  */
3382 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3383   ResourceMark rm;
3384   nmethod* nm = nullptr;
3385 
3386   // Check if memory should be freed before allocation
3387   CodeCache::gc_on_allocation();
3388 
3389   assert(method->is_native(), "must be native");
3390   assert(method->is_special_native_intrinsic() ||
3391          method->has_native_function(), "must have something valid to call!");
3392 
3393   {
3394     // Perform the work while holding the lock, but perform any printing outside the lock
3395     MutexLocker mu(AdapterHandlerLibrary_lock);
3396     // See if somebody beat us to it
3397     if (method->code() != nullptr) {
3398       return;
3399     }
3400 
3401     const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3402     assert(compile_id > 0, "Must generate native wrapper");
3403 
3404 
3405     ResourceMark rm;
3406     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
3407     if (buf != nullptr) {
3408       CodeBuffer buffer(buf);
3409 
3410       if (method->is_continuation_enter_intrinsic()) {
3411         buffer.initialize_stubs_size(192);
3412       }
3413 
3414       struct { double data[20]; } locs_buf;
3415       struct { double data[20]; } stubs_locs_buf;
3416       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3417 #if defined(AARCH64) || defined(PPC64)
3418       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3419       // in the constant pool to ensure ordering between the barrier and oops
3420       // accesses. For native_wrappers we need a constant.
3421       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3422       // static java call that is resolved in the runtime.
3423       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3424         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3425       }
3426 #endif
3427       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3428       MacroAssembler _masm(&buffer);
3429 
3430       // Fill in the signature array, for the calling-convention call.
3431       const int total_args_passed = method->size_of_parameters();
3432 
3433       BasicType stack_sig_bt[16];
3434       VMRegPair stack_regs[16];
3435       BasicType* sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
3436       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3437 
3438       int i = 0;
3439       if (!method->is_static()) {  // Pass in receiver first
3440         sig_bt[i++] = T_OBJECT;
3441       }
3442       SignatureStream ss(method->signature());
3443       for (; !ss.at_return_type(); ss.next()) {
3444         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
3445         if (ss.type() == T_LONG || ss.type() == T_DOUBLE) {
3446           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
3447         }
3448       }
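           // Example (hypothetical): for a non-static native (JI)V method,
           // sig_bt = { T_OBJECT /* receiver */, T_LONG, T_VOID, T_INT }
           // and total_args_passed is 4.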
3449       assert(i == total_args_passed, "");
3450       BasicType ret_type = ss.type();
3451 
3452       // Now get the compiled-Java arguments layout.
3453       int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3454 
3455       // Generate the compiled-to-native wrapper code
3456       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3457 
3458       if (nm != nullptr) {
3459         {
3460           MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
3461           if (nm->make_in_use()) {
3462             method->set_code(method, nm);
3463           }
3464         }
3465 
3466         DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_simple));
3467         if (directive->PrintAssemblyOption) {
3468           nm->print_code();
3469         }
3470         DirectivesStack::release(directive);
3471       }
3472     }
3473   } // Unlock AdapterHandlerLibrary_lock
3474 
3475 
3476   // Install the generated code.
3477   if (nm != nullptr) {
3478     const char *msg = method->is_static() ? "(static)" : "";
3479     CompileTask::print_ul(nm, msg);
3480     if (PrintCompilation) {
3481       ttyLocker ttyl;
3482       CompileTask::print(tty, nm, msg);
3483     }
3484     nm->post_compiled_method_load_event();
3485   }
3486 }
3487 
3488 // -------------------------------------------------------------------------
3489 // Java-Java calling convention
3490 // (what you use when Java calls Java)
3491 
3492 //------------------------------name_for_receiver----------------------------------
3493 // For a given signature, return the VMReg for parameter 0.
3494 VMReg SharedRuntime::name_for_receiver() {
3495   VMRegPair regs;
3496   BasicType sig_bt = T_OBJECT;
3497   (void) java_calling_convention(&sig_bt, &regs, 1);
3498   // Return argument 0 register.  In the LP64 build pointers
3499   // take 2 registers, but the VM wants only the 'main' name.
3500   return regs.first();
3501 }
3502 
3503 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
3504   // This method returns a data structure allocated as a
3505   // ResourceObject, so do not put any ResourceMarks in here.
3506 
3507   BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3508   VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3509   int cnt = 0;
3510   if (has_receiver) {
3511     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3512   }
3513 
3514   for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3515     BasicType type = ss.type();
3516     sig_bt[cnt++] = type;
3517     if (is_double_word_type(type))
3518       sig_bt[cnt++] = T_VOID;
3519   }
3520 
3521   if (has_appendix) {
3522     sig_bt[cnt++] = T_OBJECT;
3523   }
3524 
3525   assert(cnt < 256, "grow table size");
3526 
3527   int comp_args_on_stack;
3528   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3529 
3530   // the calling convention doesn't count out_preserve_stack_slots so
3531   // we must add that in to get "true" stack offsets.
3532 
3533   if (comp_args_on_stack) {
3534     for (int i = 0; i < cnt; i++) {
3535       VMReg reg1 = regs[i].first();
3536       if (reg1->is_stack()) {
3537         // Yuck
3538         reg1 = reg1->bias(out_preserve_stack_slots());
3539       }
3540       VMReg reg2 = regs[i].second();
3541       if (reg2->is_stack()) {
3542         // Yuck
3543         reg2 = reg2->bias(out_preserve_stack_slots());
3544       }
3545       regs[i].set_pair(reg2, reg1);
3546     }
3547   }
3548 
3549   // results
3550   *arg_size = cnt;
3551   return regs;
3552 }
3553 
3554 // OSR Migration Code
3555 //
3556 // This code is used to convert interpreter frames into compiled frames.  It is
3557 // called from the very start of a compiled OSR nmethod.  A temp array is
3558 // allocated to hold the interesting bits of the interpreter frame.  All
3559 // active locks are inflated to allow them to move.  The displaced headers and
3560 // active interpreter locals are copied into the temp buffer.  Then we return
3561 // back to the compiled code.  The compiled code then pops the current
3562 // interpreter frame off the stack and pushes a new compiled frame.  Then it
3563 // copies the interpreter locals and displaced headers where it wants.
3564 // Finally it calls back to free the temp buffer.
3565 //
3566 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
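     //
     // Resulting buffer layout (sketch): buf[0 .. max_locals-1] hold the copied
     // locals, followed by one (displaced-header value, object) pair for each
     // active monitor.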
3567 
3568 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3569   assert(current == JavaThread::current(), "pre-condition");
3570 
3571   // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3572   // frame. The stack watermark code below ensures that the interpreted frame is processed
3573   // before it gets unwound. This is helpful as the size of the compiled frame could be
3574   // larger than the interpreted frame, which could result in the new frame not being
3575   // processed correctly.
3576   StackWatermarkSet::before_unwind(current);
3577 
3578   //
3579   // This code is dependent on the memory layout of the interpreter local
3580   // array and the monitors. On all of our platforms the layout is identical
3581   // so this code is shared. If some platform lays its arrays out
3582   // differently, then this code could move to platform-specific code or
3583   // the code here could be modified to copy items one at a time using
3584   // frame accessor methods and be platform independent.
3585 
3586   frame fr = current->last_frame();
3587   assert(fr.is_interpreted_frame(), "");
3588   assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3589 
3590   // Figure out how many monitors are active.
3591   int active_monitor_count = 0;
3592   for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3593        kptr < fr.interpreter_frame_monitor_begin();
3594        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3595     if (kptr->obj() != nullptr) active_monitor_count++;
3596   }
3597 
3598   // QQQ we could place the number of active monitors in the array so that
3599   // compiled code could double-check it.
3600 
3601   Method* moop = fr.interpreter_frame_method();
3602   int max_locals = moop->max_locals();
3603   // Allocate temp buffer, 1 word per local & 2 per active monitor
3604   int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3605   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
3606 
3607   // Copy the locals.  Order is preserved so that loading of longs works.
3608   // Since there's no GC I can copy the oops blindly.
3609   assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3610   Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3611                        (HeapWord*)&buf[0],
3612                        max_locals);
3613 
3614   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
3615   int i = max_locals;
3616   for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3617        kptr2 < fr.interpreter_frame_monitor_begin();
3618        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3619     if (kptr2->obj() != nullptr) {         // Avoid 'holes' in the monitor array
3620       BasicLock *lock = kptr2->lock();
3621       // Inflate so the object's header no longer refers to the BasicLock.
3622       if (lock->displaced_header().is_unlocked()) {
3623         // The object is locked and the resulting ObjectMonitor* will also be
3624         // locked so it can't be async deflated until ownership is dropped.
3625         // See the big comment in basicLock.cpp: BasicLock::move_to().
3626         ObjectSynchronizer::inflate_helper(kptr2->obj());
3627       }
3628       // Now the displaced header is free to move because the
3629       // object's header no longer refers to it.
3630       buf[i++] = (intptr_t)lock->displaced_header().value();
3631       buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3632     }
3633   }
3634   assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3635 
3636   RegisterMap map(current,
3637                   RegisterMap::UpdateMap::skip,
3638                   RegisterMap::ProcessFrames::include,
3639                   RegisterMap::WalkContinuation::skip);
3640   frame sender = fr.sender(&map);
3641   if (sender.is_interpreted_frame()) {
3642     current->push_cont_fastpath(sender.sp());
3643   }
3644 
3645   return buf;
3646 JRT_END
3647 
3648 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3649   FREE_C_HEAP_ARRAY(intptr_t, buf);
3650 JRT_END
3651 
3652 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3653   bool found = false;
3654   auto findblob = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3655     return (found = (b == CodeCache::find_blob(a->get_i2c_entry())));
3656   };
3657   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3658   _adapter_handler_table.iterate(findblob);
3659   return found;
3660 }
3661 
3662 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3663   bool found = false;
3664   auto findblob = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3665     if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3666       found = true;
3667       st->print("Adapter for signature: ");
3668       a->print_adapter_on(st);
3669       return true;
3670     } else {
3671       return false; // keep looking
3672     }
3673   };
3674   assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3675   _adapter_handler_table.iterate(findblob);
3676   assert(found, "Should have found handler");
3677 }
3678 
void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
  st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
  if (get_i2c_entry() != nullptr) {
    st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
  }
  if (get_c2i_entry() != nullptr) {
    st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
  }
  if (get_c2i_inline_entry() != nullptr) {
    st->print(" c2iVE: " INTPTR_FORMAT, p2i(get_c2i_inline_entry()));
  }
  if (get_c2i_inline_ro_entry() != nullptr) {
    st->print(" c2iVROE: " INTPTR_FORMAT, p2i(get_c2i_inline_ro_entry()));
  }
  if (get_c2i_unverified_entry() != nullptr) {
    st->print(" c2iUE: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
  }
  if (get_c2i_unverified_inline_entry() != nullptr) {
    st->print(" c2iUVE: " INTPTR_FORMAT, p2i(get_c2i_unverified_inline_entry()));
  }
  if (get_c2i_no_clinit_check_entry() != nullptr) {
    st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
  }
  st->cr();
}

#ifndef PRODUCT

void AdapterHandlerLibrary::print_statistics() {
  print_table_statistics();
}

#endif /* PRODUCT */

JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
  assert(current == JavaThread::current(), "pre-condition");
  StackOverflow* overflow_state = current->stack_overflow_state();
  overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
  overflow_state->set_reserved_stack_activation(current->stack_base());
JRT_END

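// Walk the stack from fr outward, issuing a warning (and a JFR event) for
// every ReservedStackAccess-annotated method found. Returns the outermost
// such activation, or an empty frame if none is found.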
frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
  ResourceMark rm(current);
  frame activation;
  CompiledMethod* nm = nullptr;
  int count = 1;

  assert(fr.is_java_frame(), "Must start on Java frame");

  RegisterMap map(JavaThread::current(),
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::skip,
                  RegisterMap::WalkContinuation::skip); // don't walk continuations
  for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
    if (!fr.is_java_frame()) {
      continue;
    }

    Method* method = nullptr;
    bool found = false;
    if (fr.is_interpreted_frame()) {
      method = fr.interpreter_frame_method();
      if (method != nullptr && method->has_reserved_stack_access()) {
        found = true;
      }
    } else {
      CodeBlob* cb = fr.cb();
      if (cb != nullptr && cb->is_compiled()) {
        nm = cb->as_compiled_method();
        method = nm->method();
        // scope_desc_near() must be used instead of scope_desc_at() because,
        // on SPARC, the pcDesc can be on the delay slot after the call instruction.
        for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
          method = sd->method();
          if (method != nullptr && method->has_reserved_stack_access()) {
            found = true;
          }
        }
      }
    }
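    // Deliberately no break here: every annotated frame on the stack is
    // reported, and 'activation' ends up holding the outermost one.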
    if (found) {
      activation = fr;
      warning("Potentially dangerous stack overflow in "
              "ReservedStackAccess annotated method %s [%d]",
              method->name_and_sig_as_C_string(), count++);
      EventReservedStackActivation event;
      if (event.should_commit()) {
        event.set_method(method);
        event.commit();
      }
    }
  }
  return activation;
}

void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
  // After any safepoint, just before going back to compiled code,
  // we inform the GC that we will be doing initializing writes to
  // this object in the future without emitting card-marks, so that
  // the GC may take any compensating steps.

  oop new_obj = current->vm_result();
  if (new_obj == nullptr) return;

  BarrierSet *bs = BarrierSet::barrier_set();
  bs->on_slowpath_allocation_exit(current, new_obj);
}

// We are at a compiled-code-to-interpreter call. We need backing
// buffers for all inline type arguments. Allocate an object array to
// hold them (convenient because once we're done with it we don't have
// to worry about freeing it).
oop SharedRuntime::allocate_inline_types_impl(JavaThread* current, methodHandle callee, bool allocate_receiver, TRAPS) {
  assert(InlineTypePassFieldsAsArgs, "no reason to call this");
  ResourceMark rm;

  int nb_slots = 0;
  InstanceKlass* holder = callee->method_holder();
  allocate_receiver &= !callee->is_static() && holder->is_inline_klass() && callee->is_scalarized_arg(0);
  if (allocate_receiver) {
    nb_slots++;
  }
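  // First pass: count the scalarized arguments. Scalarized-argument indices
  // include the receiver slot, so instance methods start counting the
  // signature arguments at 1.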
  int arg_num = callee->is_static() ? 0 : 1;
  for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
      nb_slots++;
    }
    if (bt != T_VOID) {
      arg_num++;
    }
  }
  objArrayOop array_oop = oopFactory::new_objectArray(nb_slots, CHECK_NULL);
  objArrayHandle array(THREAD, array_oop);
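  // Second pass: allocate a buffer instance for each scalarized argument,
  // in argument order.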
  arg_num = callee->is_static() ? 0 : 1;
  int i = 0;
  if (allocate_receiver) {
    InlineKlass* vk = InlineKlass::cast(holder);
    oop res = vk->allocate_instance(CHECK_NULL);
    array->obj_at_put(i++, res);
  }
  for (SignatureStream ss(callee->signature()); !ss.at_return_type(); ss.next()) {
    BasicType bt = ss.type();
    if ((bt == T_OBJECT || bt == T_PRIMITIVE_OBJECT) && callee->is_scalarized_arg(arg_num)) {
      InlineKlass* vk = ss.as_inline_klass(holder);
      assert(vk != nullptr, "Unexpected klass");
      oop res = vk->allocate_instance(CHECK_NULL);
      array->obj_at_put(i++, res);
    }
    if (bt != T_VOID) {
      arg_num++;
    }
  }
  return array();
}

JRT_ENTRY(void, SharedRuntime::allocate_inline_types(JavaThread* current, Method* callee_method, bool allocate_receiver))
  methodHandle callee(current, callee_method);
  oop array = SharedRuntime::allocate_inline_types_impl(current, callee, allocate_receiver, CHECK);
  current->set_vm_result(array);
  current->set_vm_result_2(callee()); // TODO: required to keep callee live?
JRT_END

// We're returning from an interpreted method: load each field into a
// register following the calling convention.
JRT_LEAF(void, SharedRuntime::load_inline_type_fields_in_regs(JavaThread* current, oopDesc* res))
{
  assert(res->klass()->is_inline_klass(), "only inline types here");
  ResourceMark rm;
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);
  assert(callerFrame.is_interpreted_frame(), "should be coming from interpreter");

  InlineKlass* vk = InlineKlass::cast(res->klass());

  const Array<SigEntry>* sig_vk = vk->extended_sig();
  const Array<VMRegPair>* regs = vk->return_regs();

  if (regs == nullptr) {
    // The fields of the inline klass don't fit in registers; bail out.
    return;
  }

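  // regs->at(0) is reserved for the oop of the inline type itself (checked
  // in the ASSERT block below), so the field registers start at index 1.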
  int j = 1;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_PRIMITIVE_OBJECT) {
      continue;
    }
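    // A T_VOID entry marks the high half of a long/double: it consumes the
    // second register of the pair but has no field of its own.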
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN:
      *(jboolean*)loc = res->bool_field(off);
      break;
    case T_CHAR:
      *(jchar*)loc = res->char_field(off);
      break;
    case T_BYTE:
      *(jbyte*)loc = res->byte_field(off);
      break;
    case T_SHORT:
      *(jshort*)loc = res->short_field(off);
      break;
    case T_INT: {
      *(jint*)loc = res->int_field(off);
      break;
    }
    case T_LONG:
#ifdef _LP64
      *(intptr_t*)loc = res->long_field(off);
#else
      Unimplemented();
#endif
      break;
    case T_OBJECT:
    case T_ARRAY: {
      *(oop*)loc = res->obj_field(off);
      break;
    }
    case T_FLOAT:
      *(jfloat*)loc = res->float_field(off);
      break;
    case T_DOUBLE:
      *(jdouble*)loc = res->double_field(off);
      break;
    default:
      ShouldNotReachHere();
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");

#ifdef ASSERT
  VMRegPair pair = regs->at(0);
  address loc = reg_map.location(pair.first(), nullptr);
  assert(*(oopDesc**)loc == res, "overwritten object");
#endif

  current->set_vm_result(res);
}
JRT_END

// We've returned to an interpreted method; the interpreter needs a
// reference to an inline type instance. Allocate it and initialize it
// from the field values in registers.
JRT_BLOCK_ENTRY(void, SharedRuntime::store_inline_type_fields_to_buf(JavaThread* current, intptr_t res))
{
  ResourceMark rm;
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::include,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stubFrame = current->last_frame();
  frame callerFrame = stubFrame.sender(&reg_map);

#ifdef ASSERT
  InlineKlass* verif_vk = InlineKlass::returned_inline_klass(reg_map);
#endif

  if (!is_set_nth_bit(res, 0)) {
    // We're not returning with inline type fields in registers (the
    // calling convention didn't allow it for this inline klass)
    assert(!Metaspace::contains((void*)res), "should be oop or pointer in buffer area");
    current->set_vm_result((oopDesc*)res);
    assert(verif_vk == nullptr, "broken calling convention");
    return;
  }

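  // Bit 0 of res tags it as an InlineKlass* (the fields were returned in
  // registers) rather than an oop; strip the tag to recover the klass.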
  clear_nth_bit(res, 0);
  InlineKlass* vk = (InlineKlass*)res;
  assert(verif_vk == vk, "broken calling convention");
  assert(Metaspace::contains((void*)res), "should be klass");

  // Allocate handles for every oop field so they are safe in case of
  // a safepoint when allocating
  GrowableArray<Handle> handles;
  vk->save_oop_fields(reg_map, handles);

  // Safepointing was unsafe until now because the oop fields lived only in
  // registers; with them handleized, the JRT_BLOCK below may safepoint.
  JRT_BLOCK;
  {
    JavaThread* THREAD = current;
    oop vt = vk->realloc_result(reg_map, handles, CHECK);
    current->set_vm_result(vt);
  }
  JRT_BLOCK_END;
}
JRT_END