1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/archiveBuilder.hpp"
  26 #include "cds/archiveUtils.inline.hpp"
  27 #include "cds/cdsConfig.hpp"
  28 #include "classfile/classLoader.hpp"
  29 #include "classfile/javaClasses.inline.hpp"
  30 #include "classfile/stringTable.hpp"
  31 #include "classfile/vmClasses.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/SCCache.hpp"
  34 #include "code/codeCache.hpp"
  35 #include "code/compiledIC.hpp"
  36 #include "code/nmethod.inline.hpp"
  37 #include "code/scopeDesc.hpp"
  38 #include "code/vtableStubs.hpp"
  39 #include "compiler/abstractCompiler.hpp"
  40 #include "compiler/compileBroker.hpp"
  41 #include "compiler/disassembler.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "interpreter/interpreter.hpp"
  45 #include "interpreter/interpreterRuntime.hpp"
  46 #include "jvm.h"
  47 #include "jfr/jfrEvents.hpp"
  48 #include "logging/log.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "metaprogramming/primitiveConversions.hpp"
  52 #include "oops/klass.hpp"
  53 #include "oops/method.inline.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "prims/forte.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "prims/jvmtiThreadState.hpp"
  59 #include "prims/methodHandles.hpp"
  60 #include "prims/nativeLookup.hpp"
  61 #include "runtime/arguments.hpp"
  62 #include "runtime/atomic.hpp"
  63 #include "runtime/basicLock.inline.hpp"
  64 #include "runtime/frame.inline.hpp"
  65 #include "runtime/handles.inline.hpp"
  66 #include "runtime/init.hpp"
  67 #include "runtime/interfaceSupport.inline.hpp"
  68 #include "runtime/java.hpp"
  69 #include "runtime/javaCalls.hpp"
  70 #include "runtime/jniHandles.inline.hpp"
  71 #include "runtime/perfData.inline.hpp"
  72 #include "runtime/sharedRuntime.hpp"
  73 #include "runtime/stackWatermarkSet.hpp"
  74 #include "runtime/stubRoutines.hpp"
  75 #include "runtime/synchronizer.inline.hpp"
  76 #include "runtime/timerTrace.hpp"
  77 #include "runtime/vframe.inline.hpp"
  78 #include "runtime/vframeArray.hpp"
  79 #include "runtime/vm_version.hpp"
  80 #include "services/management.hpp"
  81 #include "utilities/copy.hpp"
  82 #include "utilities/dtrace.hpp"
  83 #include "utilities/events.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/resourceHash.hpp"
  86 #include "utilities/macros.hpp"
  87 #include "utilities/xmlstream.hpp"
  88 #ifdef COMPILER1
  89 #include "c1/c1_Runtime1.hpp"
  90 #endif
  91 #if INCLUDE_JFR
  92 #include "jfr/jfr.hpp"
  93 #endif
  94 
  95 // Shared runtime stub routines reside in their own unique blob with a
  96 // single entry point
  97 
  98 
  99 #define SHARED_STUB_FIELD_DEFINE(name, type) \
 100   type        SharedRuntime::BLOB_FIELD_NAME(name);
 101   SHARED_STUBS_DO(SHARED_STUB_FIELD_DEFINE)
 102 #undef SHARED_STUB_FIELD_DEFINE
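// SHARED_STUBS_DO is an X-macro list: it invokes the macro passed to it once
// per shared stub, so the expansion above defines one static blob field per
// stub and the expansion further below builds a matching table of stub names.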
 103 
 104 nmethod*            SharedRuntime::_cont_doYield_stub;
 105 
 106 PerfTickCounters* SharedRuntime::_perf_resolve_opt_virtual_total_time = nullptr;
 107 PerfTickCounters* SharedRuntime::_perf_resolve_virtual_total_time     = nullptr;
 108 PerfTickCounters* SharedRuntime::_perf_resolve_static_total_time      = nullptr;
 109 PerfTickCounters* SharedRuntime::_perf_handle_wrong_method_total_time = nullptr;
 110 PerfTickCounters* SharedRuntime::_perf_ic_miss_total_time             = nullptr;
 111 
 112 #define SHARED_STUB_NAME_DECLARE(name, type) "Shared Runtime " # name "_blob",
 113 const char *SharedRuntime::_stub_names[] = {
 114   SHARED_STUBS_DO(SHARED_STUB_NAME_DECLARE)
 115 };
 116 
 117 //----------------------------generate_stubs-----------------------------------
 118 void SharedRuntime::generate_initial_stubs() {
 119   // Build this early so it's available for the interpreter.
 120   _throw_StackOverflowError_blob =
 121     generate_throw_exception(SharedStubId::throw_StackOverflowError_id,
 122                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError));
 123 }
 124 
 125 void SharedRuntime::generate_stubs() {
 126   _wrong_method_blob =
 127     generate_resolve_blob(SharedStubId::wrong_method_id,
 128                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method));
 129   _wrong_method_abstract_blob =
 130     generate_resolve_blob(SharedStubId::wrong_method_abstract_id,
 131                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_abstract));
 132   _ic_miss_blob =
 133     generate_resolve_blob(SharedStubId::ic_miss_id,
 134                           CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss));
 135   _resolve_opt_virtual_call_blob =
 136     generate_resolve_blob(SharedStubId::resolve_opt_virtual_call_id,
 137                           CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C));
 138   _resolve_virtual_call_blob =
 139     generate_resolve_blob(SharedStubId::resolve_virtual_call_id,
 140                           CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C));
 141   _resolve_static_call_blob =
 142     generate_resolve_blob(SharedStubId::resolve_static_call_id,
 143                           CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C));
 144 
 145   _throw_delayed_StackOverflowError_blob =
 146     generate_throw_exception(SharedStubId::throw_delayed_StackOverflowError_id,
 147                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError));
 148 
 149   _throw_AbstractMethodError_blob =
 150     generate_throw_exception(SharedStubId::throw_AbstractMethodError_id,
 151                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError));
 152 
 153   _throw_IncompatibleClassChangeError_blob =
 154     generate_throw_exception(SharedStubId::throw_IncompatibleClassChangeError_id,
 155                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError));
 156 
 157   _throw_NullPointerException_at_call_blob =
 158     generate_throw_exception(SharedStubId::throw_NullPointerException_at_call_id,
 159                              CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call));
 160 
 161 #if COMPILER2_OR_JVMCI
 162   // Vectors are generated only by C2 and JVMCI.
 163   bool support_wide = is_wide_vector(MaxVectorSize);
 164   if (support_wide) {
 165     _polling_page_vectors_safepoint_handler_blob =
 166       generate_handler_blob(SharedStubId::polling_page_vectors_safepoint_handler_id,
 167                             CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 168   }
 169 #endif // COMPILER2_OR_JVMCI
 170   _polling_page_safepoint_handler_blob =
 171     generate_handler_blob(SharedStubId::polling_page_safepoint_handler_id,
 172                           CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 173   _polling_page_return_handler_blob =
 174     generate_handler_blob(SharedStubId::polling_page_return_handler_id,
 175                           CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception));
 176 
 177   generate_deopt_blob();
 178 
 179   if (UsePerfData) {
 180     EXCEPTION_MARK;
  181     NEWPERFTICKCOUNTERS(_perf_resolve_opt_virtual_total_time, SUN_CI, "resolve_opt_virtual_call");
  182     NEWPERFTICKCOUNTERS(_perf_resolve_virtual_total_time,     SUN_CI, "resolve_virtual_call");
  183     NEWPERFTICKCOUNTERS(_perf_resolve_static_total_time,      SUN_CI, "resolve_static_call");
  184     NEWPERFTICKCOUNTERS(_perf_handle_wrong_method_total_time, SUN_CI, "handle_wrong_method");
  185     NEWPERFTICKCOUNTERS(_perf_ic_miss_total_time,             SUN_CI, "ic_miss");
 186     if (HAS_PENDING_EXCEPTION) {
 187       vm_exit_during_initialization("SharedRuntime::generate_stubs() failed unexpectedly");
 188     }
 189   }
 190 }
 191 
 192 void SharedRuntime::init_adapter_library() {
 193   AdapterHandlerLibrary::initialize();
 194 }
 195 
 196 static void print_counter_on(outputStream* st, const char* name, PerfTickCounters* counter, uint cnt) {
 197   st->print("  %-28s " JLONG_FORMAT_W(6) "us", name, counter->elapsed_counter_value_us());
 198   if (TraceThreadTime) {
 199     st->print(" (elapsed) " JLONG_FORMAT_W(6) "us (thread)", counter->thread_counter_value_us());
 200   }
 201   st->print(" / %5d events", cnt);
 202   st->cr();
 203 }
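// Illustrative output of print_counter_on (figures are hypothetical):
//   resolve_static_call:           1234us /   567 events
// When TraceThreadTime is enabled, a per-thread time column is printed as well.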
 204 
 205 void SharedRuntime::print_counters_on(outputStream* st) {
 206   st->print_cr("SharedRuntime:");
 207   if (UsePerfData) {
 208     print_counter_on(st, "resolve_opt_virtual_call:", _perf_resolve_opt_virtual_total_time, _resolve_opt_virtual_ctr);
 209     print_counter_on(st, "resolve_virtual_call:",     _perf_resolve_virtual_total_time,     _resolve_virtual_ctr);
 210     print_counter_on(st, "resolve_static_call:",      _perf_resolve_static_total_time,      _resolve_static_ctr);
 211     print_counter_on(st, "handle_wrong_method:",      _perf_handle_wrong_method_total_time, _wrong_method_ctr);
 212     print_counter_on(st, "ic_miss:",                  _perf_ic_miss_total_time,             _ic_miss_ctr);
 213 
 214     jlong total_elapsed_time_us = Management::ticks_to_us(_perf_resolve_opt_virtual_total_time->elapsed_counter_value() +
 215                                                           _perf_resolve_virtual_total_time->elapsed_counter_value() +
 216                                                           _perf_resolve_static_total_time->elapsed_counter_value() +
 217                                                           _perf_handle_wrong_method_total_time->elapsed_counter_value() +
 218                                                           _perf_ic_miss_total_time->elapsed_counter_value());
 219     st->print("Total:                      " JLONG_FORMAT_W(5) "us", total_elapsed_time_us);
 220     if (TraceThreadTime) {
 221       jlong total_thread_time_us = Management::ticks_to_us(_perf_resolve_opt_virtual_total_time->thread_counter_value() +
 222                                                            _perf_resolve_virtual_total_time->thread_counter_value() +
 223                                                            _perf_resolve_static_total_time->thread_counter_value() +
 224                                                            _perf_handle_wrong_method_total_time->thread_counter_value() +
 225                                                            _perf_ic_miss_total_time->thread_counter_value());
 226       st->print(" (elapsed) " JLONG_FORMAT_W(5) "us (thread)", total_thread_time_us);
 227 
 228     }
 229     st->cr();
 230   } else {
 231     st->print_cr("  no data (UsePerfData is turned off)");
 232   }
 233 }
 234 
 235 #if INCLUDE_JFR
 236 //------------------------------generate jfr runtime stubs ------
 237 void SharedRuntime::generate_jfr_stubs() {
 238   ResourceMark rm;
 239   const char* timer_msg = "SharedRuntime generate_jfr_stubs";
 240   TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
 241 
 242   _jfr_write_checkpoint_blob = generate_jfr_write_checkpoint();
 243   _jfr_return_lease_blob = generate_jfr_return_lease();
 244 }
 245 
 246 #endif // INCLUDE_JFR
 247 
 248 #include <math.h>
 249 
 250 // Implementation of SharedRuntime
 251 
 252 // For statistics
 253 uint SharedRuntime::_ic_miss_ctr = 0;
 254 uint SharedRuntime::_wrong_method_ctr = 0;
 255 uint SharedRuntime::_resolve_static_ctr = 0;
 256 uint SharedRuntime::_resolve_virtual_ctr = 0;
 257 uint SharedRuntime::_resolve_opt_virtual_ctr = 0;
 258 
 259 #ifndef PRODUCT
 260 uint SharedRuntime::_implicit_null_throws = 0;
 261 uint SharedRuntime::_implicit_div0_throws = 0;
 262 
 263 int64_t SharedRuntime::_nof_normal_calls = 0;
 264 int64_t SharedRuntime::_nof_inlined_calls = 0;
 265 int64_t SharedRuntime::_nof_megamorphic_calls = 0;
 266 int64_t SharedRuntime::_nof_static_calls = 0;
 267 int64_t SharedRuntime::_nof_inlined_static_calls = 0;
 268 int64_t SharedRuntime::_nof_interface_calls = 0;
 269 int64_t SharedRuntime::_nof_inlined_interface_calls = 0;
 270 
 271 uint SharedRuntime::_new_instance_ctr=0;
 272 uint SharedRuntime::_new_array_ctr=0;
 273 uint SharedRuntime::_multi2_ctr=0;
 274 uint SharedRuntime::_multi3_ctr=0;
 275 uint SharedRuntime::_multi4_ctr=0;
 276 uint SharedRuntime::_multi5_ctr=0;
 277 uint SharedRuntime::_mon_enter_stub_ctr=0;
 278 uint SharedRuntime::_mon_exit_stub_ctr=0;
 279 uint SharedRuntime::_mon_enter_ctr=0;
 280 uint SharedRuntime::_mon_exit_ctr=0;
 281 uint SharedRuntime::_partial_subtype_ctr=0;
 282 uint SharedRuntime::_jbyte_array_copy_ctr=0;
 283 uint SharedRuntime::_jshort_array_copy_ctr=0;
 284 uint SharedRuntime::_jint_array_copy_ctr=0;
 285 uint SharedRuntime::_jlong_array_copy_ctr=0;
 286 uint SharedRuntime::_oop_array_copy_ctr=0;
 287 uint SharedRuntime::_checkcast_array_copy_ctr=0;
 288 uint SharedRuntime::_unsafe_array_copy_ctr=0;
 289 uint SharedRuntime::_generic_array_copy_ctr=0;
 290 uint SharedRuntime::_slow_array_copy_ctr=0;
 291 uint SharedRuntime::_find_handler_ctr=0;
 292 uint SharedRuntime::_rethrow_ctr=0;
 293 uint SharedRuntime::_unsafe_set_memory_ctr=0;
 294 
 295 int     SharedRuntime::_ICmiss_index                    = 0;
 296 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
 297 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
 298 
 299 
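// Record an inline-cache miss observed at 'at'. Call sites are kept in a small
// fixed-size table; once maxICmiss_count entries are in use, additional new
// sites all reuse (overwrite) the last slot.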
 300 void SharedRuntime::trace_ic_miss(address at) {
 301   for (int i = 0; i < _ICmiss_index; i++) {
 302     if (_ICmiss_at[i] == at) {
 303       _ICmiss_count[i]++;
 304       return;
 305     }
 306   }
 307   int index = _ICmiss_index++;
 308   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
 309   _ICmiss_at[index] = at;
 310   _ICmiss_count[index] = 1;
 311 }
 312 
 313 void SharedRuntime::print_ic_miss_histogram_on(outputStream* st) {
 314   if (ICMissHistogram) {
 315     st->print_cr("IC Miss Histogram:");
 316     int tot_misses = 0;
 317     for (int i = 0; i < _ICmiss_index; i++) {
 318       st->print_cr("  at: " INTPTR_FORMAT "  nof: %d", p2i(_ICmiss_at[i]), _ICmiss_count[i]);
 319       tot_misses += _ICmiss_count[i];
 320     }
 321     st->print_cr("Total IC misses: %7d", tot_misses);
 322   }
 323 }
 324 #endif // !PRODUCT
 325 
 326 
 327 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
 328   return x * y;
 329 JRT_END
 330 
 331 
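// ldiv/lrem below special-case the one overflowing combination, min_jlong / -1:
// the true quotient does not fit in a jlong and the hardware divide would trap
// on many CPUs, while Java semantics define the results as min_jlong (division)
// and 0 (remainder).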
 332 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
 333   if (x == min_jlong && y == CONST64(-1)) {
 334     return x;
 335   } else {
 336     return x / y;
 337   }
 338 JRT_END
 339 
 340 
 341 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
 342   if (x == min_jlong && y == CONST64(-1)) {
 343     return 0;
 344   } else {
 345     return x % y;
 346   }
 347 JRT_END
 348 
 349 
 350 #ifdef _WIN64
 351 const juint  float_sign_mask  = 0x7FFFFFFF;
 352 const juint  float_infinity   = 0x7F800000;
 353 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
 354 const julong double_infinity  = CONST64(0x7FF0000000000000);
 355 #endif
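// The masks above clear the IEEE-754 sign bit so that +Infinity and -Infinity
// both compare equal to the infinity bit pattern; frem/drem below use this to
// detect infinite operands and work around the Windows fmod behavior.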
 356 
 357 #if !defined(X86)
 358 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
 359 #ifdef _WIN64
 360   // 64-bit Windows on amd64 returns the wrong values for
 361   // infinity operands.
 362   juint xbits = PrimitiveConversions::cast<juint>(x);
 363   juint ybits = PrimitiveConversions::cast<juint>(y);
 364   // x Mod Infinity == x unless x is infinity
 365   if (((xbits & float_sign_mask) != float_infinity) &&
 366        ((ybits & float_sign_mask) == float_infinity) ) {
 367     return x;
 368   }
 369   return ((jfloat)fmod_winx64((double)x, (double)y));
 370 #else
 371   return ((jfloat)fmod((double)x,(double)y));
 372 #endif
 373 JRT_END
 374 
 375 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
 376 #ifdef _WIN64
 377   julong xbits = PrimitiveConversions::cast<julong>(x);
 378   julong ybits = PrimitiveConversions::cast<julong>(y);
 379   // x Mod Infinity == x unless x is infinity
 380   if (((xbits & double_sign_mask) != double_infinity) &&
 381        ((ybits & double_sign_mask) == double_infinity) ) {
 382     return x;
 383   }
 384   return ((jdouble)fmod_winx64((double)x, (double)y));
 385 #else
 386   return ((jdouble)fmod((double)x,(double)y));
 387 #endif
 388 JRT_END
 389 #endif // !X86
 390 
 391 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
 392   return (jfloat)x;
 393 JRT_END
 394 
 395 #ifdef __SOFTFP__
 396 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
 397   return x + y;
 398 JRT_END
 399 
 400 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
 401   return x - y;
 402 JRT_END
 403 
 404 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
 405   return x * y;
 406 JRT_END
 407 
 408 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
 409   return x / y;
 410 JRT_END
 411 
 412 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
 413   return x + y;
 414 JRT_END
 415 
 416 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
 417   return x - y;
 418 JRT_END
 419 
 420 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
 421   return x * y;
 422 JRT_END
 423 
 424 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
 425   return x / y;
 426 JRT_END
 427 
 428 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
 429   return (jdouble)x;
 430 JRT_END
 431 
 432 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
 433   return (jdouble)x;
 434 JRT_END
 435 
 436 JRT_LEAF(int,  SharedRuntime::fcmpl(float x, float y))
 437   return x>y ? 1 : (x==y ? 0 : -1);  /* x<y or is_nan*/
 438 JRT_END
 439 
 440 JRT_LEAF(int,  SharedRuntime::fcmpg(float x, float y))
 441   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
 442 JRT_END
 443 
 444 JRT_LEAF(int,  SharedRuntime::dcmpl(double x, double y))
 445   return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
 446 JRT_END
 447 
 448 JRT_LEAF(int,  SharedRuntime::dcmpg(double x, double y))
 449   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
 450 JRT_END
 451 
  452 // Functions that return the opposite of the aeabi comparison functions for NaN operands.
 453 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
 454   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 455 JRT_END
 456 
 457 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
 458   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 459 JRT_END
 460 
 461 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
 462   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 463 JRT_END
 464 
 465 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
 466   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 467 JRT_END
 468 
 469 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
 470   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 471 JRT_END
 472 
 473 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
 474   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 475 JRT_END
 476 
 477 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
 478   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 479 JRT_END
 480 
 481 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
 482   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
 483 JRT_END
 484 
 485 // Intrinsics make gcc generate code for these.
 486 float  SharedRuntime::fneg(float f)   {
 487   return -f;
 488 }
 489 
 490 double SharedRuntime::dneg(double f)  {
 491   return -f;
 492 }
 493 
 494 #endif // __SOFTFP__
 495 
 496 #if defined(__SOFTFP__) || defined(E500V2)
 497 // Intrinsics make gcc generate code for these.
 498 double SharedRuntime::dabs(double f)  {
 499   return (f <= (double)0.0) ? (double)0.0 - f : f;
 500 }
 501 
 502 #endif
 503 
 504 #if defined(__SOFTFP__) || defined(PPC)
 505 double SharedRuntime::dsqrt(double f) {
 506   return sqrt(f);
 507 }
 508 #endif
 509 
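// The float/double -> integral conversions below implement Java's saturating
// semantics (JLS 5.1.3): NaN converts to 0 and out-of-range values clamp to
// the target type's min/max, e.g. f2i(NaN) == 0 and d2l(1e30) == max_jlong.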
 510 JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
 511   if (g_isnan(x))
 512     return 0;
 513   if (x >= (jfloat) max_jint)
 514     return max_jint;
 515   if (x <= (jfloat) min_jint)
 516     return min_jint;
 517   return (jint) x;
 518 JRT_END
 519 
 520 
 521 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
 522   if (g_isnan(x))
 523     return 0;
 524   if (x >= (jfloat) max_jlong)
 525     return max_jlong;
 526   if (x <= (jfloat) min_jlong)
 527     return min_jlong;
 528   return (jlong) x;
 529 JRT_END
 530 
 531 
 532 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
 533   if (g_isnan(x))
 534     return 0;
 535   if (x >= (jdouble) max_jint)
 536     return max_jint;
 537   if (x <= (jdouble) min_jint)
 538     return min_jint;
 539   return (jint) x;
 540 JRT_END
 541 
 542 
 543 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
 544   if (g_isnan(x))
 545     return 0;
 546   if (x >= (jdouble) max_jlong)
 547     return max_jlong;
 548   if (x <= (jdouble) min_jlong)
 549     return min_jlong;
 550   return (jlong) x;
 551 JRT_END
 552 
 553 
 554 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
 555   return (jfloat)x;
 556 JRT_END
 557 
 558 
 559 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
 560   return (jfloat)x;
 561 JRT_END
 562 
 563 
 564 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
 565   return (jdouble)x;
 566 JRT_END
 567 
 568 
 569 // Exception handling across interpreter/compiler boundaries
 570 //
 571 // exception_handler_for_return_address(...) returns the continuation address.
 572 // The continuation address is the entry point of the exception handler of the
 573 // previous frame depending on the return address.
 574 
 575 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* current, address return_address) {
 576   // Note: This is called when we have unwound the frame of the callee that did
 577   // throw an exception. So far, no check has been performed by the StackWatermarkSet.
 578   // Notably, the stack is not walkable at this point, and hence the check must
  579   // be deferred until later. Specifically, any handler returned by this function
  580   // will eventually be dispatched to and will perform the deferred check via
  581   // StackWatermarkSet::after_unwind at a point where the stack is walkable.
 582   assert(frame::verify_return_pc(return_address), "must be a return address: " INTPTR_FORMAT, p2i(return_address));
 583   assert(current->frames_to_pop_failed_realloc() == 0 || Interpreter::contains(return_address), "missed frames to pop?");
 584 
 585   // Reset method handle flag.
 586   current->set_is_method_handle_return(false);
 587 
 588 #if INCLUDE_JVMCI
 589   // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear
 590   // and other exception handler continuations do not read it
 591   current->set_exception_pc(nullptr);
 592 #endif // INCLUDE_JVMCI
 593 
 594   if (Continuation::is_return_barrier_entry(return_address)) {
 595     return StubRoutines::cont_returnBarrierExc();
 596   }
 597 
 598   // The fastest case first
 599   CodeBlob* blob = CodeCache::find_blob(return_address);
 600   nmethod* nm = (blob != nullptr) ? blob->as_nmethod_or_null() : nullptr;
 601   if (nm != nullptr) {
 602     // Set flag if return address is a method handle call site.
 603     current->set_is_method_handle_return(nm->is_method_handle_return(return_address));
 604     // native nmethods don't have exception handlers
 605     assert(!nm->is_native_method() || nm->method()->is_continuation_enter_intrinsic(), "no exception handler");
 606     assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
 607     if (nm->is_deopt_pc(return_address)) {
 608       // If we come here because of a stack overflow, the stack may be
  609       // unguarded. Reguard the stack; otherwise, if we return to the
  610       // deopt blob and the stack bang causes a stack overflow, we
 611       // crash.
 612       StackOverflow* overflow_state = current->stack_overflow_state();
 613       bool guard_pages_enabled = overflow_state->reguard_stack_if_needed();
 614       if (overflow_state->reserved_stack_activation() != current->stack_base()) {
 615         overflow_state->set_reserved_stack_activation(current->stack_base());
 616       }
 617       assert(guard_pages_enabled, "stack banging in deopt blob may cause crash");
 618       // The deferred StackWatermarkSet::after_unwind check will be performed in
 619       // Deoptimization::fetch_unroll_info (with exec_mode == Unpack_exception)
 620       return SharedRuntime::deopt_blob()->unpack_with_exception();
 621     } else {
 622       // The deferred StackWatermarkSet::after_unwind check will be performed in
 623       // * OptoRuntime::handle_exception_C_helper for C2 code
 624       // * exception_handler_for_pc_helper via Runtime1::handle_exception_from_callee_id for C1 code
 625       return nm->exception_begin();
 626     }
 627   }
 628 
 629   // Entry code
 630   if (StubRoutines::returns_to_call_stub(return_address)) {
 631     // The deferred StackWatermarkSet::after_unwind check will be performed in
 632     // JavaCallWrapper::~JavaCallWrapper
 633     return StubRoutines::catch_exception_entry();
 634   }
 635   if (blob != nullptr && blob->is_upcall_stub()) {
 636     return StubRoutines::upcall_stub_exception_handler();
 637   }
 638   // Interpreted code
 639   if (Interpreter::contains(return_address)) {
 640     // The deferred StackWatermarkSet::after_unwind check will be performed in
 641     // InterpreterRuntime::exception_handler_for_exception
 642     return Interpreter::rethrow_exception_entry();
 643   }
 644 
 645   guarantee(blob == nullptr || !blob->is_runtime_stub(), "caller should have skipped stub");
 646   guarantee(!VtableStubs::contains(return_address), "null exceptions in vtables should have been handled already!");
 647 
 648 #ifndef PRODUCT
 649   { ResourceMark rm;
 650     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", p2i(return_address));
 651     os::print_location(tty, (intptr_t)return_address);
 652     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
 653     tty->print_cr("b) other problem");
 654   }
 655 #endif // PRODUCT
 656   ShouldNotReachHere();
 657   return nullptr;
 658 }
 659 
 660 
 661 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* current, address return_address))
 662   return raw_exception_handler_for_return_address(current, return_address);
 663 JRT_END
 664 
 665 
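// Returns the entry point of the safepoint handler stub that matches the kind
// of poll that faulted at 'pc': a poll at a return, a poll in code that uses
// wide vectors, or an ordinary loop/backedge poll.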
 666 address SharedRuntime::get_poll_stub(address pc) {
 667   address stub;
 668   // Look up the code blob
 669   CodeBlob *cb = CodeCache::find_blob(pc);
 670 
 671   // Should be an nmethod
 672   guarantee(cb != nullptr && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");
 673 
 674   // Look up the relocation information
 675   assert(cb->as_nmethod()->is_at_poll_or_poll_return(pc),
 676       "safepoint polling: type must be poll at pc " INTPTR_FORMAT, p2i(pc));
 677 
 678 #ifdef ASSERT
 679   if (!((NativeInstruction*)pc)->is_safepoint_poll()) {
 680     tty->print_cr("bad pc: " PTR_FORMAT, p2i(pc));
 681     Disassembler::decode(cb);
 682     fatal("Only polling locations are used for safepoint");
 683   }
 684 #endif
 685 
 686   bool at_poll_return = cb->as_nmethod()->is_at_poll_return(pc);
 687   bool has_wide_vectors = cb->as_nmethod()->has_wide_vectors();
 688   if (at_poll_return) {
 689     assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
 690            "polling page return stub not created yet");
 691     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
 692   } else if (has_wide_vectors) {
 693     assert(SharedRuntime::polling_page_vectors_safepoint_handler_blob() != nullptr,
 694            "polling page vectors safepoint stub not created yet");
 695     stub = SharedRuntime::polling_page_vectors_safepoint_handler_blob()->entry_point();
 696   } else {
 697     assert(SharedRuntime::polling_page_safepoint_handler_blob() != nullptr,
 698            "polling page safepoint stub not created yet");
 699     stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
 700   }
 701   log_debug(safepoint)("... found polling page %s exception at pc = "
 702                        INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
 703                        at_poll_return ? "return" : "loop",
 704                        (intptr_t)pc, (intptr_t)stub);
 705   return stub;
 706 }
 707 
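// Throw h_exception in 'current', first posting a JVMTI exception event (if an
// agent requested them) and, when JVMCI is enabled, recording exception_seen in
// the MethodData if the raising bytecode is an invoke.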
 708 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Handle h_exception) {
 709   if (JvmtiExport::can_post_on_exceptions()) {
 710     vframeStream vfst(current, true);
 711     methodHandle method = methodHandle(current, vfst.method());
 712     address bcp = method()->bcp_from(vfst.bci());
 713     JvmtiExport::post_exception_throw(current, method(), bcp, h_exception());
 714   }
 715 
 716 #if INCLUDE_JVMCI
 717   if (EnableJVMCI) {
 718     vframeStream vfst(current, true);
 719     methodHandle method = methodHandle(current, vfst.method());
 720     int bci = vfst.bci();
 721     MethodData* trap_mdo = method->method_data();
 722     if (trap_mdo != nullptr) {
 723       // Set exception_seen if the exceptional bytecode is an invoke
 724       Bytecode_invoke call = Bytecode_invoke_check(method, bci);
 725       if (call.is_valid()) {
 726         ResourceMark rm(current);
 727 
 728         // Lock to read ProfileData, and ensure lock is not broken by a safepoint
 729         MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
 730 
 731         ProfileData* pdata = trap_mdo->allocate_bci_to_data(bci, nullptr);
 732         if (pdata != nullptr && pdata->is_BitData()) {
 733           BitData* bit_data = (BitData*) pdata;
 734           bit_data->set_exception_seen();
 735         }
 736       }
 737     }
 738   }
 739 #endif
 740 
 741   Exceptions::_throw(current, __FILE__, __LINE__, h_exception);
 742 }
 743 
 744 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* current, Symbol* name, const char *message) {
 745   Handle h_exception = Exceptions::new_exception(current, name, message);
 746   throw_and_post_jvmti_exception(current, h_exception);
 747 }
 748 
 749 #if INCLUDE_JVMTI
 750 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_start(oopDesc* vt, jboolean hide, JavaThread* current))
 751   assert(hide == JNI_FALSE, "must be VTMS transition finish");
 752   jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
 753   JvmtiVTMSTransitionDisabler::VTMS_vthread_start(vthread);
 754   JNIHandles::destroy_local(vthread);
 755 JRT_END
 756 
 757 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_end(oopDesc* vt, jboolean hide, JavaThread* current))
 758   assert(hide == JNI_TRUE, "must be VTMS transition start");
 759   jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
 760   JvmtiVTMSTransitionDisabler::VTMS_vthread_end(vthread);
 761   JNIHandles::destroy_local(vthread);
 762 JRT_END
 763 
 764 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_mount(oopDesc* vt, jboolean hide, JavaThread* current))
 765   jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
 766   JvmtiVTMSTransitionDisabler::VTMS_vthread_mount(vthread, hide);
 767   JNIHandles::destroy_local(vthread);
 768 JRT_END
 769 
 770 JRT_ENTRY(void, SharedRuntime::notify_jvmti_vthread_unmount(oopDesc* vt, jboolean hide, JavaThread* current))
 771   jobject vthread = JNIHandles::make_local(const_cast<oopDesc*>(vt));
 772   JvmtiVTMSTransitionDisabler::VTMS_vthread_unmount(vthread, hide);
 773   JNIHandles::destroy_local(vthread);
 774 JRT_END
 775 #endif // INCLUDE_JVMTI
 776 
  777 // The interpreter code that calls this tracing function is only
  778 // called/generated when UL is enabled for the redefine, class and obsolete
  779 // tags at the right level. Since obsolete methods are never compiled, we don't have
 780 // to modify the compilers to generate calls to this function.
 781 //
 782 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
 783     JavaThread* thread, Method* method))
 784   if (method->is_obsolete()) {
 785     // We are calling an obsolete method, but this is not necessarily
 786     // an error. Our method could have been redefined just after we
 787     // fetched the Method* from the constant pool.
 788     ResourceMark rm;
 789     log_trace(redefine, class, obsolete)("calling obsolete method '%s'", method->name_and_sig_as_C_string());
 790   }
 791 
 792   LogStreamHandle(Trace, interpreter, bytecode) log;
 793   if (log.is_enabled()) {
 794     ResourceMark rm;
 795     log.print("method entry: " INTPTR_FORMAT " %s %s%s%s%s",
 796               p2i(thread),
 797               (method->is_static() ? "static" : "virtual"),
 798               method->name_and_sig_as_C_string(),
 799               (method->is_native() ? " native" : ""),
 800               (thread->class_being_initialized() != nullptr ? " clinit" : ""),
 801               (method->method_holder()->is_initialized() ? "" : " being_initialized"));
 802   }
 803   return 0;
 804 JRT_END
 805 
  806 // ret_pc points into the caller; we are returning the caller's exception handler
  807 // for the given exception.
  808 // Note that the implementation of this method assumes it's only called when an exception has actually occurred.
 809 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
 810                                                     bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred) {
 811   assert(nm != nullptr, "must exist");
 812   ResourceMark rm;
 813 
 814 #if INCLUDE_JVMCI
 815   if (nm->is_compiled_by_jvmci()) {
 816     // lookup exception handler for this pc
 817     int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
 818     ExceptionHandlerTable table(nm);
 819     HandlerTableEntry *t = table.entry_for(catch_pco, -1, 0);
 820     if (t != nullptr) {
 821       return nm->code_begin() + t->pco();
 822     } else {
 823       return Deoptimization::deoptimize_for_missing_exception_handler(nm);
 824     }
 825   }
 826 #endif // INCLUDE_JVMCI
 827 
 828   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
 829   // determine handler bci, if any
 830   EXCEPTION_MARK;
 831 
 832   int handler_bci = -1;
 833   int scope_depth = 0;
 834   if (!force_unwind) {
 835     int bci = sd->bci();
 836     bool recursive_exception = false;
 837     do {
 838       bool skip_scope_increment = false;
 839       // exception handler lookup
 840       Klass* ek = exception->klass();
 841       methodHandle mh(THREAD, sd->method());
 842       handler_bci = Method::fast_exception_handler_bci_for(mh, ek, bci, THREAD);
 843       if (HAS_PENDING_EXCEPTION) {
 844         recursive_exception = true;
 845         // We threw an exception while trying to find the exception handler.
 846         // Transfer the new exception to the exception handle which will
 847         // be set into thread local storage, and do another lookup for an
 848         // exception handler for this exception, this time starting at the
 849         // BCI of the exception handler which caused the exception to be
 850         // thrown (bugs 4307310 and 4546590). Set "exception" reference
 851         // argument to ensure that the correct exception is thrown (4870175).
 852         recursive_exception_occurred = true;
 853         exception = Handle(THREAD, PENDING_EXCEPTION);
 854         CLEAR_PENDING_EXCEPTION;
 855         if (handler_bci >= 0) {
 856           bci = handler_bci;
 857           handler_bci = -1;
 858           skip_scope_increment = true;
 859         }
 860       }
 861       else {
 862         recursive_exception = false;
 863       }
 864       if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
 865         sd = sd->sender();
 866         if (sd != nullptr) {
 867           bci = sd->bci();
 868         }
 869         ++scope_depth;
 870       }
 871     } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != nullptr));
 872   }
 873 
 874   // found handling method => lookup exception handler
 875   int catch_pco = pointer_delta_as_int(ret_pc, nm->code_begin());
 876 
 877   ExceptionHandlerTable table(nm);
 878   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
 879   if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
 880     // Allow abbreviated catch tables.  The idea is to allow a method
 881     // to materialize its exceptions without committing to the exact
 882     // routing of exceptions.  In particular this is needed for adding
 883     // a synthetic handler to unlock monitors when inlining
 884     // synchronized methods since the unlock path isn't represented in
 885     // the bytecodes.
 886     t = table.entry_for(catch_pco, -1, 0);
 887   }
 888 
 889 #ifdef COMPILER1
 890   if (t == nullptr && nm->is_compiled_by_c1()) {
 891     assert(nm->unwind_handler_begin() != nullptr, "");
 892     return nm->unwind_handler_begin();
 893   }
 894 #endif
 895 
 896   if (t == nullptr) {
 897     ttyLocker ttyl;
 898     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d, catch_pco: %d", p2i(ret_pc), handler_bci, catch_pco);
 899     tty->print_cr("   Exception:");
 900     exception->print();
 901     tty->cr();
 902     tty->print_cr(" Compiled exception table :");
 903     table.print();
 904     nm->print();
 905     nm->print_code();
 906     guarantee(false, "missing exception handler");
 907     return nullptr;
 908   }
 909 
 910   if (handler_bci != -1) { // did we find a handler in this method?
 911     sd->method()->set_exception_handler_entered(handler_bci); // profile
 912   }
 913   return nm->code_begin() + t->pco();
 914 }
 915 
 916 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* current))
 917   // These errors occur only at call sites
 918   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_AbstractMethodError());
 919 JRT_END
 920 
 921 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* current))
 922   // These errors occur only at call sites
 923   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
 924 JRT_END
 925 
 926 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* current))
 927   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 928 JRT_END
 929 
 930 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* current))
 931   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
 932 JRT_END
 933 
 934 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* current))
 935   // This entry point is effectively only used for NullPointerExceptions which occur at inline
  936   // cache sites (when the callee activation is not yet set up), so we are at a call site.
 937   throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException(), nullptr);
 938 JRT_END
 939 
 940 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* current))
 941   throw_StackOverflowError_common(current, false);
 942 JRT_END
 943 
 944 JRT_ENTRY(void, SharedRuntime::throw_delayed_StackOverflowError(JavaThread* current))
 945   throw_StackOverflowError_common(current, true);
 946 JRT_END
 947 
 948 void SharedRuntime::throw_StackOverflowError_common(JavaThread* current, bool delayed) {
 949   // We avoid using the normal exception construction in this case because
 950   // it performs an upcall to Java, and we're already out of stack space.
 951   JavaThread* THREAD = current; // For exception macros.
 952   Klass* k = vmClasses::StackOverflowError_klass();
 953   oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
 954   if (delayed) {
 955     java_lang_Throwable::set_message(exception_oop,
 956                                      Universe::delayed_stack_overflow_error_message());
 957   }
 958   Handle exception (current, exception_oop);
 959   if (StackTraceInThrowable) {
 960     java_lang_Throwable::fill_in_stack_trace(exception);
 961   }
 962   // Remove the ScopedValue bindings in case we got a
 963   // StackOverflowError while we were trying to remove ScopedValue
 964   // bindings.
 965   current->clear_scopedValueBindings();
 966   // Increment counter for hs_err file reporting
 967   Atomic::inc(&Exceptions::_stack_overflow_errors);
 968   throw_and_post_jvmti_exception(current, exception);
 969 }
 970 
 971 address SharedRuntime::continuation_for_implicit_exception(JavaThread* current,
 972                                                            address pc,
 973                                                            ImplicitExceptionKind exception_kind)
 974 {
 975   address target_pc = nullptr;
 976 
 977   if (Interpreter::contains(pc)) {
 978     switch (exception_kind) {
 979       case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
 980       case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
 981       case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
 982       default:                      ShouldNotReachHere();
 983     }
 984   } else {
 985     switch (exception_kind) {
 986       case STACK_OVERFLOW: {
 987         // Stack overflow only occurs upon frame setup; the callee is
 988         // going to be unwound. Dispatch to a shared runtime stub
 989         // which will cause the StackOverflowError to be fabricated
 990         // and processed.
 991         // Stack overflow should never occur during deoptimization:
 992         // the compiled method bangs the stack by as much as the
 993         // interpreter would need in case of a deoptimization. The
 994         // deoptimization blob and uncommon trap blob bang the stack
 995         // in a debug VM to verify the correctness of the compiled
 996         // method stack banging.
 997         assert(current->deopt_mark() == nullptr, "no stack overflow from deopt blob/uncommon trap");
 998         Events::log_exception(current, "StackOverflowError at " INTPTR_FORMAT, p2i(pc));
 999         return SharedRuntime::throw_StackOverflowError_entry();
1000       }
1001 
1002       case IMPLICIT_NULL: {
1003         if (VtableStubs::contains(pc)) {
1004           // We haven't yet entered the callee frame. Fabricate an
1005           // exception and begin dispatching it in the caller. Since
1006           // the caller was at a call site, it's safe to destroy all
1007           // caller-saved registers, as these entry points do.
1008           VtableStub* vt_stub = VtableStubs::stub_containing(pc);
1009 
 1010           // If vt_stub is null, return null so the signal handler reports the SEGV error.
1011           if (vt_stub == nullptr) return nullptr;
1012 
1013           if (vt_stub->is_abstract_method_error(pc)) {
1014             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
1015             Events::log_exception(current, "AbstractMethodError at " INTPTR_FORMAT, p2i(pc));
1016             // Instead of throwing the abstract method error here directly, we re-resolve
1017             // and will throw the AbstractMethodError during resolve. As a result, we'll
1018             // get a more detailed error message.
1019             return SharedRuntime::get_handle_wrong_method_stub();
1020           } else {
1021             Events::log_exception(current, "NullPointerException at vtable entry " INTPTR_FORMAT, p2i(pc));
1022             // Assert that the signal comes from the expected location in stub code.
1023             assert(vt_stub->is_null_pointer_exception(pc),
1024                    "obtained signal from unexpected location in stub code");
1025             return SharedRuntime::throw_NullPointerException_at_call_entry();
1026           }
1027         } else {
1028           CodeBlob* cb = CodeCache::find_blob(pc);
1029 
 1030           // If the code blob is null, return null so the signal handler reports the SEGV error.
1031           if (cb == nullptr) return nullptr;
1032 
1033           // Exception happened in CodeCache. Must be either:
1034           // 1. Inline-cache check in C2I handler blob,
1035           // 2. Inline-cache check in nmethod, or
1036           // 3. Implicit null exception in nmethod
1037 
1038           if (!cb->is_nmethod()) {
1039             bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob();
1040             if (!is_in_blob) {
1041               // Allow normal crash reporting to handle this
1042               return nullptr;
1043             }
1044             Events::log_exception(current, "NullPointerException in code blob at " INTPTR_FORMAT, p2i(pc));
1045             // There is no handler here, so we will simply unwind.
1046             return SharedRuntime::throw_NullPointerException_at_call_entry();
1047           }
1048 
1049           // Otherwise, it's a compiled method.  Consult its exception handlers.
1050           nmethod* nm = cb->as_nmethod();
1051           if (nm->inlinecache_check_contains(pc)) {
1052             // exception happened inside inline-cache check code
1053             // => the nmethod is not yet active (i.e., the frame
1054             // is not set up yet) => use return address pushed by
1055             // caller => don't push another return address
1056             Events::log_exception(current, "NullPointerException in IC check " INTPTR_FORMAT, p2i(pc));
1057             return SharedRuntime::throw_NullPointerException_at_call_entry();
1058           }
1059 
1060           if (nm->method()->is_method_handle_intrinsic()) {
1061             // exception happened inside MH dispatch code, similar to a vtable stub
1062             Events::log_exception(current, "NullPointerException in MH adapter " INTPTR_FORMAT, p2i(pc));
1063             return SharedRuntime::throw_NullPointerException_at_call_entry();
1064           }
1065 
1066 #ifndef PRODUCT
1067           _implicit_null_throws++;
1068 #endif
1069           target_pc = nm->continuation_for_implicit_null_exception(pc);
1070           // If there's an unexpected fault, target_pc might be null,
1071           // in which case we want to fall through into the normal
1072           // error handling code.
1073         }
1074 
1075         break; // fall through
1076       }
1077 
1078 
1079       case IMPLICIT_DIVIDE_BY_ZERO: {
1080         nmethod* nm = CodeCache::find_nmethod(pc);
1081         guarantee(nm != nullptr, "must have containing compiled method for implicit division-by-zero exceptions");
1082 #ifndef PRODUCT
1083         _implicit_div0_throws++;
1084 #endif
1085         target_pc = nm->continuation_for_implicit_div0_exception(pc);
1086         // If there's an unexpected fault, target_pc might be null,
1087         // in which case we want to fall through into the normal
1088         // error handling code.
1089         break; // fall through
1090       }
1091 
1092       default: ShouldNotReachHere();
1093     }
1094 
1095     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
1096 
1097     if (exception_kind == IMPLICIT_NULL) {
1098 #ifndef PRODUCT
1099       // for AbortVMOnException flag
1100       Exceptions::debug_check_abort("java.lang.NullPointerException");
1101 #endif //PRODUCT
1102       Events::log_exception(current, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1103     } else {
1104 #ifndef PRODUCT
1105       // for AbortVMOnException flag
1106       Exceptions::debug_check_abort("java.lang.ArithmeticException");
1107 #endif //PRODUCT
1108       Events::log_exception(current, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, p2i(pc), p2i(target_pc));
1109     }
1110     return target_pc;
1111   }
1112 
1113   ShouldNotReachHere();
1114   return nullptr;
1115 }
1116 
1117 
1118 /**
 1119  * Throws a java/lang/UnsatisfiedLinkError.  The address of this method is
1120  * installed in the native function entry of all native Java methods before
1121  * they get linked to their actual native methods.
1122  *
1123  * \note
 1124  * This method actually never gets called!  The reason is that
 1125  * the interpreter's native entries call NativeLookup::lookup(), which
 1126  * throws the exception when the lookup fails.  The exception is then
 1127  * caught and forwarded on the return from the NativeLookup::lookup() call
1128  * before the call to the native function.  This might change in the future.
1129  */
1130 JNI_ENTRY(void*, throw_unsatisfied_link_error(JNIEnv* env, ...))
1131 {
1132   // We return a bad value here to make sure that the exception is
1133   // forwarded before we look at the return value.
1134   THROW_(vmSymbols::java_lang_UnsatisfiedLinkError(), (void*)badAddress);
1135 }
1136 JNI_END
1137 
1138 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
1139   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
1140 }
1141 
1142 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* current, oopDesc* obj))
1143 #if INCLUDE_JVMCI
1144   if (!obj->klass()->has_finalizer()) {
1145     return;
1146   }
1147 #endif // INCLUDE_JVMCI
1148   assert(oopDesc::is_oop(obj), "must be a valid oop");
1149   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
1150   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
1151 JRT_END
1152 
1153 jlong SharedRuntime::get_java_tid(JavaThread* thread) {
1154   assert(thread != nullptr, "No thread");
1155   if (thread == nullptr) {
1156     return 0;
1157   }
1158   guarantee(Thread::current() != thread || thread->is_oop_safe(),
1159             "current cannot touch oops after its GC barrier is detached.");
1160   oop obj = thread->threadObj();
1161   return (obj == nullptr) ? 0 : java_lang_Thread::thread_id(obj);
1162 }
1163 
1164 /**
1165  * This function ought to be a void function, but cannot be because
1166  * it gets turned into a tail-call on sparc, which runs into dtrace bug
1167  * 6254741.  Once that is fixed we can remove the dummy return value.
1168  */
1169 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
1170   return dtrace_object_alloc(JavaThread::current(), o, o->size());
1171 }
1172 
1173 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o) {
1174   return dtrace_object_alloc(thread, o, o->size());
1175 }
1176 
1177 int SharedRuntime::dtrace_object_alloc(JavaThread* thread, oopDesc* o, size_t size) {
1178   assert(DTraceAllocProbes, "wrong call");
1179   Klass* klass = o->klass();
1180   Symbol* name = klass->name();
1181   HOTSPOT_OBJECT_ALLOC(
1182                    get_java_tid(thread),
1183                    (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
1184   return 0;
1185 }
1186 
1187 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
1188     JavaThread* current, Method* method))
1189   assert(current == JavaThread::current(), "pre-condition");
1190 
1191   assert(DTraceMethodProbes, "wrong call");
1192   Symbol* kname = method->klass_name();
1193   Symbol* name = method->name();
1194   Symbol* sig = method->signature();
1195   HOTSPOT_METHOD_ENTRY(
1196       get_java_tid(current),
1197       (char *) kname->bytes(), kname->utf8_length(),
1198       (char *) name->bytes(), name->utf8_length(),
1199       (char *) sig->bytes(), sig->utf8_length());
1200   return 0;
1201 JRT_END
1202 
1203 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
1204     JavaThread* current, Method* method))
1205   assert(current == JavaThread::current(), "pre-condition");
1206   assert(DTraceMethodProbes, "wrong call");
1207   Symbol* kname = method->klass_name();
1208   Symbol* name = method->name();
1209   Symbol* sig = method->signature();
1210   HOTSPOT_METHOD_RETURN(
1211       get_java_tid(current),
1212       (char *) kname->bytes(), kname->utf8_length(),
1213       (char *) name->bytes(), name->utf8_length(),
1214       (char *) sig->bytes(), sig->utf8_length());
1215   return 0;
1216 JRT_END
1217 
1218 
 1219 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
 1220 // for a call currently in progress, i.e., arguments have been pushed on the stack
 1221 // but the callee has not been invoked yet.  Used by: resolve virtual/static,
1222 // vtable updates, etc.  Caller frame must be compiled.
1223 Handle SharedRuntime::find_callee_info(Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
1224   JavaThread* current = THREAD;
1225   ResourceMark rm(current);
1226 
1227   // last java frame on stack (which includes native call frames)
 1228   vframeStream vfst(current, true);  // Do not skip any javaCalls
1229 
1230   return find_callee_info_helper(vfst, bc, callinfo, THREAD);
1231 }
1232 
1233 Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
1234   nmethod* caller = vfst.nm();
1235 
1236   address pc = vfst.frame_pc();
1237   { // Get call instruction under lock because another thread may be busy patching it.
1238     CompiledICLocker ic_locker(caller);
1239     return caller->attached_method_before_pc(pc);
1240   }
1241   return nullptr;
1242 }
1243 
 1244 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
 1245 // for a call currently in progress, i.e., arguments have been pushed on the stack
 1246 // but the callee has not been invoked yet.  Caller frame must be compiled.
1247 Handle SharedRuntime::find_callee_info_helper(vframeStream& vfst, Bytecodes::Code& bc,
1248                                               CallInfo& callinfo, TRAPS) {
1249   Handle receiver;
1250   Handle nullHandle;  // create a handy null handle for exception returns
1251   JavaThread* current = THREAD;
1252 
1253   assert(!vfst.at_end(), "Java frame must exist");
1254 
1255   // Find caller and bci from vframe
1256   methodHandle caller(current, vfst.method());
1257   int          bci   = vfst.bci();
1258 
1259   if (caller->is_continuation_enter_intrinsic()) {
1260     bc = Bytecodes::_invokestatic;
1261     LinkResolver::resolve_continuation_enter(callinfo, CHECK_NH);
1262     return receiver;
1263   }
1264 
1265   Bytecode_invoke bytecode(caller, bci);
1266   int bytecode_index = bytecode.index();
1267   bc = bytecode.invoke_code();
1268 
1269   methodHandle attached_method(current, extract_attached_method(vfst));
1270   if (attached_method.not_null()) {
1271     Method* callee = bytecode.static_target(CHECK_NH);
1272     vmIntrinsics::ID id = callee->intrinsic_id();
 1273     // When the VM replaces an MH.invokeBasic/linkTo* call with a direct/virtual call,
 1274     // it attaches the statically resolved method to the call site.
1275     if (MethodHandles::is_signature_polymorphic(id) &&
1276         MethodHandles::is_signature_polymorphic_intrinsic(id)) {
1277       bc = MethodHandles::signature_polymorphic_intrinsic_bytecode(id);
1278 
1279       // Adjust invocation mode according to the attached method.
1280       switch (bc) {
1281         case Bytecodes::_invokevirtual:
1282           if (attached_method->method_holder()->is_interface()) {
1283             bc = Bytecodes::_invokeinterface;
1284           }
1285           break;
1286         case Bytecodes::_invokeinterface:
1287           if (!attached_method->method_holder()->is_interface()) {
1288             bc = Bytecodes::_invokevirtual;
1289           }
1290           break;
1291         case Bytecodes::_invokehandle:
1292           if (!MethodHandles::is_signature_polymorphic_method(attached_method())) {
1293             bc = attached_method->is_static() ? Bytecodes::_invokestatic
1294                                               : Bytecodes::_invokevirtual;
1295           }
1296           break;
1297         default:
1298           break;
1299       }
1300     }
1301   }
1302 
1303   assert(bc != Bytecodes::_illegal, "not initialized");
1304 
1305   bool has_receiver = bc != Bytecodes::_invokestatic &&
1306                       bc != Bytecodes::_invokedynamic &&
1307                       bc != Bytecodes::_invokehandle;
1308 
1309   // Find receiver for non-static call
1310   if (has_receiver) {
1311     // This register map must be updated since we need to find the receiver for
1312     // compiled frames. The receiver might be in a register.
1313     RegisterMap reg_map2(current,
1314                          RegisterMap::UpdateMap::include,
1315                          RegisterMap::ProcessFrames::include,
1316                          RegisterMap::WalkContinuation::skip);
1317     frame stubFrame   = current->last_frame();
1318     // Caller-frame is a compiled frame
1319     frame callerFrame = stubFrame.sender(&reg_map2);
1320 
1321     if (attached_method.is_null()) {
1322       Method* callee = bytecode.static_target(CHECK_NH);
1323       if (callee == nullptr) {
1324         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
1325       }
1326     }
1327 
1328     // Retrieve from a compiled argument list
1329     receiver = Handle(current, callerFrame.retrieve_receiver(&reg_map2));
1330     assert(oopDesc::is_oop_or_null(receiver()), "");
1331 
1332     if (receiver.is_null()) {
1333       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
1334     }
1335   }
1336 
1337   // Resolve method
1338   if (attached_method.not_null()) {
1339     // Parameterized by attached method.
1340     LinkResolver::resolve_invoke(callinfo, receiver, attached_method, bc, CHECK_NH);
1341   } else {
1342     // Parameterized by bytecode.
1343     constantPoolHandle constants(current, caller->constants());
1344     LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_NH);
1345   }
1346 
1347 #ifdef ASSERT
1348   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
1349   if (has_receiver) {
1350     assert(receiver.not_null(), "should have thrown exception");
1351     Klass* receiver_klass = receiver->klass();
1352     Klass* rk = nullptr;
1353     if (attached_method.not_null()) {
1354       // In case there's resolved method attached, use its holder during the check.
1355       rk = attached_method->method_holder();
1356     } else {
1357       // Klass is already loaded.
1358       constantPoolHandle constants(current, caller->constants());
1359       rk = constants->klass_ref_at(bytecode_index, bc, CHECK_NH);
1360     }
1361     Klass* static_receiver_klass = rk;
1362     assert(receiver_klass->is_subtype_of(static_receiver_klass),
1363            "actual receiver must be subclass of static receiver klass");
1364     if (receiver_klass->is_instance_klass()) {
1365       if (InstanceKlass::cast(receiver_klass)->is_not_initialized()) {
1366         tty->print_cr("ERROR: Klass not yet initialized!!");
1367         receiver_klass->print();
1368       }
1369       assert(!InstanceKlass::cast(receiver_klass)->is_not_initialized(), "receiver_klass must be initialized");
1370     }
1371   }
1372 #endif
1373 
1374   return receiver;
1375 }
1376 
1377 methodHandle SharedRuntime::find_callee_method(TRAPS) {
1378   JavaThread* current = THREAD;
1379   ResourceMark rm(current);
1380   // We first need to check whether any Java activations (compiled or interpreted)
1381   // exist on the stack since the last JavaCall.  If not, we need
1382   // to get the target method from the JavaCall wrapper.
1383   vframeStream vfst(current, true);  // Do not skip any javaCalls
1384   methodHandle callee_method;
1385   if (vfst.at_end()) {
1386     // No Java frames were found on stack since we did the JavaCall.
1387     // Hence the stack can only contain an entry_frame.  We need to
1388     // find the target method from the stub frame.
1389     RegisterMap reg_map(current,
1390                         RegisterMap::UpdateMap::skip,
1391                         RegisterMap::ProcessFrames::include,
1392                         RegisterMap::WalkContinuation::skip);
1393     frame fr = current->last_frame();
1394     assert(fr.is_runtime_frame(), "must be a runtimeStub");
1395     fr = fr.sender(&reg_map);
1396     assert(fr.is_entry_frame(), "must be");
1397     // fr is now pointing to the entry frame.
1398     callee_method = methodHandle(current, fr.entry_frame_call_wrapper()->callee_method());
1399   } else {
1400     Bytecodes::Code bc;
1401     CallInfo callinfo;
1402     find_callee_info_helper(vfst, bc, callinfo, CHECK_(methodHandle()));
1403     callee_method = methodHandle(current, callinfo.selected_method());
1404   }
1405   assert(callee_method()->is_method(), "must be");
1406   return callee_method;
1407 }
1408 
1409 // Resolves a call.
1410 methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, TRAPS) {
1411   JavaThread* current = THREAD;
1412   ResourceMark rm(current);
1413   RegisterMap cbl_map(current,
1414                       RegisterMap::UpdateMap::skip,
1415                       RegisterMap::ProcessFrames::include,
1416                       RegisterMap::WalkContinuation::skip);
1417   frame caller_frame = current->last_frame().sender(&cbl_map);
1418 
1419   CodeBlob* caller_cb = caller_frame.cb();
1420   guarantee(caller_cb != nullptr && caller_cb->is_nmethod(), "must be called from compiled method");
1421   nmethod* caller_nm = caller_cb->as_nmethod();
1422 
1423   // determine call info & receiver
1424   // note: a) receiver is null for static calls
1425   //       b) an exception is thrown if receiver is null for non-static calls
1426   CallInfo call_info;
1427   Bytecodes::Code invoke_code = Bytecodes::_illegal;
1428   Handle receiver = find_callee_info(invoke_code, call_info, CHECK_(methodHandle()));
1429 
1430   NoSafepointVerifier nsv;
1431 
1432   methodHandle callee_method(current, call_info.selected_method());
1433 
1434   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
1435          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
1436          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
1437          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
1438          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
1439 
1440   assert(!caller_nm->is_unloading(), "It should not be unloading");
1441 
1442   // tracing/debugging/statistics
1443   uint *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
1444                  (is_virtual) ? (&_resolve_virtual_ctr) :
1445                                 (&_resolve_static_ctr);
1446   Atomic::inc(addr);
1447 
1448 #ifndef PRODUCT
1449   if (TraceCallFixup) {
1450     ResourceMark rm(current);
1451     tty->print("resolving %s%s (%s) call to",
1452                (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
1453                Bytecodes::name(invoke_code));
1454     callee_method->print_short_name(tty);
1455     tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT,
1456                   p2i(caller_frame.pc()), p2i(callee_method->code()));
1457   }
1458 #endif
1459 
1460   if (invoke_code == Bytecodes::_invokestatic) {
1461     assert(callee_method->method_holder()->is_initialized() ||
1462            callee_method->method_holder()->is_reentrant_initialization(current),
1463            "invalid class initialization state for invoke_static");
1464     if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
1465       // In order to keep the class initialization check, do not patch the call
1466       // site for a static call when the class is not fully initialized.
1467       // The proper check is enforced by call site re-resolution on every invocation.
1468       //
1469       // When fast class initialization checks are supported (VM_Version::supports_fast_class_init_checks() == true),
1470       // explicit class initialization check is put in nmethod entry (VEP).
1471       assert(callee_method->method_holder()->is_linked(), "must be");
1472       return callee_method;
1473     }
1474   }
1475 
1476 
1477   // JSR 292 key invariant:
1478   // If the resolved method is a MethodHandle invoke target, the call
1479   // site must be a MethodHandle call site, because the lambda form might tail-call
1480   // leaving the stack in a state unknown to either caller or callee
1481 
1482   // Compute entry points. The computation of the entry points is independent of
1483   // patching the call.
1484 
1485   // Make sure the callee nmethod does not get deoptimized and removed before
1486   // we are done patching the code.
1487 
1488 
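       // Patching below is done under the CompiledICLocker so that concurrent
       // resolution attempts and inline-cache transitions for this caller are
       // serialized with each other.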
1489   CompiledICLocker ml(caller_nm);
1490   if (is_virtual && !is_optimized) {
1491     CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1492     inline_cache->update(&call_info, receiver->klass());
1493   } else {
1494     // Callsite is a direct call - set it to the destination method
1495     CompiledDirectCall* callsite = CompiledDirectCall::before(caller_frame.pc());
1496     callsite->set(callee_method);
1497   }
1498 
1499   return callee_method;
1500 }
1501 
1502 // Inline caches exist only in compiled code
1503 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* current))
1504   PerfTraceTime timer(_perf_ic_miss_total_time);
1505 
1506 #ifdef ASSERT
1507   RegisterMap reg_map(current,
1508                       RegisterMap::UpdateMap::skip,
1509                       RegisterMap::ProcessFrames::include,
1510                       RegisterMap::WalkContinuation::skip);
1511   frame stub_frame = current->last_frame();
1512   assert(stub_frame.is_runtime_frame(), "sanity check");
1513   frame caller_frame = stub_frame.sender(&reg_map);
1514   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame() && !caller_frame.is_upcall_stub_frame(), "unexpected frame");
1515 #endif /* ASSERT */
1516 
1517   methodHandle callee_method;
1518   JRT_BLOCK
1519     callee_method = SharedRuntime::handle_ic_miss_helper(CHECK_NULL);
1520     // Return Method* through TLS
1521     current->set_vm_result_2(callee_method());
1522   JRT_BLOCK_END
1523   // return compiled code entry point after potential safepoints
1524   return get_resolved_entry(current, callee_method);
1525 JRT_END
1526 
1527 
1528 // Handle call site that has been made non-entrant
1529 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* current))
1530   PerfTraceTime timer(_perf_handle_wrong_method_total_time);
1531 
1532   // 6243940 We might end up in here if the callee is deoptimized
1533   // as we race to call it.  We don't want to take a safepoint if
1534   // the caller was interpreted because the caller frame will look
1535   // interpreted to the stack walkers and arguments are now
1536   // "compiled" so it is much better to make this transition
1537   // invisible to the stack walking code. The i2c path will
1538   // place the callee method in the callee_target. It is stashed
1539   // there because if we try to find the callee by normal means a
1540   // safepoint is possible and we would have trouble GC'ing the compiled args.
1541   RegisterMap reg_map(current,
1542                       RegisterMap::UpdateMap::skip,
1543                       RegisterMap::ProcessFrames::include,
1544                       RegisterMap::WalkContinuation::skip);
1545   frame stub_frame = current->last_frame();
1546   assert(stub_frame.is_runtime_frame(), "sanity check");
1547   frame caller_frame = stub_frame.sender(&reg_map);
1548 
1549   if (caller_frame.is_interpreted_frame() ||
1550       caller_frame.is_entry_frame() ||
1551       caller_frame.is_upcall_stub_frame()) {
1552     Method* callee = current->callee_target();
1553     guarantee(callee != nullptr && callee->is_method(), "bad handshake");
1554     current->set_vm_result_2(callee);
1555     current->set_callee_target(nullptr);
1556     if (caller_frame.is_entry_frame() && VM_Version::supports_fast_class_init_checks()) {
1557       // Bypass class initialization checks in c2i when caller is in native.
1558       // JNI calls to static methods don't have class initialization checks.
1559       // Fast class initialization checks are present in c2i adapters and call into
1560       // SharedRuntime::handle_wrong_method() on the slow path.
1561       //
1562       // JVM upcalls may land here as well, but there's a proper check present in
1563       // LinkResolver::resolve_static_call (called from JavaCalls::call_static),
1564       // so bypassing it in c2i adapter is benign.
1565       return callee->get_c2i_no_clinit_check_entry();
1566     } else {
1567       return callee->get_c2i_entry();
1568     }
1569   }
1570 
1571   // Must be the compiled-to-compiled path, which is safe to stack walk.
1572   methodHandle callee_method;
1573   JRT_BLOCK
1574     // Force resolving of caller (if we called from compiled frame)
1575     callee_method = SharedRuntime::reresolve_call_site(CHECK_NULL);
1576     current->set_vm_result_2(callee_method());
1577   JRT_BLOCK_END
1578   // return compiled code entry point after potential safepoints
1579   return get_resolved_entry(current, callee_method);
1580 JRT_END
1581 
1582 // Handle abstract method call
1583 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_abstract(JavaThread* current))
1584   PerfTraceTime timer(_perf_handle_wrong_method_total_time);
1585 
1586   // Verbose error message for AbstractMethodError.
1587   // Get the called method from the invoke bytecode.
1588   vframeStream vfst(current, true);
1589   assert(!vfst.at_end(), "Java frame must exist");
1590   methodHandle caller(current, vfst.method());
1591   Bytecode_invoke invoke(caller, vfst.bci());
1592   DEBUG_ONLY( invoke.verify(); )
1593 
1594   // Find the compiled caller frame.
1595   RegisterMap reg_map(current,
1596                       RegisterMap::UpdateMap::include,
1597                       RegisterMap::ProcessFrames::include,
1598                       RegisterMap::WalkContinuation::skip);
1599   frame stubFrame = current->last_frame();
1600   assert(stubFrame.is_runtime_frame(), "must be");
1601   frame callerFrame = stubFrame.sender(&reg_map);
1602   assert(callerFrame.is_compiled_frame(), "must be");
1603 
1604   // Install exception and return forward entry.
1605   address res = SharedRuntime::throw_AbstractMethodError_entry();
1606   JRT_BLOCK
1607     methodHandle callee(current, invoke.static_target(current));
1608     if (!callee.is_null()) {
1609       oop recv = callerFrame.retrieve_receiver(&reg_map);
1610       Klass *recv_klass = (recv != nullptr) ? recv->klass() : nullptr;
1611       res = StubRoutines::forward_exception_entry();
1612       LinkResolver::throw_abstract_method_error(callee, recv_klass, CHECK_(res));
1613     }
1614   JRT_BLOCK_END
1615   return res;
1616 JRT_END
1617 
1618 // return verified_code_entry if interp_only_mode is not set for the current thread;
1619 // otherwise return c2i entry.
1620 address SharedRuntime::get_resolved_entry(JavaThread* current, methodHandle callee_method) {
1621   if (current->is_interp_only_mode() && !callee_method->is_special_native_intrinsic()) {
1622     // In interp_only_mode we need to go to the interpreted entry
1623     // The c2i won't patch in this mode -- see fixup_callers_callsite
1624     return callee_method->get_c2i_entry();
1625   }
1626   assert(callee_method->verified_code_entry() != nullptr, " Jump to zero!");
1627   return callee_method->verified_code_entry();
1628 }
1629 
1630 // resolve a static call and patch code
1631 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* current))
1632   PerfTraceTime timer(_perf_resolve_static_total_time);
1633 
1634   methodHandle callee_method;
1636   JRT_BLOCK
1637     callee_method = SharedRuntime::resolve_helper(false, false, CHECK_NULL);
1638     current->set_vm_result_2(callee_method());
1639   JRT_BLOCK_END
1640   // return compiled code entry point after potential safepoints
1641   return get_resolved_entry(current, callee_method);
1642 JRT_END
1643 
1644 // resolve virtual call and update inline cache to monomorphic
1645 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* current))
1646   PerfTraceTime timer(_perf_resolve_virtual_total_time);
1647 
1648   methodHandle callee_method;
1649   JRT_BLOCK
1650     callee_method = SharedRuntime::resolve_helper(true, false, CHECK_NULL);
1651     current->set_vm_result_2(callee_method());
1652   JRT_BLOCK_END
1653   // return compiled code entry point after potential safepoints
1654   return get_resolved_entry(current, callee_method);
1655 JRT_END
1656 
1657 
1658 // Resolve a virtual call that can be statically bound (e.g., always
1659 // monomorphic, so it has no inline cache).  Patch code to resolved target.
1660 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* current))
1661   PerfTraceTime timer(_perf_resolve_opt_virtual_total_time);
1662 
1663   methodHandle callee_method;
1664   JRT_BLOCK
1665     callee_method = SharedRuntime::resolve_helper(true, true, CHECK_NULL);
1666     current->set_vm_result_2(callee_method());
1667   JRT_BLOCK_END
1668   // return compiled code entry point after potential safepoints
1669   return get_resolved_entry(current, callee_method);
1670 JRT_END
1671 
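     // Resolve the call at an inline-cache miss and return the selected callee.
     // The caller's inline cache is then updated under the CompiledICLocker
     // (see the transition near the end of this function).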
1672 methodHandle SharedRuntime::handle_ic_miss_helper(TRAPS) {
1673   JavaThread* current = THREAD;
1674   ResourceMark rm(current);
1675   CallInfo call_info;
1676   Bytecodes::Code bc;
1677 
1678   // receiver is null for static calls. An exception is thrown for null
1679   // receivers for non-static calls
1680   Handle receiver = find_callee_info(bc, call_info, CHECK_(methodHandle()));
1681 
1682   methodHandle callee_method(current, call_info.selected_method());
1683 
1684   Atomic::inc(&_ic_miss_ctr);
1685 
1686 #ifndef PRODUCT
1687   // Statistics & Tracing
1688   if (TraceCallFixup) {
1689     ResourceMark rm(current);
1690     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1691     callee_method->print_short_name(tty);
1692     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1693   }
1694 
1695   if (ICMissHistogram) {
1696     MutexLocker m(VMStatistic_lock);
1697     RegisterMap reg_map(current,
1698                         RegisterMap::UpdateMap::skip,
1699                         RegisterMap::ProcessFrames::include,
1700                         RegisterMap::WalkContinuation::skip);
1701     frame f = current->last_frame().real_sender(&reg_map); // skip runtime stub
1702     // produce statistics under the lock
1703     trace_ic_miss(f.pc());
1704   }
1705 #endif
1706 
1707   // install an event collector so that when a vtable stub is created the
1708   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1709   // event can't be posted when the stub is created as locks are held
1710   // - instead the event will be deferred until the event collector goes
1711   // out of scope.
1712   JvmtiDynamicCodeEventCollector event_collector;
1713 
1714   // Update inline cache to megamorphic. Skip update if we are called from interpreted.
1715   RegisterMap reg_map(current,
1716                       RegisterMap::UpdateMap::skip,
1717                       RegisterMap::ProcessFrames::include,
1718                       RegisterMap::WalkContinuation::skip);
1719   frame caller_frame = current->last_frame().sender(&reg_map);
1720   CodeBlob* cb = caller_frame.cb();
1721   nmethod* caller_nm = cb->as_nmethod();
1722 
1723   CompiledICLocker ml(caller_nm);
1724   CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
1725   inline_cache->update(&call_info, receiver()->klass());
1726 
1727   return callee_method;
1728 }
1729 
1730 //
1731 // Resets a call-site in compiled code so it will get resolved again.
1732 // This routines handles both virtual call sites, optimized virtual call
1733 // sites, and static call sites. Typically used to change a call sites
1734 // destination from compiled to interpreted.
1735 //
1736 methodHandle SharedRuntime::reresolve_call_site(TRAPS) {
1737   JavaThread* current = THREAD;
1738   ResourceMark rm(current);
1739   RegisterMap reg_map(current,
1740                       RegisterMap::UpdateMap::skip,
1741                       RegisterMap::ProcessFrames::include,
1742                       RegisterMap::WalkContinuation::skip);
1743   frame stub_frame = current->last_frame();
1744   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1745   frame caller = stub_frame.sender(&reg_map);
1746 
1747   // Do nothing if the frame isn't a live compiled frame.
1748   // nmethod could be deoptimized by the time we get here
1749   // so no update to the caller is needed.
1750 
1751   if ((caller.is_compiled_frame() && !caller.is_deoptimized_frame()) ||
1752       (caller.is_native_frame() && caller.cb()->as_nmethod()->method()->is_continuation_enter_intrinsic())) {
1753 
1754     address pc = caller.pc();
1755 
1756     nmethod* caller_nm = CodeCache::find_nmethod(pc);
1757     assert(caller_nm != nullptr, "did not find caller nmethod");
1758 
1759     // Default call_addr is the location of the "basic" call.
1760     // Determine the address of the call we are re-resolving. With
1761     // Inline Caches we will always find a recognizable call.
1762     // With Inline Caches disabled we may or may not find a
1763     // recognizable call. We will always find a call for static
1764     // calls and for optimized virtual calls. For vanilla virtual
1765     // calls it depends on the state of the UseInlineCaches switch.
1766     //
1767     // With Inline Caches disabled we can get here for a virtual call
1768     // for two reasons:
1769     //   1 - calling an abstract method. The vtable for abstract methods
1770     //       will run us thru handle_wrong_method and we will eventually
1771     //       end up in the interpreter to throw the AbstractMethodError.
1772     //   2 - a racing deoptimization. We could be doing a vanilla vtable
1773     //       call and between the time we fetch the entry address and
1774     //       we jump to it the target gets deoptimized. Similar to 1
1775     //       we will wind up in the interpreter (thru a c2i with c2).
1776     //
1777     CompiledICLocker ml(caller_nm);
1778     address call_addr = caller_nm->call_instruction_address(pc);
1779 
1780     if (call_addr != nullptr) {
1781       // On x86 the logic for finding a call instruction is blindly checking for a call opcode 5
1782       // bytes back in the instruction stream so we must also check for reloc info.
1783       RelocIterator iter(caller_nm, call_addr, call_addr+1);
1784       bool ret = iter.next(); // Get item
1785       if (ret) {
1786         switch (iter.type()) {
1787           case relocInfo::static_call_type:
1788           case relocInfo::opt_virtual_call_type: {
1789             CompiledDirectCall* cdc = CompiledDirectCall::at(call_addr);
1790             cdc->set_to_clean();
1791             break;
1792           }
1793 
1794           case relocInfo::virtual_call_type: {
1795             // compiled, dispatched call (which used to call an interpreted method)
1796             CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1797             inline_cache->set_to_clean();
1798             break;
1799           }
1800           default:
1801             break;
1802         }
1803       }
1804     }
1805   }
1806 
1807   methodHandle callee_method = find_callee_method(CHECK_(methodHandle()));
1808 
1809   Atomic::inc(&_wrong_method_ctr);
1810 
1811 #ifndef PRODUCT
1812   if (TraceCallFixup) {
1813     ResourceMark rm(current);
1814     tty->print("handle_wrong_method reresolving call to");
1815     callee_method->print_short_name(tty);
1816     tty->print_cr(" code: " INTPTR_FORMAT, p2i(callee_method->code()));
1817   }
1818 #endif
1819 
1820   return callee_method;
1821 }
1822 
1823 address SharedRuntime::handle_unsafe_access(JavaThread* thread, address next_pc) {
1824   // The faulting unsafe accesses should be changed to throw the error
1825   // synchronously instead. Meanwhile the faulting instruction will be
1826   // skipped over (effectively turning it into a no-op) and an
1827   // asynchronous exception will be raised which the thread will
1828   // handle at a later point. If the instruction is a load it will
1829   // return garbage.
1830 
1831   // Request an async exception.
1832   thread->set_pending_unsafe_access_error();
1833 
1834   // Return address of next instruction to execute.
1835   return next_pc;
1836 }
1837 
1838 #ifdef ASSERT
1839 void SharedRuntime::check_member_name_argument_is_last_argument(const methodHandle& method,
1840                                                                 const BasicType* sig_bt,
1841                                                                 const VMRegPair* regs) {
1842   ResourceMark rm;
1843   const int total_args_passed = method->size_of_parameters();
1844   const VMRegPair*    regs_with_member_name = regs;
1845         VMRegPair* regs_without_member_name = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed - 1);
1846 
1847   const int member_arg_pos = total_args_passed - 1;
1848   assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1849   assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1850 
1851   java_calling_convention(sig_bt, regs_without_member_name, total_args_passed - 1);
1852 
1853   for (int i = 0; i < member_arg_pos; i++) {
1854     VMReg a =    regs_with_member_name[i].first();
1855     VMReg b = regs_without_member_name[i].first();
1856     assert(a->value() == b->value(), "register allocation mismatch: a= %d, b= %d", a->value(), b->value());
1857   }
1858   assert(regs_with_member_name[member_arg_pos].first()->is_valid(), "bad member arg");
1859 }
1860 #endif
1861 
1862 // ---------------------------------------------------------------------------
1863 // We are calling the interpreter via a c2i. Normally this would mean that
1864 // we were called by a compiled method. However, we could have lost a race
1865 // where we went int -> i2c -> c2i and so the caller could in fact be
1866 // interpreted. If the caller is compiled we attempt to patch the caller
1867 // so it no longer calls into the interpreter.
1868 JRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1869   AARCH64_PORT_ONLY(assert(pauth_ptr_is_raw(caller_pc), "should be raw"));
1870 
1871   // It's possible that deoptimization can occur at a call site which hasn't
1872   // been resolved yet, in which case this function will be called from
1873   // an nmethod that has been patched for deopt and we can ignore the
1874   // request for a fixup.
1875   // It is also possible that we lost a race where from_compiled_entry
1876   // is now back at the i2c; in that case we don't need to patch, and if
1877   // we did we'd leap into space because the call site needs to use the
1878   // "to interpreter" stub in order to load up the Method*. Don't
1879   // ask me how I know this...
1880 
1881   // Result from nmethod::is_unloading is not stable across safepoints.
1882   NoSafepointVerifier nsv;
1883 
1884   nmethod* callee = method->code();
1885   if (callee == nullptr) {
1886     return;
1887   }
1888 
1889   // write lock needed because we might patch call site by set_to_clean()
1890   // and is_unloading() can modify nmethod's state
1891   MACOS_AARCH64_ONLY(ThreadWXEnable __wx(WXWrite, JavaThread::current()));
1892 
1893   CodeBlob* cb = CodeCache::find_blob(caller_pc);
1894   if (cb == nullptr || !cb->is_nmethod() || !callee->is_in_use() || callee->is_unloading()) {
1895     return;
1896   }
1897 
1898   // The check above makes sure this is an nmethod.
1899   nmethod* caller = cb->as_nmethod();
1900 
1901   // Get the return PC for the passed caller PC.
1902   address return_pc = caller_pc + frame::pc_return_offset;
1903 
1904   if (!caller->is_in_use() || !NativeCall::is_call_before(return_pc)) {
1905     return;
1906   }
1907 
1908   // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
1909   CompiledICLocker ic_locker(caller);
1910   ResourceMark rm;
1911 
1912   // If we got here through a static call or opt_virtual call, then we know where the
1913   // call address would be; let's peek at it
1914   address callsite_addr = (address)nativeCall_before(return_pc);
1915   RelocIterator iter(caller, callsite_addr, callsite_addr + 1);
1916   if (!iter.next()) {
1917     // No reloc entry found; not a static or optimized virtual call
1918     return;
1919   }
1920 
1921   relocInfo::relocType type = iter.reloc()->type();
1922   if (type != relocInfo::static_call_type &&
1923       type != relocInfo::opt_virtual_call_type) {
1924     return;
1925   }
1926 
1927   CompiledDirectCall* callsite = CompiledDirectCall::before(return_pc);
1928   callsite->set_to_clean();
1929 JRT_END
1930 
1931 
1932 // same as JVM_Arraycopy, but called directly from compiled code
1933 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
1934                                                 oopDesc* dest, jint dest_pos,
1935                                                 jint length,
1936                                                 JavaThread* current)) {
1937 #ifndef PRODUCT
1938   _slow_array_copy_ctr++;
1939 #endif
1940   // Check if we have null pointers
1941   if (src == nullptr || dest == nullptr) {
1942     THROW(vmSymbols::java_lang_NullPointerException());
1943   }
1944   // Do the copy.  The casts to arrayOop are necessary for the copy_array API,
1945   // even though the copy_array API also performs dynamic checks to ensure
1946   // that src and dest are truly arrays (and are conformable).
1947   // The copy_array mechanism is awkward and could be removed, but
1948   // the compilers don't call this function except as a last resort,
1949   // so it probably doesn't matter.
1950   src->klass()->copy_array((arrayOopDesc*)src, src_pos,
1951                            (arrayOopDesc*)dest, dest_pos,
1952                            length, current);
1953 }
1954 JRT_END
1955 
1956 // The caller of generate_class_cast_message() (or one of its callers)
1957 // must use a ResourceMark in order to correctly free the result.
1958 char* SharedRuntime::generate_class_cast_message(
1959     JavaThread* thread, Klass* caster_klass) {
1960 
1961   // Get target class name from the checkcast instruction
1962   vframeStream vfst(thread, true);
1963   assert(!vfst.at_end(), "Java frame must exist");
1964   Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
1965   constantPoolHandle cpool(thread, vfst.method()->constants());
1966   Klass* target_klass = ConstantPool::klass_at_if_loaded(cpool, cc.index());
1967   Symbol* target_klass_name = nullptr;
1968   if (target_klass == nullptr) {
1969     // This klass should be resolved, but just in case, get the name in the klass slot.
1970     target_klass_name = cpool->klass_name_at(cc.index());
1971   }
1972   return generate_class_cast_message(caster_klass, target_klass, target_klass_name);
1973 }
1974 
1975 
1976 // The caller of generate_class_cast_message() (or one of its callers)
1977 // must use a ResourceMark in order to correctly free the result.
1978 char* SharedRuntime::generate_class_cast_message(
1979     Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name) {
1980   const char* caster_name = caster_klass->external_name();
1981 
1982   assert(target_klass != nullptr || target_klass_name != nullptr, "one must be provided");
1983   const char* target_name = target_klass == nullptr ? target_klass_name->as_klass_external_name() :
1984                                                    target_klass->external_name();
1985 
1986   size_t msglen = strlen(caster_name) + strlen("class ") + strlen(" cannot be cast to class ") + strlen(target_name) + 1;
1987 
1988   const char* caster_klass_description = "";
1989   const char* target_klass_description = "";
1990   const char* klass_separator = "";
1991   if (target_klass != nullptr && caster_klass->module() == target_klass->module()) {
1992     caster_klass_description = caster_klass->joint_in_module_of_loader(target_klass);
1993   } else {
1994     caster_klass_description = caster_klass->class_in_module_of_loader();
1995     target_klass_description = (target_klass != nullptr) ? target_klass->class_in_module_of_loader() : "";
1996     klass_separator = (target_klass != nullptr) ? "; " : "";
1997   }
1998 
1999   // add 3 for parentheses and preceding space
2000   msglen += strlen(caster_klass_description) + strlen(target_klass_description) + strlen(klass_separator) + 3;
2001 
2002   char* message = NEW_RESOURCE_ARRAY_RETURN_NULL(char, msglen);
2003   if (message == nullptr) {
2004     // Shouldn't happen, but don't cause even more problems if it does
2005     message = const_cast<char*>(caster_klass->external_name());
2006   } else {
2007     jio_snprintf(message,
2008                  msglen,
2009                  "class %s cannot be cast to class %s (%s%s%s)",
2010                  caster_name,
2011                  target_name,
2012                  caster_klass_description,
2013                  klass_separator,
2014                  target_klass_description
2015                  );
2016   }
2017   return message;
2018 }
2019 
2020 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
2021   (void) JavaThread::current()->stack_overflow_state()->reguard_stack();
2022 JRT_END
2023 
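     // Slow-path monitor enter called from compiled code: try the quick path
     // first (skipped when a safepoint is being requested so the thread can
     // reach it promptly), then fall back to ObjectSynchronizer::enter inside
     // a JRT_BLOCK_NO_ASYNC transition.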
2024 void SharedRuntime::monitor_enter_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2025   if (!SafepointSynchronize::is_synchronizing()) {
2026     // Only try quick_enter() if we're not trying to reach a safepoint
2027     // so that the calling thread reaches the safepoint more quickly.
2028     if (ObjectSynchronizer::quick_enter(obj, lock, current)) {
2029       return;
2030     }
2031   }
2032   // NO_ASYNC required because an async exception on the state transition destructor
2033   // would leave you with the lock held and it would never be released.
2034   // The normal monitorenter NullPointerException is thrown without acquiring a lock
2035   // and the model is that an exception implies the method failed.
2036   JRT_BLOCK_NO_ASYNC
2037   Handle h_obj(THREAD, obj);
2038   ObjectSynchronizer::enter(h_obj, lock, current);
2039   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
2040   JRT_BLOCK_END
2041 }
2042 
2043 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
2044 JRT_BLOCK_ENTRY(void, SharedRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2045   SharedRuntime::monitor_enter_helper(obj, lock, current);
2046 JRT_END
2047 
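     // Slow-path monitor exit called from compiled code. It must not block or
     // throw; it may first need to re-enter an inflated monitor that the
     // compiled fast path already unlocked before delegating to
     // ObjectSynchronizer::exit.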
2048 void SharedRuntime::monitor_exit_helper(oopDesc* obj, BasicLock* lock, JavaThread* current) {
2049   assert(JavaThread::current() == current, "invariant");
2050   // Exit must be non-blocking, and therefore no exceptions can be thrown.
2051   ExceptionMark em(current);
2052 
2053   // Check if C2_MacroAssembler::fast_unlock() or
2054   // C2_MacroAssembler::fast_unlock_lightweight() unlocked an inflated
2055   // monitor before going slow path.  Since there is no safepoint
2056   // polling when calling into the VM, we can be sure that the monitor
2057   // hasn't been deallocated.
2058   ObjectMonitor* m = current->unlocked_inflated_monitor();
2059   if (m != nullptr) {
2060     assert(!m->has_owner(current), "must be");
2061     current->clear_unlocked_inflated_monitor();
2062 
2063     // We need to reacquire the lock before we can call ObjectSynchronizer::exit().
2064     if (!m->try_enter(current, /*check_for_recursion*/ false)) {
2065       // Some other thread acquired the lock (or the monitor was
2066       // deflated). Either way we are done.
2067       current->dec_held_monitor_count();
2068       return;
2069     }
2070   }
2071 
2072   // The object could become unlocked through a JNI call, which we have no other checks for.
2073   // Give a fatal message if CheckJNICalls. Otherwise we ignore it.
2074   if (obj->is_unlocked()) {
2075     if (CheckJNICalls) {
2076       fatal("Object has been unlocked by JNI");
2077     }
2078     return;
2079   }
2080   ObjectSynchronizer::exit(obj, lock, current);
2081 }
2082 
2083 // Handles the uncommon cases of monitor unlocking in compiled code
2084 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* current))
2085   assert(current == JavaThread::current(), "pre-condition");
2086   SharedRuntime::monitor_exit_helper(obj, lock, current);
2087 JRT_END
2088 
2089 // This is only called when CheckJNICalls is true, and only
2090 // for virtual thread termination.
2091 JRT_LEAF(void,  SharedRuntime::log_jni_monitor_still_held())
2092   assert(CheckJNICalls, "Only call this when checking JNI usage");
2093   if (log_is_enabled(Debug, jni)) {
2094     JavaThread* current = JavaThread::current();
2095     int64_t vthread_id = java_lang_Thread::thread_id(current->vthread());
2096     int64_t carrier_id = java_lang_Thread::thread_id(current->threadObj());
2097     log_debug(jni)("VirtualThread (tid: " INT64_FORMAT ", carrier id: " INT64_FORMAT
2098                    ") exiting with Objects still locked by JNI MonitorEnter.",
2099                    vthread_id, carrier_id);
2100   }
2101 JRT_END
2102 
2103 #ifndef PRODUCT
2104 
2105 void SharedRuntime::print_statistics() {
2106   ttyLocker ttyl;
2107   if (xtty != nullptr)  xtty->head("statistics type='SharedRuntime'");
2108 
2109   SharedRuntime::print_ic_miss_histogram_on(tty);
2110   SharedRuntime::print_counters_on(tty);
2111   AdapterHandlerLibrary::print_statistics_on(tty);
2112 
2113   if (xtty != nullptr)  xtty->tail("statistics");
2114 }
2115 
2116 //void SharedRuntime::print_counters_on(outputStream* st) {
2117 //  // Dump the JRT_ENTRY counters
2118 //  if (_new_instance_ctr) st->print_cr("%5u new instance requires GC", _new_instance_ctr);
2119 //  if (_new_array_ctr)    st->print_cr("%5u new array requires GC", _new_array_ctr);
2120 //  if (_multi2_ctr)       st->print_cr("%5u multianewarray 2 dim", _multi2_ctr);
2121 //  if (_multi3_ctr)       st->print_cr("%5u multianewarray 3 dim", _multi3_ctr);
2122 //  if (_multi4_ctr)       st->print_cr("%5u multianewarray 4 dim", _multi4_ctr);
2123 //  if (_multi5_ctr)       st->print_cr("%5u multianewarray 5 dim", _multi5_ctr);
2124 //
2125 //  st->print_cr("%5u inline cache miss in compiled", _ic_miss_ctr);
2126 //  st->print_cr("%5u wrong method", _wrong_method_ctr);
2127 //  st->print_cr("%5u unresolved static call site", _resolve_static_ctr);
2128 //  st->print_cr("%5u unresolved virtual call site", _resolve_virtual_ctr);
2129 //  st->print_cr("%5u unresolved opt virtual call site", _resolve_opt_virtual_ctr);
2130 //
2131 //  if (_mon_enter_stub_ctr)       st->print_cr("%5u monitor enter stub", _mon_enter_stub_ctr);
2132 //  if (_mon_exit_stub_ctr)        st->print_cr("%5u monitor exit stub", _mon_exit_stub_ctr);
2133 //  if (_mon_enter_ctr)            st->print_cr("%5u monitor enter slow", _mon_enter_ctr);
2134 //  if (_mon_exit_ctr)             st->print_cr("%5u monitor exit slow", _mon_exit_ctr);
2135 //  if (_partial_subtype_ctr)      st->print_cr("%5u slow partial subtype", _partial_subtype_ctr);
2136 //  if (_jbyte_array_copy_ctr)     st->print_cr("%5u byte array copies", _jbyte_array_copy_ctr);
2137 //  if (_jshort_array_copy_ctr)    st->print_cr("%5u short array copies", _jshort_array_copy_ctr);
2138 //  if (_jint_array_copy_ctr)      st->print_cr("%5u int array copies", _jint_array_copy_ctr);
2139 //  if (_jlong_array_copy_ctr)     st->print_cr("%5u long array copies", _jlong_array_copy_ctr);
2140 //  if (_oop_array_copy_ctr)       st->print_cr("%5u oop array copies", _oop_array_copy_ctr);
2141 //  if (_checkcast_array_copy_ctr) st->print_cr("%5u checkcast array copies", _checkcast_array_copy_ctr);
2142 //  if (_unsafe_array_copy_ctr)    st->print_cr("%5u unsafe array copies", _unsafe_array_copy_ctr);
2143 //  if (_generic_array_copy_ctr)   st->print_cr("%5u generic array copies", _generic_array_copy_ctr);
2144 //  if (_slow_array_copy_ctr)      st->print_cr("%5u slow array copies", _slow_array_copy_ctr);
2145 //  if (_find_handler_ctr)         st->print_cr("%5u find exception handler", _find_handler_ctr);
2146 //  if (_rethrow_ctr)              st->print_cr("%5u rethrow handler", _rethrow_ctr);
2147 //  if (_unsafe_set_memory_ctr) tty->print_cr("%5u unsafe set memorys", _unsafe_set_memory_ctr);
2148 //}
2149 
2150 inline double percent(int64_t x, int64_t y) {
2151   return 100.0 * (double)x / (double)MAX2(y, (int64_t)1);
2152 }
2153 
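     // Builds (and immediately prints, from its constructor) histograms of call
     // arity and parameter block size across all nmethods in the code cache,
     // weighted by each method's compiled invocation count.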
2154 class MethodArityHistogram {
2155  public:
2156   enum { MAX_ARITY = 256 };
2157  private:
2158   static uint64_t _arity_histogram[MAX_ARITY]; // histogram of #args
2159   static uint64_t _size_histogram[MAX_ARITY];  // histogram of arg size in words
2160   static uint64_t _total_compiled_calls;
2161   static uint64_t _max_compiled_calls_per_method;
2162   static int _max_arity;                       // max. arity seen
2163   static int _max_size;                        // max. arg size seen
2164 
2165   static void add_method_to_histogram(nmethod* nm) {
2166     Method* method = (nm == nullptr) ? nullptr : nm->method();
2167     if (method != nullptr) {
2168       ArgumentCount args(method->signature());
2169       int arity   = args.size() + (method->is_static() ? 0 : 1);
2170       int argsize = method->size_of_parameters();
2171       arity   = MIN2(arity, MAX_ARITY-1);
2172       argsize = MIN2(argsize, MAX_ARITY-1);
2173       uint64_t count = (uint64_t)method->compiled_invocation_count();
2174       _max_compiled_calls_per_method = count > _max_compiled_calls_per_method ? count : _max_compiled_calls_per_method;
2175       _total_compiled_calls    += count;
2176       _arity_histogram[arity]  += count;
2177       _size_histogram[argsize] += count;
2178       _max_arity = MAX2(_max_arity, arity);
2179       _max_size  = MAX2(_max_size, argsize);
2180     }
2181   }
2182 
2183   void print_histogram_helper(int n, uint64_t* histo, const char* name) {
2184     const int N = MIN2(9, n);
2185     double sum = 0;
2186     double weighted_sum = 0;
2187     for (int i = 0; i <= n; i++) { sum += (double)histo[i]; weighted_sum += (double)(i*histo[i]); }
2188     if (sum >= 1) { // prevent divide by zero or divide overflow
2189       double rest = sum;
2190       double percent = sum / 100;
2191       for (int i = 0; i <= N; i++) {
2192         rest -= (double)histo[i];
2193         tty->print_cr("%4d: " UINT64_FORMAT_W(12) " (%5.1f%%)", i, histo[i], (double)histo[i] / percent);
2194       }
2195       tty->print_cr("rest: " INT64_FORMAT_W(12) " (%5.1f%%)", (int64_t)rest, rest / percent);
2196       tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
2197       tty->print_cr("(total # of compiled calls = " INT64_FORMAT_W(14) ")", _total_compiled_calls);
2198       tty->print_cr("(max # of compiled calls   = " INT64_FORMAT_W(14) ")", _max_compiled_calls_per_method);
2199     } else {
2200       tty->print_cr("Histogram generation failed for %s. n = %d, sum = %7.5f", name, n, sum);
2201     }
2202   }
2203 
2204   void print_histogram() {
2205     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
2206     print_histogram_helper(_max_arity, _arity_histogram, "arity");
2207     tty->print_cr("\nHistogram of parameter block size (in words, incl. rcvr):");
2208     print_histogram_helper(_max_size, _size_histogram, "size");
2209     tty->cr();
2210   }
2211 
2212  public:
2213   MethodArityHistogram() {
2214     // Take the Compile_lock to protect against changes in the CodeBlob structures
2215     MutexLocker mu1(Compile_lock, Mutex::_safepoint_check_flag);
2216     // Take the CodeCache_lock to protect against changes in the CodeHeap structure
2217     MutexLocker mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2218     _max_arity = _max_size = 0;
2219     _total_compiled_calls = 0;
2220     _max_compiled_calls_per_method = 0;
2221     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
2222     CodeCache::nmethods_do(add_method_to_histogram);
2223     print_histogram();
2224   }
2225 };
2226 
2227 uint64_t MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
2228 uint64_t MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
2229 uint64_t MethodArityHistogram::_total_compiled_calls;
2230 uint64_t MethodArityHistogram::_max_compiled_calls_per_method;
2231 int MethodArityHistogram::_max_arity;
2232 int MethodArityHistogram::_max_size;
2233 
2234 void SharedRuntime::print_call_statistics_on(outputStream* st) {
2235   st->print_cr("Calls from compiled code:");
2236   int64_t total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
2237   int64_t mono_c = _nof_normal_calls - _nof_megamorphic_calls;
2238   int64_t mono_i = _nof_interface_calls;
2239   st->print_cr("\t" INT64_FORMAT_W(12) " (100%%)  total non-inlined   ", total);
2240   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
2241   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
2242   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
2243   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
2244   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
2245   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
2246   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
2247   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.1f%%) |- static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
2248   st->print_cr("\t" INT64_FORMAT_W(12) " (%4.0f%%) |  |- inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
2249   st->cr();
2250   st->print_cr("Note 1: counter updates are not MT-safe.");
2251   st->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
2252   st->print_cr("        %% in nested categories are relative to their category");
2253   st->print_cr("        (and thus add up to more than 100%% with inlining)");
2254   st->cr();
2255 
2256   MethodArityHistogram h;
2257 }
2258 #endif
2259 
2260 #ifndef PRODUCT
2261 static int _lookups; // number of calls to lookup
2262 static int _equals;  // number of buckets checked with matching hash
2263 static int _archived_hits;    // number of successful lookups in archived table
2264 static int _runtime_hits; // number of successful lookups in runtime table
2265 static int _compact; // number of equals calls with compact signature
2266 #endif
2267 
2268 // A simple wrapper class around the calling convention information
2269 // that allows sharing of adapters for the same calling convention.
2270 class AdapterFingerPrint : public MetaspaceObj {
2271  private:
2272   enum {
2273     _basic_type_bits = 4,
2274     _basic_type_mask = right_n_bits(_basic_type_bits),
2275     _basic_types_per_int = BitsPerInt / _basic_type_bits,
2276     _compact_int_count = 3
2277   };
2278   // TO DO:  Consider integrating this with a more global scheme for compressing signatures.
2279   // For now, 4 bits per component (plus T_VOID gaps after double/long) is not excessive.
2280 
2281   int _length;
2282   int _value[_compact_int_count];
2283 
2284   // Private constructor. Use allocate() to get an instance.
2285   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2286     // Pack the BasicTypes with 8 per int
2287     _length = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2288     int sig_index = 0;
2289     for (int index = 0; index < _length; index++) {
2290       int value = 0;
2291       for (int byte = 0; sig_index < total_args_passed && byte < _basic_types_per_int; byte++) {
2292         int bt = adapter_encoding(sig_bt[sig_index++]);
2293         assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2294         value = (value << _basic_type_bits) | bt;
2295       }
2296       _value[index] = value;
2297     }
2298   }
2299 
2300   // Call deallocate instead
2301   ~AdapterFingerPrint() {
2302     FreeHeap(this);
2303   }
2304 
2305   // Remap BasicTypes that are handled equivalently by the adapters.
2306   // These are correct for the current system but someday it might be
2307   // necessary to make this mapping platform dependent.
2308   static int adapter_encoding(BasicType in) {
2309     switch (in) {
2310       case T_BOOLEAN:
2311       case T_BYTE:
2312       case T_SHORT:
2313       case T_CHAR:
2314         // These are all promoted to T_INT in the calling convention
2315         return T_INT;
2316 
2317       case T_OBJECT:
2318       case T_ARRAY:
2319         // In other words, we assume that any register good enough for
2320         // an int or long is good enough for a managed pointer.
2321 #ifdef _LP64
2322         return T_LONG;
2323 #else
2324         return T_INT;
2325 #endif
2326 
2327       case T_INT:
2328       case T_LONG:
2329       case T_FLOAT:
2330       case T_DOUBLE:
2331       case T_VOID:
2332         return in;
2333 
2334       default:
2335         ShouldNotReachHere();
2336         return T_CONFLICT;
2337     }
2338   }
2339 
2340   void* operator new(size_t size, size_t fp_size) throw() {
2341     assert(fp_size >= size, "sanity check");
2342     void* p = AllocateHeap(fp_size, mtCode);
2343     memset(p, 0, fp_size);
2344     return p;
2345   }
2346 
2347   template<typename Function>
2348   void iterate_args(Function function) {
2349     for (int i = 0; i < length(); i++) {
2350       unsigned val = (unsigned)value(i);
2351       // args are packed so that first/lower arguments are in the highest
2352       // bits of each int value, so iterate from highest to the lowest
2353       for (int j = 32 - _basic_type_bits; j >= 0; j -= _basic_type_bits) {
2354         unsigned v = (val >> j) & _basic_type_mask;
2355         if (v == 0) {
2356           continue;
2357         }
2358         function(v);
2359       }
2360     }
2361   }
2362 
2363  public:
2364   static int allocation_size(int total_args_passed, BasicType* sig_bt) {
2365     int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2366     return sizeof(AdapterFingerPrint) + (len > _compact_int_count ? (len - _compact_int_count) * sizeof(int) : 0);
2367   }
2368 
2369   static AdapterFingerPrint* allocate(int total_args_passed, BasicType* sig_bt) {
2370     int size_in_bytes = allocation_size(total_args_passed, sig_bt);
2371     return new (size_in_bytes) AdapterFingerPrint(total_args_passed, sig_bt);
2372   }
2373 
2374   static void deallocate(AdapterFingerPrint* fp) {
2375     fp->~AdapterFingerPrint();
2376   }
2377 
2378   int value(int index) {
2379     return _value[index];
2380   }
2381 
2382   int length() {
2383     if (_length < 0) return -_length;
2384     return _length;
2385   }
2386 
2387   bool is_compact() {
2388     return _length <= _compact_int_count;
2389   }
2390 
2391   unsigned int compute_hash() {
2392     int hash = 0;
2393     for (int i = 0; i < length(); i++) {
2394       int v = value(i);
2395       // Add an arithmetic operation to the hash, like +3, to improve hashing
2396       hash = ((hash << 8) ^ v ^ (hash >> 5)) + 3;
2397     }
2398     return (unsigned int)hash;
2399   }
2400 
2401   const char* as_string() {
2402     stringStream st;
2403     st.print("0x");
2404     for (int i = 0; i < length(); i++) {
2405       st.print("%x", value(i));
2406     }
2407     return st.as_string();
2408   }
2409 
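       // Renders the packed argument encoding using descriptor-like letters.
       // In the packed signature a real long or double is followed by a T_VOID
       // slot, so a T_LONG entry with no trailing T_VOID must be an object
       // pointer (objects are encoded as T_LONG on 64-bit) and is printed as "L".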
2410   const char* as_basic_args_string() {
2411     stringStream st;
2412     bool long_prev = false;
2413     iterate_args([&] (int arg) {
2414       if (long_prev) {
2415         long_prev = false;
2416         if (arg == T_VOID) {
2417           st.print("J");
2418         } else {
2419           st.print("L");
2420         }
2421       }
2422       switch (arg) {
2423         case T_INT:    st.print("I");    break;
2424         case T_LONG:   long_prev = true; break;
2425         case T_FLOAT:  st.print("F");    break;
2426         case T_DOUBLE: st.print("D");    break;
2427         case T_VOID:   break;
2428         default: ShouldNotReachHere();
2429       }
2430     });
2431     if (long_prev) {
2432       st.print("L");
2433     }
2434     return st.as_string();
2435   }
2436 
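       // Reconstructs a resource-allocated BasicType array from the packed
       // encoding, using the same trailing-T_VOID convention to distinguish
       // real longs from object pointers; nargs is set to the resulting length.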
2437   BasicType* as_basic_type(int& nargs) {
2438     nargs = 0;
2439     GrowableArray<BasicType> btarray;
2440     bool long_prev = false;
2441 
2442     iterate_args([&] (int arg) {
2443       if (long_prev) {
2444         long_prev = false;
2445         if (arg == T_VOID) {
2446           btarray.append(T_LONG);
2447         } else {
2448           btarray.append(T_OBJECT); // it could be T_ARRAY; it shouldn't matter
2449         }
2450       }
2451       switch (arg) {
2452         case T_INT: // fallthrough
2453         case T_FLOAT: // fallthrough
2454         case T_DOUBLE:
2455         case T_VOID:
2456           btarray.append((BasicType)arg);
2457           break;
2458         case T_LONG:
2459           long_prev = true;
2460           break;
2461         default: ShouldNotReachHere();
2462       }
2463     });
2464 
2465     if (long_prev) {
2466       btarray.append(T_OBJECT);
2467     }
2468 
2469     nargs = btarray.length();
2470     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nargs);
2471     int index = 0;
2472     GrowableArrayIterator<BasicType> iter = btarray.begin();
2473     while (iter != btarray.end()) {
2474       sig_bt[index++] = *iter;
2475       ++iter;
2476     }
2477     assert(index == btarray.length(), "sanity check");
2478 #ifdef ASSERT
2479     {
2480       AdapterFingerPrint* compare_fp = AdapterFingerPrint::allocate(nargs, sig_bt);
2481       assert(this->equals(compare_fp), "sanity check");
2482       AdapterFingerPrint::deallocate(compare_fp);
2483     }
2484 #endif
2485     return sig_bt;
2486   }
2487 
2488   bool equals(AdapterFingerPrint* other) {
2489     if (other->_length != _length) {
2490       return false;
2491     } else {
2492       for (int i = 0; i < _length; i++) {
2493         if (_value[i] != other->_value[i]) {
2494           return false;
2495         }
2496       }
2497     }
2498     return true;
2499   }
2500 
2501   // methods required by virtue of being a MetaspaceObj
2502   void metaspace_pointers_do(MetaspaceClosure* it) { return; /* nothing to do here */ }
2503   int size() const { return (int)heap_word_size(sizeof(AdapterFingerPrint) + (_length > _compact_int_count ? (_length - _compact_int_count) * sizeof(int) : 0)); }
2504   MetaspaceObj::Type type() const { return AdapterFingerPrintType; }
2505 
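       // Hashing and equality hooks used by the adapter handler tables declared below.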
2506   static bool equals(AdapterFingerPrint* const& fp1, AdapterFingerPrint* const& fp2) {
2507     NOT_PRODUCT(_equals++);
2508     return fp1->equals(fp2);
2509   }
2510 
2511   static unsigned int compute_hash(AdapterFingerPrint* const& fp) {
2512     return fp->compute_hash();
2513   }
2514 };
2515 
2516 #if INCLUDE_CDS
2517 static inline bool adapter_fp_equals_compact_hashtable_entry(AdapterHandlerEntry* entry, AdapterFingerPrint* fp, int len_unused) {
2518   return AdapterFingerPrint::equals(entry->fingerprint(), fp);
2519 }
2520 
2521 class ArchivedAdapterTable : public OffsetCompactHashtable<
2522   AdapterFingerPrint*,
2523   AdapterHandlerEntry*,
2524   adapter_fp_equals_compact_hashtable_entry> {};
2525 #endif // INCLUDE_CDS
2526 
2527 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2528 using AdapterHandlerTable = ResourceHashtable<AdapterFingerPrint*, AdapterHandlerEntry*, 293,
2529                   AnyObj::C_HEAP, mtCode,
2530                   AdapterFingerPrint::compute_hash,
2531                   AdapterFingerPrint::equals>;
2532 static AdapterHandlerTable* _adapter_handler_table;
2533 static GrowableArray<AdapterHandlerEntry*>* _adapter_handler_list = nullptr;
2534 
2535 // Find an entry with the same fingerprint, if one exists
2536 AdapterHandlerEntry* AdapterHandlerLibrary::lookup(AdapterFingerPrint* fp) {
2537   NOT_PRODUCT(_lookups++);
2538   AdapterHandlerEntry* entry = nullptr;
2539 #if INCLUDE_CDS
2540   // If we are building the archive, then the archived adapter table is
2541   // not valid and we need to use the entries added to the runtime table.
2542   if (!CDSConfig::is_dumping_adapters()) {
2543     // Search the archived table first. It is a read-only table, so it can be searched without holding the lock.
2544     entry = _archived_adapter_handler_table.lookup(fp, fp->compute_hash(), 0 /* unused */);
2545     if (entry != nullptr) {
2546 #ifndef PRODUCT
2547       if (fp->is_compact()) {
2548         _compact++;
2549       }
2550       _archived_hits++;
2551 #endif
2552       return entry;
2553     }
2554   }
2555 #endif // INCLUDE_CDS
2556   assert_lock_strong(AdapterHandlerLibrary_lock);
2557   AdapterHandlerEntry** entry_p = _adapter_handler_table->get(fp);
2558   if (entry_p != nullptr) {
2559     entry = *entry_p;
2560     assert(entry->fingerprint()->equals(fp), "fingerprint mismatch key fp %s %s (hash=%d) != found fp %s %s (hash=%d)",
2561            entry->fingerprint()->as_basic_args_string(), entry->fingerprint()->as_string(), entry->fingerprint()->compute_hash(),
2562            fp->as_basic_args_string(), fp->as_string(), fp->compute_hash());
2563 #ifndef PRODUCT
2564     if (fp->is_compact()) _compact++;
2565     _runtime_hits++;
2566 #endif
2567     return entry;
2568   }
2569   return nullptr;
2570 }
2571 
2572 #ifndef PRODUCT
2573 void AdapterHandlerLibrary::print_statistics_on(outputStream* st) {
2574   auto size = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
2575     return sizeof(*key) + sizeof(*a);
2576   };
2577   TableStatistics ts = _adapter_handler_table->statistics_calculate(size);
2578   ts.print(st, "AdapterHandlerTable");
2579   st->print_cr("AdapterHandlerTable (table_size=%d, entries=%d)",
2580                _adapter_handler_table->table_size(), _adapter_handler_table->number_of_entries());
2581   int total_hits = _archived_hits + _runtime_hits;
2582   st->print_cr("AdapterHandlerTable: lookups %d equals %d hits %d (archived=%d+runtime=%d) compact %d",
2583                _lookups, _equals, total_hits, _archived_hits, _runtime_hits, _compact);
2584 }
2585 #endif // !PRODUCT
2586 
2587 // ---------------------------------------------------------------------------
2588 // Implementation of AdapterHandlerLibrary
2589 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = nullptr;
2590 AdapterHandlerEntry* AdapterHandlerLibrary::_no_arg_handler = nullptr;
2591 AdapterHandlerEntry* AdapterHandlerLibrary::_int_arg_handler = nullptr;
2592 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_arg_handler = nullptr;
2593 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_int_arg_handler = nullptr;
2594 AdapterHandlerEntry* AdapterHandlerLibrary::_obj_obj_arg_handler = nullptr;
2595 #if INCLUDE_CDS
2596 ArchivedAdapterTable AdapterHandlerLibrary::_archived_adapter_handler_table;
2597 #endif // INCLUDE_CDS
2598 const int AdapterHandlerLibrary_size = 16*K;
2599 BufferBlob* AdapterHandlerLibrary::_buffer = nullptr;
2600 
2601 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2602   return _buffer;
2603 }
2604 
2605 static void post_adapter_creation(const AdapterBlob* new_adapter,
2606                                   const AdapterHandlerEntry* entry) {
2607   if (Forte::is_enabled() || JvmtiExport::should_post_dynamic_code_generated()) {
2608     char blob_id[256];
2609     jio_snprintf(blob_id,
2610                  sizeof(blob_id),
2611                  "%s(%s)",
2612                  new_adapter->name(),
2613                  entry->fingerprint()->as_string());
2614     if (Forte::is_enabled()) {
2615       Forte::register_stub(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2616     }
2617 
2618     if (JvmtiExport::should_post_dynamic_code_generated()) {
2619       JvmtiExport::post_dynamic_code_generated(blob_id, new_adapter->content_begin(), new_adapter->content_end());
2620     }
2621   }
2622 }
2623 
2624 void AdapterHandlerLibrary::initialize() {
2625   ResourceMark rm;
2626   AdapterBlob* no_arg_blob = nullptr;
2627   AdapterBlob* int_arg_blob = nullptr;
2628   AdapterBlob* obj_arg_blob = nullptr;
2629   AdapterBlob* obj_int_arg_blob = nullptr;
2630   AdapterBlob* obj_obj_arg_blob = nullptr;
2631   {
2632     _adapter_handler_table = new (mtCode) AdapterHandlerTable();
2633     MutexLocker mu(AdapterHandlerLibrary_lock);
2634 
2635     // Create a special handler for abstract methods.  Abstract methods
2636     // are never compiled so an i2c entry is somewhat meaningless, but
2637     // throw AbstractMethodError just in case.
2638     // Pass wrong_method_abstract for the c2i transitions to return
2639     // AbstractMethodError for invalid invocations.
2640     address wrong_method_abstract = SharedRuntime::get_handle_wrong_method_abstract_stub();
2641     _abstract_method_handler = AdapterHandlerLibrary::new_entry(AdapterFingerPrint::allocate(0, nullptr),
2642                                                                 SharedRuntime::throw_AbstractMethodError_entry(),
2643                                                                 wrong_method_abstract, wrong_method_abstract);
2644 
2645     _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2646     _no_arg_handler = create_simple_adapter(no_arg_blob, 0, nullptr);
2647 
2648     BasicType obj_args[] = { T_OBJECT };
2649     _obj_arg_handler = create_simple_adapter(obj_arg_blob, 1, obj_args);
2650 
2651     BasicType int_args[] = { T_INT };
2652     _int_arg_handler = create_simple_adapter(int_arg_blob, 1, int_args);
2653 
2654     BasicType obj_int_args[] = { T_OBJECT, T_INT };
2655     _obj_int_arg_handler = create_simple_adapter(obj_int_arg_blob, 2, obj_int_args);
2656 
2657     BasicType obj_obj_args[] = { T_OBJECT, T_OBJECT };
2658     _obj_obj_arg_handler = create_simple_adapter(obj_obj_arg_blob, 2, obj_obj_args);
2659 
2660     assert(no_arg_blob != nullptr &&
2661            obj_arg_blob != nullptr &&
2662            int_arg_blob != nullptr &&
2663            obj_int_arg_blob != nullptr &&
2664            obj_obj_arg_blob != nullptr, "Initial adapters must be properly created");
2665   }
2666 
2667   // Outside of the lock
2668   post_adapter_creation(no_arg_blob, _no_arg_handler);
2669   post_adapter_creation(obj_arg_blob, _obj_arg_handler);
2670   post_adapter_creation(int_arg_blob, _int_arg_handler);
2671   post_adapter_creation(obj_int_arg_blob, _obj_int_arg_handler);
2672   post_adapter_creation(obj_obj_arg_blob, _obj_obj_arg_handler);
2673 }
2674 
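     // Create an adapter for one of the simple signature shapes that are set up during
     // initialization. If a matching entry was loaded from the AOT cache it is linked to
     // its archived code (or regenerated if linking fails); otherwise a new adapter is created.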
2675 AdapterHandlerEntry* AdapterHandlerLibrary::create_simple_adapter(AdapterBlob*& adapter_blob,
2676                                                                   int total_args_passed,
2677                                                                   BasicType* sig_bt) {
2678   AdapterFingerPrint* fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2679   // We may find the adapter in the table if it is loaded from the AOT cache
2680   AdapterHandlerEntry* entry = lookup(fp);
2681   if (entry != nullptr) {
2682     assert(entry->is_shared() && !entry->is_linked(), "Non null AdapterHandlerEntry should be in the AOT cache in unlinked state");
2683     if (!link_adapter_handler(entry, adapter_blob)) {
2684       if (!generate_adapter_code(adapter_blob, entry, total_args_passed, sig_bt, /* is_transient */ false)) {
2685         return nullptr;
2686       }
2687     }
2688     // AdapterFingerPrint is already in the cache, no need to keep this one
2689     AdapterFingerPrint::deallocate(fp);
2690   } else {
2691     entry = create_adapter(adapter_blob, fp, total_args_passed, sig_bt, /* is_transient */ false);
2692   }
2693   return entry;
2694 }
2695 
2696 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2697                                                       address i2c_entry,
2698                                                       address c2i_entry,
2699                                                       address c2i_unverified_entry,
2700                                                       address c2i_no_clinit_check_entry) {
2701   // Allocate a new entry; the caller is responsible for inserting it into the table.
2702   return AdapterHandlerEntry::allocate(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry,
2703                                        c2i_no_clinit_check_entry);
2704 }
2705 
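     // Return one of the pre-generated handlers if the method is abstract or its signature
     // matches a trivial shape: no arguments, a single reference or int-like argument, or a
     // receiver plus one such argument. Returns nullptr when a custom adapter must be generated.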
2706 AdapterHandlerEntry* AdapterHandlerLibrary::get_simple_adapter(const methodHandle& method) {
2707   if (method->is_abstract()) {
2708     return _abstract_method_handler;
2709   }
2710   int total_args_passed = method->size_of_parameters(); // All args on stack
2711   if (total_args_passed == 0) {
2712     return _no_arg_handler;
2713   } else if (total_args_passed == 1) {
2714     if (!method->is_static()) {
2715       return _obj_arg_handler;
2716     }
2717     switch (method->signature()->char_at(1)) {
2718       case JVM_SIGNATURE_CLASS:
2719       case JVM_SIGNATURE_ARRAY:
2720         return _obj_arg_handler;
2721       case JVM_SIGNATURE_INT:
2722       case JVM_SIGNATURE_BOOLEAN:
2723       case JVM_SIGNATURE_CHAR:
2724       case JVM_SIGNATURE_BYTE:
2725       case JVM_SIGNATURE_SHORT:
2726         return _int_arg_handler;
2727     }
2728   } else if (total_args_passed == 2 &&
2729              !method->is_static()) {
2730     switch (method->signature()->char_at(1)) {
2731       case JVM_SIGNATURE_CLASS:
2732       case JVM_SIGNATURE_ARRAY:
2733         return _obj_obj_arg_handler;
2734       case JVM_SIGNATURE_INT:
2735       case JVM_SIGNATURE_BOOLEAN:
2736       case JVM_SIGNATURE_CHAR:
2737       case JVM_SIGNATURE_BYTE:
2738       case JVM_SIGNATURE_SHORT:
2739         return _obj_int_arg_handler;
2740     }
2741   }
2742   return nullptr;
2743 }
2744 
2745 class AdapterSignatureIterator : public SignatureIterator {
2746  private:
2747   BasicType stack_sig_bt[16];
2748   BasicType* sig_bt;
2749   int index;
2750 
2751  public:
2752   AdapterSignatureIterator(Symbol* signature,
2753                            fingerprint_t fingerprint,
2754                            bool is_static,
2755                            int total_args_passed) :
2756     SignatureIterator(signature, fingerprint),
2757     index(0)
2758   {
2759     sig_bt = (total_args_passed <= 16) ? stack_sig_bt : NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2760     if (!is_static) { // Pass in receiver first
2761       sig_bt[index++] = T_OBJECT;
2762     }
2763     do_parameters_on(this);
2764   }
2765 
2766   BasicType* basic_types() {
2767     return sig_bt;
2768   }
2769 
2770 #ifdef ASSERT
2771   int slots() {
2772     return index;
2773   }
2774 #endif
2775 
2776  private:
2777 
2778   friend class SignatureIterator;  // so do_parameters_on can call do_type
2779   void do_type(BasicType type) {
2780     sig_bt[index++] = type;
2781     if (type == T_LONG || type == T_DOUBLE) {
2782       sig_bt[index++] = T_VOID; // Longs & doubles take 2 Java slots
2783     }
2784   }
2785 };
2786 
2787 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(const methodHandle& method) {
2788   // Use customized signature handler.  Need to lock around updates to
2789   // the _adapter_handler_table (it is not safe for concurrent readers
2790   // and a single writer: this could be fixed if it becomes a
2791   // problem).
2792 
2793   // Fast-path for trivial adapters
2794   AdapterHandlerEntry* entry = get_simple_adapter(method);
2795   if (entry != nullptr) {
2796     return entry;
2797   }
2798 
2799   ResourceMark rm;
2800   AdapterBlob* new_adapter = nullptr;
2801 
2802   // Fill in the signature array, for the calling-convention call.
2803   int total_args_passed = method->size_of_parameters(); // All args on stack
2804 
2805   AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
2806                               method->is_static(), total_args_passed);
2807   assert(si.slots() == total_args_passed, "");
2808   BasicType* sig_bt = si.basic_types();
2809   {
2810     MutexLocker mu(AdapterHandlerLibrary_lock);
2811 
2812     // Lookup method signature's fingerprint
2813     AdapterFingerPrint *fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2814     entry = lookup(fp);
2815 
2816     if (entry != nullptr) {
2817 #ifdef ASSERT
2818       if (VerifyAdapterSharing) {
2819         AdapterBlob* comparison_blob = nullptr;
2820         AdapterFingerPrint* comparison_fp = AdapterFingerPrint::allocate(total_args_passed, sig_bt);
2821         AdapterHandlerEntry* comparison_entry = create_adapter(comparison_blob, comparison_fp, total_args_passed, sig_bt, true);
2822         assert(comparison_blob == nullptr, "no blob should be created when creating an adapter for comparison");
2823         assert(comparison_entry->compare_code(entry), "code must match");
2824         AdapterFingerPrint::deallocate(comparison_fp);
2825         // Release the one just created and return the original
2826         AdapterHandlerEntry::deallocate(comparison_entry);
2827       }
2828 #endif
2829       AdapterFingerPrint::deallocate(fp);
2830       return entry;
2831     }
2832 
2833     entry = create_adapter(new_adapter, fp, total_args_passed, sig_bt, /* is_transient */ false);
2834   }
2835 
2836   // Outside of the lock
2837   if (new_adapter != nullptr) {
2838     post_adapter_creation(new_adapter, entry);
2839   }
2840   return entry;
2841 }
2842 
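     // Try to load this handler's adapter code from the AOT code cache into 'buffer'.
     // On success the entry points are restored from the stored offsets; offsets[0] is
     // always 0 because the i2c entry starts at the beginning of the code.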
2843 bool AdapterHandlerLibrary::lookup_aot_cache(AdapterHandlerEntry* handler, CodeBuffer* buffer) {
2844   ResourceMark rm;
2845   const char* name = AdapterHandlerLibrary::name(handler->fingerprint());
2846   const uint32_t id = AdapterHandlerLibrary::id(handler->fingerprint());
2847   uint32_t offsets[4];
2848   if (SCCache::load_adapter(buffer, id, name, offsets)) {
2849     address i2c_entry = buffer->insts_begin();
2850     assert(offsets[0] == 0, "sanity check");
2851     handler->set_entry_points(i2c_entry, i2c_entry + offsets[1], i2c_entry + offsets[2], i2c_entry + offsets[3]);
2852     return true;
2853   }
2854   return false;
2855 }
2856 
2857 #ifndef PRODUCT
2858 void AdapterHandlerLibrary::print_adapter_handler_info(AdapterHandlerEntry* handler, AdapterBlob* adapter_blob) {
2859   ttyLocker ttyl;
2860   ResourceMark rm;
2861   int insts_size = adapter_blob->code_size();
2862   handler->print_adapter_on(tty);
2863   tty->print_cr("i2c argument handler for: %s %s (%d bytes generated)",
2864                 handler->fingerprint()->as_basic_args_string(),
2865                 handler->fingerprint()->as_string(), insts_size);
2866   tty->print_cr("c2i argument handler starts at " INTPTR_FORMAT, p2i(handler->get_c2i_entry()));
2867   if (Verbose || PrintStubCode) {
2868     address first_pc = handler->base_address();
2869     if (first_pc != nullptr) {
2870       Disassembler::decode(first_pc, first_pc + insts_size, tty, &adapter_blob->asm_remarks());
2871       tty->cr();
2872     }
2873   }
2874 }
2875 #endif // PRODUCT
2876 
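     // Generate the i2c/c2i adapter code for 'handler' into the shared temporary buffer and,
     // unless the adapter is transient (used only for the VerifyAdapterSharing comparison),
     // install it as an AdapterBlob in the CodeCache. When dumping adapters, the generated
     // code is also stored in the AOT code cache. Returns false if the CodeCache is full.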
2877 bool AdapterHandlerLibrary::generate_adapter_code(AdapterBlob*& adapter_blob,
2878                                                   AdapterHandlerEntry* handler,
2879                                                   int total_args_passed,
2880                                                   BasicType* sig_bt,
2881                                                   bool is_transient) {
2882   if (log_is_enabled(Info, perf, class, link)) {
2883     ClassLoader::perf_method_adapters_count()->inc();
2884   }
2885 
2886   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2887   CodeBuffer buffer(buf);
2888   short buffer_locs[20];
2889   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2890                                          sizeof(buffer_locs)/sizeof(relocInfo));
2891   MacroAssembler masm(&buffer);
2892   VMRegPair stack_regs[16];
2893   VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2894 
2895   // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
2896   int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
2897   SharedRuntime::generate_i2c2i_adapters(&masm,
2898                                          total_args_passed,
2899                                          comp_args_on_stack,
2900                                          sig_bt,
2901                                          regs,
2902                                          handler);
2903   if (CDSConfig::is_dumping_adapters()) {
2904     // try to save generated code
2905     const char* name = AdapterHandlerLibrary::name(handler->fingerprint());
2906     const uint32_t id = AdapterHandlerLibrary::id(handler->fingerprint());
2907     uint32_t offsets[4];
2908     offsets[0] = 0;
2909     offsets[1] = handler->get_c2i_entry() - handler->get_i2c_entry();
2910     offsets[2] = handler->get_c2i_unverified_entry() - handler->get_i2c_entry();
2911     offsets[3] = handler->get_c2i_no_clinit_check_entry() - handler->get_i2c_entry();
2912     SCCache::store_adapter(&buffer, id, name, offsets);
2913   }
2914 #ifdef ASSERT
2915   if (VerifyAdapterSharing) {
2916     handler->save_code(buf->code_begin(), buffer.insts_size());
2917     if (is_transient) {
2918       return true;
2919     }
2920   }
2921 #endif
2922 
2923   adapter_blob = AdapterBlob::create(&buffer);
2924   if (adapter_blob == nullptr) {
2925     // CodeCache is full, disable compilation
2926     // Ought to log this, but the compile log is only per compile thread
2927     // and we're some nondescript Java thread.
2928     return false;
2929   }
2930   handler->relocate(adapter_blob->content_begin());
2931 #ifndef PRODUCT
2932   // debugging support
2933   if (PrintAdapterHandlers || PrintStubCode) {
2934     print_adapter_handler_info(handler, adapter_blob);
2935   }
2936 #endif
2937   return true;
2938 }
2939 
2940 AdapterHandlerEntry* AdapterHandlerLibrary::create_adapter(AdapterBlob*& adapter_blob,
2941                                                            AdapterFingerPrint* fingerprint,
2942                                                            int total_args_passed,
2943                                                            BasicType* sig_bt,
2944                                                            bool is_transient) {
2945   AdapterHandlerEntry* handler = AdapterHandlerLibrary::new_entry(fingerprint);
2946   if (!generate_adapter_code(adapter_blob, handler, total_args_passed, sig_bt, is_transient)) {
2947     return nullptr;
2948   }
2949   if (!is_transient) {
2950     assert_lock_strong(AdapterHandlerLibrary_lock);
2951     _adapter_handler_table->put(fingerprint, handler);
2952   }
2953   return handler;
2954 }
2955 
2956 #if INCLUDE_CDS
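     // Bind a shared AdapterHandlerEntry to its pre-generated code in the AOT code cache.
     // Returns false if the code cannot be found or the CodeCache is full, in which case the
     // caller falls back to generating the adapter code from scratch.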
2957 bool AdapterHandlerLibrary::link_adapter_handler(AdapterHandlerEntry* handler, AdapterBlob*& adapter_blob) {
2958 #ifndef PRODUCT
2959   if (TestAdapterLinkFailure) {
2960     return false;
2961   }
2962 #endif
2963   BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2964   CodeBuffer buffer(buf);
2965   short buffer_locs[20];
2966   buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2967                                          sizeof(buffer_locs)/sizeof(relocInfo));
2968 
2969   if (!lookup_aot_cache(handler, &buffer)) {
2970     return false;
2971   }
2972   adapter_blob = AdapterBlob::create(&buffer);
2973   if (adapter_blob == nullptr) {
2974     // CodeCache is full, disable compilation
2975     // Ought to log this, but the compile log is only per compile thread
2976     // and we're some nondescript Java thread.
2977     return false;
2978   }
2979   handler->relocate(adapter_blob->content_begin());
2980 #ifndef PRODUCT
2981   // debugging support
2982   if (PrintAdapterHandlers || PrintStubCode) {
2983     print_adapter_handler_info(handler, adapter_blob);
2984   }
2985 #endif
2986   return true;
2987 }
2988 
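     // Table iterator that copies every archived (fingerprint, entry) pair from the runtime
     // adapter table into the CompactHashtableWriter, keyed by the fingerprint hash with the
     // entry stored as an offset into the archive buffer.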
2989 class CopyAdapterTableToArchive : StackObj {
2990 private:
2991   CompactHashtableWriter* _writer;
2992   ArchiveBuilder* _builder;
2993 public:
2994   CopyAdapterTableToArchive(CompactHashtableWriter* writer) : _writer(writer),
2995                                                              _builder(ArchiveBuilder::current())
2996   {}
2997 
2998   bool do_entry(AdapterFingerPrint* fp, AdapterHandlerEntry* entry) {
2999     LogStreamHandle(Trace, cds) lsh;
3000     if (ArchiveBuilder::current()->has_been_archived((address)entry)) {
3001       assert(ArchiveBuilder::current()->has_been_archived((address)fp), "must be");
3002       AdapterFingerPrint* buffered_fp = ArchiveBuilder::current()->get_buffered_addr(fp);
3003       assert(buffered_fp != nullptr, "sanity check");
3004       AdapterHandlerEntry* buffered_entry = ArchiveBuilder::current()->get_buffered_addr(entry);
3005       assert(buffered_entry != nullptr, "sanity check");
3006 
3007       uint hash = fp->compute_hash();
3008       u4 delta = _builder->buffer_to_offset_u4((address)buffered_entry);
3009       _writer->add(hash, delta);
3010       if (lsh.is_enabled()) {
3011         address fp_runtime_addr = (address)buffered_fp + ArchiveBuilder::current()->buffer_to_requested_delta();
3012         address entry_runtime_addr = (address)buffered_entry + ArchiveBuilder::current()->buffer_to_requested_delta();
3013         log_trace(cds)("Added fp=%p (%s), entry=%p to the archived adapter table", fp_runtime_addr, buffered_fp->as_basic_args_string(), entry_runtime_addr);
3014       }
3015     } else {
3016       if (lsh.is_enabled()) {
3017         log_trace(cds)("Skipping adapter handler %p (fp=%s) as it is not archived", entry, fp->as_basic_args_string());
3018       }
3019     }
3020     return true;
3021   }
3022 };
3023 
3024 size_t AdapterHandlerLibrary::estimate_size_for_archive() {
3025   return CompactHashtableWriter::estimate_size(_adapter_handler_table->number_of_entries());
3026 }
3027 
3028 void AdapterHandlerLibrary::archive_adapter_table() {
3029   CompactHashtableStats stats;
3030   CompactHashtableWriter writer(_adapter_handler_table->number_of_entries(), &stats);
3031   CopyAdapterTableToArchive copy(&writer);
3032   _adapter_handler_table->iterate(&copy);
3033   writer.dump(&_archived_adapter_handler_table, "archived adapter table");
3034 }
3035 
3036 void AdapterHandlerLibrary::serialize_shared_table_header(SerializeClosure* soc) {
3037   _archived_adapter_handler_table.serialize_header(soc);
3038 }
3039 #endif // INCLUDE_CDS
3040 
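     // The lowest of the recorded entry points; used as the reference point when the
     // entry points are relocated to a newly created AdapterBlob.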
3041 address AdapterHandlerEntry::base_address() {
3042   address base = _i2c_entry;
3043   if (base == nullptr)  base = _c2i_entry;
3044   assert(base <= _c2i_entry || _c2i_entry == nullptr, "");
3045   assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == nullptr, "");
3046   assert(base <= _c2i_no_clinit_check_entry || _c2i_no_clinit_check_entry == nullptr, "");
3047   return base;
3048 }
3049 
3050 void AdapterHandlerEntry::relocate(address new_base) {
3051   address old_base = base_address();
3052   assert(old_base != nullptr, "");
3053   ptrdiff_t delta = new_base - old_base;
3054   if (_i2c_entry != nullptr)
3055     _i2c_entry += delta;
3056   if (_c2i_entry != nullptr)
3057     _c2i_entry += delta;
3058   if (_c2i_unverified_entry != nullptr)
3059     _c2i_unverified_entry += delta;
3060   if (_c2i_no_clinit_check_entry != nullptr)
3061     _c2i_no_clinit_check_entry += delta;
3062   assert(base_address() == new_base, "");
3063 }
3064 
3065 void AdapterHandlerEntry::metaspace_pointers_do(MetaspaceClosure* it) {
3066   LogStreamHandle(Trace, cds) lsh;
3067   if (lsh.is_enabled()) {
3068     lsh.print("Iter(AdapterHandlerEntry): %p(%s)", this, _fingerprint->as_basic_args_string());
3069     lsh.cr();
3070   }
3071   it->push(&_fingerprint);
3072 }
3073 
3074 #if INCLUDE_CDS
3075 void AdapterHandlerEntry::remove_unshareable_info() {
3076   set_entry_points(nullptr, nullptr, nullptr, nullptr, false);
3077 }
3078 
3079 void AdapterHandlerEntry::restore_unshareable_info(TRAPS) {
3080   PerfTraceElapsedTime timer(ClassLoader::perf_method_adapters_time());
3081   // A fixed set of simple adapters are eagerly linked during JVM initialization
3082   // in AdapterHandlerLibrary::initialize().
3083   // Others may already have been linked because they are shared by other methods.
3084   if (is_linked()) {
3085     return;
3086   }
3087   AdapterBlob* adapter_blob = nullptr;
3088   {
3089     MutexLocker mu(AdapterHandlerLibrary_lock);
3090     assert(_fingerprint != nullptr, "_fingerprint must not be null");
3091 #ifdef ASSERT
3092     AdapterHandlerEntry* entry = AdapterHandlerLibrary::lookup(_fingerprint);
3093     assert(entry == this, "sanity check");
3094 #endif
3095     if (!AdapterHandlerLibrary::link_adapter_handler(this, adapter_blob)) {
3096       ResourceMark rm;
3097       log_warning(cds)("Failed to link AdapterHandlerEntry to its code in the AOT code cache");
3098       int nargs;
3099       BasicType* bt = _fingerprint->as_basic_type(nargs);
3100       if (!AdapterHandlerLibrary::generate_adapter_code(adapter_blob, this, nargs, bt, /* is_transient */ false)) {
3101         if (!is_init_completed()) {
3102           // Don't throw exceptions during VM initialization because java.lang.* classes
3103           // might not have been initialized, causing problems when constructing the
3104           // Java exception object.
3105           vm_exit_during_initialization("Out of space in CodeCache for adapters");
3106         } else {
3107           THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Out of space in CodeCache for adapters");
3108         }
3109       }
3110     }
3111   }
3112   // Outside of the lock
3113   if (adapter_blob != nullptr) {
3114     post_adapter_creation(adapter_blob, this);
3115   }
3116   assert(_linked, "AdapterHandlerEntry must now be linked");
3117 }
3118 #endif // INCLUDE_CDS
3119 
3120 AdapterHandlerEntry::~AdapterHandlerEntry() {
3121 #ifdef ASSERT
3122   FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
3123 #endif
3124   FreeHeap(this);
3125 }
3126 
3127 
3128 #ifdef ASSERT
3129 // Capture the code before relocation so that it can be compared
3130 // against other versions.  If the code is captured after relocation
3131 // then relative instructions won't be equivalent.
3132 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length) {
3133   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
3134   _saved_code_length = length;
3135   memcpy(_saved_code, buffer, length);
3136 }
3137 
3138 
3139 bool AdapterHandlerEntry::compare_code(AdapterHandlerEntry* other) {
3140   assert(_saved_code != nullptr && other->_saved_code != nullptr, "code not saved");
3141 
3142   if (other->_saved_code_length != _saved_code_length) {
3143     return false;
3144   }
3145 
3146   return memcmp(other->_saved_code, _saved_code, _saved_code_length) == 0;
3147 }
3148 #endif
3149 
3150 
3151 /**
3152  * Create a native wrapper for this native method.  The wrapper converts the
3153  * Java-compiled calling convention to the native convention, handles
3154  * arguments, and transitions to native.  On return from the native code we
3155  * transition back to Java, blocking if a safepoint is in progress.
3156  */
3157 void AdapterHandlerLibrary::create_native_wrapper(const methodHandle& method) {
3158   ResourceMark rm;
3159   nmethod* nm = nullptr;
3160 
3161   // Check if memory should be freed before allocation
3162   CodeCache::gc_on_allocation();
3163 
3164   assert(method->is_native(), "must be native");
3165   assert(method->is_special_native_intrinsic() ||
3166          method->has_native_function(), "must have something valid to call!");
3167 
3168   {
3169     // Perform the work while holding the lock, but perform any printing outside the lock
3170     MutexLocker mu(AdapterHandlerLibrary_lock);
3171     // See if somebody beat us to it
3172     if (method->code() != nullptr) {
3173       return;
3174     }
3175 
3176     const int compile_id = CompileBroker::assign_compile_id(method, CompileBroker::standard_entry_bci);
3177     assert(compile_id > 0, "Must generate native wrapper");
3178 
3179 
3180     ResourceMark rm;
3181     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
3182     if (buf != nullptr) {
3183       CodeBuffer buffer(buf);
3184 
3185       if (method->is_continuation_enter_intrinsic()) {
3186         buffer.initialize_stubs_size(192);
3187       }
3188 
3189       struct { double data[20]; } locs_buf;
3190       struct { double data[20]; } stubs_locs_buf;
3191       buffer.insts()->initialize_shared_locs((relocInfo*)&locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
3192 #if defined(AARCH64) || defined(PPC64)
3193       // On AArch64 with ZGC and nmethod entry barriers, we need all oops to be
3194       // in the constant pool to ensure ordering between the barrier and oops
3195       // accesses. For native_wrappers we need a constant.
3196       // On PPC64 the continuation enter intrinsic needs the constant pool for the compiled
3197       // static java call that is resolved in the runtime.
3198       if (PPC64_ONLY(method->is_continuation_enter_intrinsic() &&) true) {
3199         buffer.initialize_consts_size(8 PPC64_ONLY(+ 24));
3200       }
3201 #endif
3202       buffer.stubs()->initialize_shared_locs((relocInfo*)&stubs_locs_buf, sizeof(stubs_locs_buf) / sizeof(relocInfo));
3203       MacroAssembler _masm(&buffer);
3204 
3205       // Fill in the signature array, for the calling-convention call.
3206       const int total_args_passed = method->size_of_parameters();
3207 
3208       VMRegPair stack_regs[16];
3209       VMRegPair* regs = (total_args_passed <= 16) ? stack_regs : NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
3210 
3211       AdapterSignatureIterator si(method->signature(), method->constMethod()->fingerprint(),
3212                               method->is_static(), total_args_passed);
3213       BasicType* sig_bt = si.basic_types();
3214       assert(si.slots() == total_args_passed, "");
3215       BasicType ret_type = si.return_type();
3216 
3217       // Now get the compiled-Java arguments layout.
3218       SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed);
3219 
3220       // Generate the compiled-to-native wrapper code
3221       nm = SharedRuntime::generate_native_wrapper(&_masm, method, compile_id, sig_bt, regs, ret_type);
3222 
3223       if (nm != nullptr) {
3224         {
3225           MutexLocker pl(NMethodState_lock, Mutex::_no_safepoint_check_flag);
3226           if (nm->make_in_use()) {
3227             method->set_code(method, nm);
3228           }
3229         }
3230 
3231         DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, CompileBroker::compiler(CompLevel_simple));
3232         if (directive->PrintAssemblyOption) {
3233           nm->print_code();
3234         }
3235         DirectivesStack::release(directive);
3236       }
3237     }
3238   } // Unlock AdapterHandlerLibrary_lock
3239 
3240 
3241   // Install the generated code.
3242   if (nm != nullptr) {
3243     const char *msg = method->is_static() ? "(static)" : "";
3244     CompileTask::print_ul(nm, msg);
3245     if (PrintCompilation) {
3246       ttyLocker ttyl;
3247       CompileTask::print(tty, nm, msg);
3248     }
3249     nm->post_compiled_method_load_event();
3250   }
3251 }
3252 
3253 // -------------------------------------------------------------------------
3254 // Java-Java calling convention
3255 // (what you use when Java calls Java)
3256 
3257 //------------------------------name_for_receiver----------------------------------
3258 // For a given signature, return the VMReg for parameter 0.
3259 VMReg SharedRuntime::name_for_receiver() {
3260   VMRegPair regs;
3261   BasicType sig_bt = T_OBJECT;
3262   (void) java_calling_convention(&sig_bt, &regs, 1);
3263   // Return argument 0 register.  In the LP64 build pointers
3264   // take 2 registers, but the VM wants only the 'main' name.
3265   return regs.first();
3266 }
3267 
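     // Compute the compiled-Java argument layout for the given signature, optionally
     // prepending a receiver and appending an appendix argument. Stack-based VMRegs are
     // biased by out_preserve_stack_slots() so the returned offsets are "true" offsets.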
3268 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
3269   // This method returns a data structure allocated as a
3270   // ResourceObject, so do not put any ResourceMarks in here.
3271 
3272   BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
3273   VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
3274   int cnt = 0;
3275   if (has_receiver) {
3276     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
3277   }
3278 
3279   for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
3280     BasicType type = ss.type();
3281     sig_bt[cnt++] = type;
3282     if (is_double_word_type(type))
3283       sig_bt[cnt++] = T_VOID;
3284   }
3285 
3286   if (has_appendix) {
3287     sig_bt[cnt++] = T_OBJECT;
3288   }
3289 
3290   assert(cnt < 256, "grow table size");
3291 
3292   int comp_args_on_stack;
3293   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt);
3294 
3295   // the calling convention doesn't count out_preserve_stack_slots so
3296   // we must add that in to get "true" stack offsets.
3297 
3298   if (comp_args_on_stack) {
3299     for (int i = 0; i < cnt; i++) {
3300       VMReg reg1 = regs[i].first();
3301       if (reg1->is_stack()) {
3302         // Yuck
3303         reg1 = reg1->bias(out_preserve_stack_slots());
3304       }
3305       VMReg reg2 = regs[i].second();
3306       if (reg2->is_stack()) {
3307         // Yuck
3308         reg2 = reg2->bias(out_preserve_stack_slots());
3309       }
3310       regs[i].set_pair(reg2, reg1);
3311     }
3312   }
3313 
3314   // results
3315   *arg_size = cnt;
3316   return regs;
3317 }
3318 
3319 // OSR Migration Code
3320 //
3321 // This code is used to convert interpreter frames into compiled frames.  It is
3322 // called from the very start of a compiled OSR nmethod.  A temp array is
3323 // allocated to hold the interesting bits of the interpreter frame.  All
3324 // active locks are inflated to allow them to move.  The displaced headers and
3325 // active interpreter locals are copied into the temp buffer.  Then we return
3326 // back to the compiled code.  The compiled code then pops the current
3327 // interpreter frame off the stack and pushes a new compiled frame.  Then it
3328 // copies the interpreter locals and displaced headers where it wants.
3329 // Finally it calls back to free the temp buffer.
3330 //
3331 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
3332 
3333 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *current) )
3334   assert(current == JavaThread::current(), "pre-condition");
3335 
3336   // During OSR migration, we unwind the interpreted frame and replace it with a compiled
3337   // frame. The stack watermark code below ensures that the interpreted frame is processed
3338   // before it gets unwound. This is helpful as the size of the compiled frame could be
3339   // larger than the interpreted frame, which could result in the new frame not being
3340   // processed correctly.
3341   StackWatermarkSet::before_unwind(current);
3342 
3343   //
3344   // This code is dependent on the memory layout of the interpreter local
3345   // array and the monitors. On all of our platforms the layout is identical
3346 // so this code is shared. If some platform lays its arrays out
3347 // differently, then this code could move to platform-specific code, or
3348   // the code here could be modified to copy items one at a time using
3349   // frame accessor methods and be platform independent.
3350 
3351   frame fr = current->last_frame();
3352   assert(fr.is_interpreted_frame(), "");
3353   assert(fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks");
3354 
3355   // Figure out how many monitors are active.
3356   int active_monitor_count = 0;
3357   for (BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
3358        kptr < fr.interpreter_frame_monitor_begin();
3359        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
3360     if (kptr->obj() != nullptr) active_monitor_count++;
3361   }
3362 
3363   // QQQ we could place the number of active monitors in the array so that compiled code
3364   // could double check it.
3365 
3366   Method* moop = fr.interpreter_frame_method();
3367   int max_locals = moop->max_locals();
3368   // Allocate temp buffer, 1 word per local & 2 per active monitor
3369   int buf_size_words = max_locals + active_monitor_count * BasicObjectLock::size();
3370   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
3371 
3372   // Copy the locals.  Order is preserved so that loading of longs works.
3373   // Since there's no GC I can copy the oops blindly.
3374   assert(sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
3375   Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
3376                        (HeapWord*)&buf[0],
3377                        max_locals);
3378 
3379   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
3380   int i = max_locals;
3381   for (BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
3382        kptr2 < fr.interpreter_frame_monitor_begin();
3383        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
3384     if (kptr2->obj() != nullptr) {         // Avoid 'holes' in the monitor array
3385       BasicLock *lock = kptr2->lock();
3386       if (LockingMode == LM_LEGACY) {
3387         // Inflate so the object's header no longer refers to the BasicLock.
3388         if (lock->displaced_header().is_unlocked()) {
3389           // The object is locked and the resulting ObjectMonitor* will also be
3390           // locked so it can't be async deflated until ownership is dropped.
3391           // See the big comment in basicLock.cpp: BasicLock::move_to().
3392           ObjectSynchronizer::inflate_helper(kptr2->obj());
3393         }
3394         // Now the displaced header is free to move because the
3395         // object's header no longer refers to it.
3396         buf[i] = (intptr_t)lock->displaced_header().value();
3397       } else if (UseObjectMonitorTable) {
3398         buf[i] = (intptr_t)lock->object_monitor_cache();
3399       }
3400 #ifdef ASSERT
3401       else {
3402         buf[i] = badDispHeaderOSR;
3403       }
3404 #endif
3405       i++;
3406       buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
3407     }
3408   }
3409   assert(i - max_locals == active_monitor_count*2, "found the expected number of monitors");
3410 
3411   RegisterMap map(current,
3412                   RegisterMap::UpdateMap::skip,
3413                   RegisterMap::ProcessFrames::include,
3414                   RegisterMap::WalkContinuation::skip);
3415   frame sender = fr.sender(&map);
3416   if (sender.is_interpreted_frame()) {
3417     current->push_cont_fastpath(sender.sp());
3418   }
3419 
3420   return buf;
3421 JRT_END
3422 
3423 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
3424   FREE_C_HEAP_ARRAY(intptr_t, buf);
3425 JRT_END
3426 
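     // Returns true if the given CodeBlob is the adapter blob of some handler, checking the
     // archived adapter table first and then the runtime table.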
3427 bool AdapterHandlerLibrary::contains(const CodeBlob* b) {
3428   bool found = false;
3429 #if INCLUDE_CDS
3430   auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3431     return (found = (b == CodeCache::find_blob(handler->get_i2c_entry())));
3432   };
3433   _archived_adapter_handler_table.iterate(findblob_archived_table);
3434 #endif // INCLUDE_CDS
3435   if (!found) {
3436     auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3437       return (found = (b == CodeCache::find_blob(a->get_i2c_entry())));
3438     };
3439     assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3440     _adapter_handler_table->iterate(findblob_runtime_table);
3441   }
3442   return found;
3443 }
3444 
3445 const char* AdapterHandlerLibrary::name(AdapterFingerPrint* fingerprint) {
3446   return fingerprint->as_basic_args_string();
3447 }
3448 
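     // The id used to identify an adapter in the AOT code cache is its fingerprint hash.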
3449 uint32_t AdapterHandlerLibrary::id(AdapterFingerPrint* fingerprint) {
3450   unsigned int hash = fingerprint->compute_hash();
3451   return hash;
3452 }
3453 
3454 void AdapterHandlerLibrary::print_handler_on(outputStream* st, const CodeBlob* b) {
3455   bool found = false;
3456 #if INCLUDE_CDS
3457   auto findblob_archived_table = [&] (AdapterHandlerEntry* handler) {
3458     if (b == CodeCache::find_blob(handler->get_i2c_entry())) {
3459       found = true;
3460       st->print("Adapter for signature: ");
3461       handler->print_adapter_on(st);
3462       return true;
3463     } else {
3464       return false; // keep looking
3465 
3466     }
3467   };
3468   _archived_adapter_handler_table.iterate(findblob_archived_table);
3469 #endif // INCLUDE_CDS
3470   if (!found) {
3471     auto findblob_runtime_table = [&] (AdapterFingerPrint* key, AdapterHandlerEntry* a) {
3472       if (b == CodeCache::find_blob(a->get_i2c_entry())) {
3473         found = true;
3474         st->print("Adapter for signature: ");
3475         a->print_adapter_on(st);
3476         return true;
3477       } else {
3478         return false; // keep looking
3479       }
3480     };
3481     assert_locked_or_safepoint(AdapterHandlerLibrary_lock);
3482     _adapter_handler_table->iterate(findblob_runtime_table);
3483   }
3484   assert(found, "Should have found handler");
3485 }
3486 
3487 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
3488   st->print("AHE@" INTPTR_FORMAT ": %s", p2i(this), fingerprint()->as_string());
3489   if (get_i2c_entry() != nullptr) {
3490     st->print(" i2c: " INTPTR_FORMAT, p2i(get_i2c_entry()));
3491   }
3492   if (get_c2i_entry() != nullptr) {
3493     st->print(" c2i: " INTPTR_FORMAT, p2i(get_c2i_entry()));
3494   }
3495   if (get_c2i_unverified_entry() != nullptr) {
3496     st->print(" c2iUV: " INTPTR_FORMAT, p2i(get_c2i_unverified_entry()));
3497   }
3498   if (get_c2i_no_clinit_check_entry() != nullptr) {
3499     st->print(" c2iNCI: " INTPTR_FORMAT, p2i(get_c2i_no_clinit_check_entry()));
3500   }
3501   st->cr();
3502 }
3503 
3504 bool AdapterHandlerLibrary::is_abstract_method_adapter(AdapterHandlerEntry* entry) {
3505   if (entry == _abstract_method_handler) {
3506     return true;
3507   }
3508   return false;
3509 }
3510 
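     // Re-enable the current thread's reserved stack zone and reset the reserved-stack
     // activation watermark to the stack base.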
3511 JRT_LEAF(void, SharedRuntime::enable_stack_reserved_zone(JavaThread* current))
3512   assert(current == JavaThread::current(), "pre-condition");
3513   StackOverflow* overflow_state = current->stack_overflow_state();
3514   overflow_state->enable_stack_reserved_zone(/*check_if_disabled*/true);
3515   overflow_state->set_reserved_stack_activation(current->stack_base());
3516 JRT_END
3517 
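     // Walk the stack from 'fr' looking for activations of methods that have reserved stack
     // access (including inlined scopes of compiled frames). A warning is printed and a JFR
     // event is posted for each one found; the last such activation encountered is returned.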
3518 frame SharedRuntime::look_for_reserved_stack_annotated_method(JavaThread* current, frame fr) {
3519   ResourceMark rm(current);
3520   frame activation;
3521   nmethod* nm = nullptr;
3522   int count = 1;
3523 
3524   assert(fr.is_java_frame(), "Must start on Java frame");
3525 
3526   RegisterMap map(JavaThread::current(),
3527                   RegisterMap::UpdateMap::skip,
3528                   RegisterMap::ProcessFrames::skip,
3529                   RegisterMap::WalkContinuation::skip); // don't walk continuations
3530   for (; !fr.is_first_frame(); fr = fr.sender(&map)) {
3531     if (!fr.is_java_frame()) {
3532       continue;
3533     }
3534 
3535     Method* method = nullptr;
3536     bool found = false;
3537     if (fr.is_interpreted_frame()) {
3538       method = fr.interpreter_frame_method();
3539       if (method != nullptr && method->has_reserved_stack_access()) {
3540         found = true;
3541       }
3542     } else {
3543       CodeBlob* cb = fr.cb();
3544       if (cb != nullptr && cb->is_nmethod()) {
3545         nm = cb->as_nmethod();
3546         method = nm->method();
3547         // scope_desc_near() must be used, instead of scope_desc_at() because on
3548         // SPARC, the pcDesc can be on the delay slot after the call instruction.
3549         for (ScopeDesc *sd = nm->scope_desc_near(fr.pc()); sd != nullptr; sd = sd->sender()) {
3550           method = sd->method();
3551           if (method != nullptr && method->has_reserved_stack_access()) {
3552             found = true;
3553           }
3554         }
3555       }
3556     }
3557     if (found) {
3558       activation = fr;
3559       warning("Potentially dangerous stack overflow in "
3560               "ReservedStackAccess annotated method %s [%d]",
3561               method->name_and_sig_as_C_string(), count++);
3562       EventReservedStackActivation event;
3563       if (event.should_commit()) {
3564         event.set_method(method);
3565         event.commit();
3566       }
3567     }
3568   }
3569   return activation;
3570 }
3571 
3572 void SharedRuntime::on_slowpath_allocation_exit(JavaThread* current) {
3573   // After any safepoint, just before going back to compiled code,
3574   // we inform the GC that we will be doing initializing writes to
3575   // this object in the future without emitting card-marks, so
3576   // GC may take any compensating steps.
3577 
3578   oop new_obj = current->vm_result();
3579   if (new_obj == nullptr) return;
3580 
3581   BarrierSet *bs = BarrierSet::barrier_set();
3582   bs->on_slowpath_allocation_exit(current, new_obj);
3583 }