/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "logging/log.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/runtimeUpcalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubInfo.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char* name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}

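// The frame size is recorded the first time it is set and must not change
// afterwards; the same set-once rule applies to the number of runtime
// arguments below.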
void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}

// Implementation of Runtime1
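// One generated code blob per C1 stub, indexed by StubInfo::c1_offset(id).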
CodeBlob* Runtime1::_blobs[StubInfo::C1_STUB_COUNT];

#ifndef PRODUCT
// statistics
uint Runtime1::_generic_arraycopystub_cnt = 0;
uint Runtime1::_arraycopy_slowcase_cnt = 0;
uint Runtime1::_arraycopy_checkcast_cnt = 0;
uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
uint Runtime1::_new_type_array_slowcase_cnt = 0;
uint Runtime1::_new_object_array_slowcase_cnt = 0;
uint Runtime1::_new_instance_slowcase_cnt = 0;
uint Runtime1::_new_multi_array_slowcase_cnt = 0;
uint Runtime1::_monitorenter_slowcase_cnt = 0;
uint Runtime1::_monitorexit_slowcase_cnt = 0;
uint Runtime1::_patch_code_slowcase_cnt = 0;
uint Runtime1::_throw_range_check_exception_count = 0;
uint Runtime1::_throw_index_exception_count = 0;
uint Runtime1::_throw_div0_exception_count = 0;
uint Runtime1::_throw_null_pointer_exception_count = 0;
uint Runtime1::_throw_class_cast_exception_count = 0;
uint Runtime1::_throw_incompatible_class_change_error_count = 0;
uint Runtime1::_throw_count = 0;

static uint _byte_arraycopy_stub_cnt = 0;
static uint _short_arraycopy_stub_cnt = 0;
static uint _int_arraycopy_stub_cnt = 0;
static uint _long_arraycopy_stub_cnt = 0;
static uint _oop_arraycopy_stub_cnt = 0;

address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return nullptr;
  }
}


#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted(JavaThread* current) {
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller(JavaThread* current) {
  if (!caller_is_deopted(current)) {
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame runtime_frame = current->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");
  }
}

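// Closure passed to generate_blob: dispatches code generation for a single
// C1 stub id to Runtime1::generate_code_for.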
class C1StubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 private:
  StubId _id;
 public:
  C1StubAssemblerCodeGenClosure(StubId id) : _id(id) {
    assert(StubInfo::is_c1(_id), "not a c1 stub id %s", StubInfo::name(_id));
  }
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    return Runtime1::generate_code_for(_id, sasm);
  }
};

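// Generate (or load) the code blob for a single stub. A blob previously
// stored in the AOT code cache is reused when available; otherwise the code
// is generated via the closure and, on success, stored back into the cache.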
CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
  if (id != StubId::NO_STUBID) {
    CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C1Blob, StubInfo::blob(id));
    if (blob != nullptr) {
      return blob;
    }
  }

  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  OopMapSet* oop_maps;
  int frame_size;
  bool must_gc_arguments;

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name, (int)id);
  // generate code for runtime stub
  oop_maps = cl->generate_code(sasm);
  assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");
  assert(!expect_oop_map || oop_maps != nullptr, "must have an oopmap");

  // align so printing shows nops instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();

  frame_size = sasm->frame_size();
  must_gc_arguments = sasm->must_gc_arguments();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 frame_size,
                                                 oop_maps,
                                                 must_gc_arguments,
                                                 false /* alloc_fail_is_fatal */ );
  if (blob != nullptr && (int)id >= 0) {
    AOTCodeCache::store_code_blob(*blob, AOTCodeEntry::C1Blob, StubInfo::blob(id));
  }
  return blob;
}

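// Generate the blob for the given C1 stub id and install it in _blobs.
// Returns false if the blob could not be generated.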
bool Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubId id) {
  assert(StubInfo::is_c1(id), "not a c1 stub %s", StubInfo::name(id));
  bool expect_oop_map = true;
#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
  case StubId::c1_dtrace_object_alloc_id:
  case StubId::c1_slow_subtype_check_id:
  case StubId::c1_fpu2long_stub_id:
  case StubId::c1_unwind_exception_id:
  case StubId::c1_counter_overflow_id:
  case StubId::c1_is_instance_of_id:
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  C1StubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  int idx = StubInfo::c1_offset(id);   // will assert on non-c1 id
  _blobs[idx] = blob;
  return blob != nullptr;
}

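// Generate all C1 runtime stubs, followed by the GC barrier set's C1 stubs.
// Returns false as soon as any blob fails to generate.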
bool Runtime1::initialize(BufferBlob* blob) {
  init_counters();
  // platform-dependent initialization
  initialize_pd();
  // iterate blobs in C1 group and generate a single stub per blob
  StubId id = StubInfo::stub_base(StubGroup::C1);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    if (!generate_blob_for(blob, id)) {
      return false;
    }
    if (id == StubId::c1_forward_exception_id) {
      // publish early c1 stubs at this point so later stubs can refer to them
      AOTCodeCache::init_early_c1_table();
    }
  }
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    id = StubInfo::stub_base(StubGroup::C1);
    for (; id != limit; id = StubInfo::next(id)) {
      CodeBlob* blob = blob_for(id);
      blob->print();
      if (blob->oop_maps() != nullptr) {
        blob->oop_maps()->print();
      }
    }
  }
#endif
  BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
  return bs->generate_c1_runtime_stubs(blob);
}

CodeBlob* Runtime1::blob_for(StubId id) {
  int idx = StubInfo::c1_offset(id);   // will assert on non-c1 id
  return _blobs[idx];
}


const char* Runtime1::name_for(StubId id) {
  return StubInfo::name(id);
}

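// Map a runtime entry address back to a printable name: first search the C1
// stubs, then a list of well-known runtime functions, then the runtime
// upcalls, and finally any platform-specific names.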
const char* Runtime1::name_for_address(address entry) {
  // iterate stubs starting from C1 group base
  StubId id = StubInfo::stub_base(StubGroup::C1);
  StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
  for (; id != limit; id = StubInfo::next(id)) {
    if (entry == entry_for(id)) return StubInfo::name(id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JfrTime::time_function());
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());
  FUNCTION_CASE(entry, StubRoutines::dsinh());
  FUNCTION_CASE(entry, StubRoutines::dtanh());
  FUNCTION_CASE(entry, StubRoutines::dcbrt());

#undef FUNCTION_CASE

  // Runtime upcalls also have a map of addresses to names
  const char* upcall_name = RuntimeUpcalls::get_name_for_upcall_address(entry);
  if (upcall_name != nullptr) {
    return upcall_name;
  }

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}


JRT_ENTRY_PROF(void, Runtime1, new_instance, Runtime1::new_instance(JavaThread* current, Klass* klass))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_instance_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  current->set_vm_result_oop(obj);
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_type_array, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since it is not used anymore
  //       after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result_oop(obj);
  // This path is pretty rare, but deoptimizing here stresses the
  // deoptimization machinery, so force a deopt to exercise that path
  // when DeoptimizeALot is set.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_object_array, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since it is not used anymore
  //       after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result_oop(obj);
  // This path is pretty rare, but deoptimizing here stresses the
  // deoptimization machinery, so force a deopt to exercise that path
  // when DeoptimizeALot is set.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_multi_array, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result_oop(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubId id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END


// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee method (which may have been inlined into
// the enclosing method) is passed as an argument. To make that possible it is embedded in the
// code as a constant.
static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
  nmethod* osr_nm = nullptr;
  methodHandle method(current, m);

  RegisterMap map(current,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame fr = current->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != nullptr && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(current, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
  osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
  return osr_nm;
}

JRT_BLOCK_ENTRY_PROF(address, Runtime1, counter_overflow, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK_NO_ASYNC
    osr_nm = counter_overflow_helper(current, bci, method);
    if (osr_nm != nullptr) {
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = current->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(current, fr.id());
    }
  JRT_BLOCK_END
  return nullptr;
JRT_END

extern void vm_exit(int code);

// Enter this method from the compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the VM. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC_PROF(static address, Runtime1, exception_handler_for_pc_helper, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
  MACOS_AARCH64_ONLY(current->wx_enable_write());
  Handle exception(current, ex);

  // This function is called when we are about to throw an exception. Therefore,
  // we have to poll the stack watermark barrier to make sure that not yet safe
  // stack frames are made safe before returning into them.
  if (current->last_frame().cb() == Runtime1::blob_for(StubId::c1_handle_exception_from_callee_id)) {
    // The StubId::c1_handle_exception_from_callee_id handler is invoked after the
    // frame has been unwound. It instead builds its own stub frame, to call the
    // runtime. But the throwing frame has already been unwound here.
    StackWatermarkSet::after_unwind(current);
  }

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::include,
                    RegisterMap::WalkContinuation::skip);
    frame exception_frame = current->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
  assert(exception.not_null(), "null exceptions should be handled by throw_exception");
  // Check that exception is a subclass of Throwable
  assert(exception->is_a(vmClasses::Throwable_klass()),
         "Exception not subclass of Throwable");

  // debugging support
  // tracing
  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm; // print_value_string
    stringStream tempst;
    assert(nm->method() != nullptr, "Unexpected null method()");
    tempst.print("C1 compiled method <%s>\n"
                 " at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                 nm->method()->print_value_string(), p2i(pc), p2i(current));
    Exceptions::log_exception(exception, tempst.freeze());
  }
  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

  // Check the stack guard pages and re-enable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::include,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame stub_frame = current->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != nullptr) {
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation as null).
  address continuation = nullptr;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    current->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    current->set_exception_oop(exception());
    current->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only when no other exception occurred
    // during the computation of the compiled exception handler. Checking
    // for exception oop equality is not sufficient because some exceptions
    // are pre-allocated and reused.
    if (continuation != nullptr && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  current->set_vm_result_oop(exception());

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(current), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from the exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* current) {
  oop exception = current->exception_oop();
  address pc = current->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(NoHandleMark nhm);
  nmethod* nm = nullptr;
  address continuation = nullptr;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
  }
  // Back in Java mode: use no oops and DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != nullptr && caller_is_deopted(current)) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != nullptr, "no handler found");
  return continuation;
}


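// The message buffer below is sized to hold the format text plus the decimal
// expansion of the two jint values (index and array length).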
JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_range_check_exception_count++;
  }
#endif
  const int len = 35;
  assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
  char message[2 * jintAsStringSize + len];
  os::snprintf_checked(message, sizeof(message), "Index %d out of bounds for length %d", index, a->length());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_index_exception_count++;
  }
#endif
  char message[16];
  os::snprintf_checked(message, sizeof(message), "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_div0_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_null_pointer_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_class_cast_exception_count++;
  }
#endif
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_BLOCK_ENTRY_PROF(void, Runtime1, monitorenter, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  assert(obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
JRT_END


JRT_LEAF_PROF(void, Runtime1, monitorexit, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
  assert(current == JavaThread::current(), "pre-condition");
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorexit_slowcase_cnt++;
  }
#endif
  assert(current->last_Java_sp(), "last_Java_sp must be set");
  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be an object");
  SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
JRT_END

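// Deoptimize the caller frame in response to a trap request from C1-compiled
// code, invalidating the nmethod first when the requested action is
// Action_make_not_entrant.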
// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY_PROF(void, Runtime1, deoptimize, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != nullptr, "Sanity check");
  methodHandle method(current, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE)) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != nullptr) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


#ifndef DEOPTIMIZE_WHEN_PATCHING

static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code       = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller,
                                     Bytecodes::java_code(code), ClassInitMode::init, CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward, involving simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.  The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.  The
// current implementation takes the latter approach, as described below.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way.  The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized.  getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it.  This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class.  It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub.  Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing thread will continue on its way.  Once the class is
// fully_initialized, the initializing_thread of the class becomes
// null, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.

// NB:
//
// Patchable instruction sequences inherently exhibit race conditions,
// where thread A is patching an instruction at the same time thread B
// is executing it.  The algorithms we use ensure that any observation
// that B can make on any intermediate states during A's patching will
// always end up with a correct outcome.  This is easiest if there are
// few or no intermediate states.  (Some inline caches have two
// related instructions that must be patched in tandem.  For those,
// intermediate states seem to be unavoidable, but we will get the
// right answer from all possible observation orders.)
//
// When patching the entry instruction at the head of a method, or a
// linkable call instruction inside of a method, we try very hard to
// use a patch sequence which executes as a single memory transaction.
// This means, in practice, that when thread A patches an instruction,
// it should patch a 32-bit or 64-bit word that somehow overlaps the
// instruction or is contained in it.  We believe that memory hardware
// will never break up such a word write, if it is naturally aligned
// for the word being written.  We also know that some CPUs work very
// hard to create atomic updates even of naturally unaligned words,
// but we don't want to bet the farm on this always working.
//
// Therefore, if there is any chance of a race condition, we try to
// patch only naturally aligned words, as single, full-word writes.

JRT_ENTRY_PROF(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, StubId stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  Klass* init_klass = nullptr; // klass needed by load_klass_patching code
  Klass* load_klass = nullptr; // klass needed by load_klass_patching code
  Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
  Handle appendix(current, nullptr); // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == StubId::c1_load_klass_patching_id || stub_id == StubId::c1_load_mirror_patching_id);

  if (stub_id == StubId::c1_access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(current, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method,
                                       Bytecodes::java_code(code), ClassInitMode::init, CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time
    // it must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets.  load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, force deoptimizing.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency against volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = nullptr;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = klass;
          mirror = Handle(current, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(current, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = k;
  } else if (stub_id == StubId::c1_load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(current, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    switch (bc) {
      case Bytecodes::_invokehandle: {
        ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
        appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
        break;
      }
      case Bytecodes::_invokedynamic: {
        appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant(nmethod::InvalidationReason::C1_CODEPATCH);
    }

    Deoptimization::deoptimize_frame(current, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted(current)) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
        if (TracePatching) {
          ttyLocker ttyl;
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == StubId::c1_access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != nullptr, "nmethod not found");

          // NOTE we use pc() not original_pc() because we already know they are
          // identical otherwise we'd have never entered this block of code

          const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != nullptr, "null check");
          map->print();
          tty->cr();

          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
        bool do_patch = true;
        if (stub_id == StubId::c1_access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete.  In this case the
          // patch site is set up so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass)->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == StubId::c1_load_klass_patching_id) {
              assert(load_klass != nullptr, "klass not set");
              n_copy->set_data((intx) (load_klass));
            } else {
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == StubId::c1_load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }
1226 
1227         if (do_patch) {
1228           // replace instructions
1229           // first replace the tail, then the call
1230 #ifdef ARM
1231           if((load_klass_or_mirror_patch_id ||
1232               stub_id == StubId::c1_load_appendix_patching_id) &&
1233               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1234             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1235             address addr = nullptr;
1236             assert(nm != nullptr, "invalid nmethod_pc");
1237             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1238             while (mds.next()) {
1239               if (mds.type() == relocInfo::oop_type) {
1240                 assert(stub_id == StubId::c1_load_mirror_patching_id ||
1241                        stub_id == StubId::c1_load_appendix_patching_id, "wrong stub id");
1242                 oop_Relocation* r = mds.oop_reloc();
1243                 addr = (address)r->oop_addr();
1244                 break;
1245               } else if (mds.type() == relocInfo::metadata_type) {
1246                 assert(stub_id == StubId::c1_load_klass_patching_id, "wrong stub id");
1247                 metadata_Relocation* r = mds.metadata_reloc();
1248                 addr = (address)r->metadata_addr();
1249                 break;
1250               }
1251             }
1252             assert(addr != nullptr, "metadata relocation must exist");
1253             copy_buff -= *byte_count;
1254             NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
1255             n_copy2->set_pc_relative_offset(addr, instr_pc);
1256           }
1257 #endif
1258 
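          // Copy everything after the head jump word byte by byte: other
          // threads keep branching to the patching stub until
          // replace_mt_safe below atomically rewrites the head.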
1259           for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
1260             address ptr = copy_buff + i;
1261             int a_byte = (*ptr) & 0xFF;
1262             address dst = instr_pc + i;
1263             *(unsigned char*)dst = (unsigned char) a_byte;
1264           }
1265           ICache::invalidate_range(instr_pc, *byte_count);
1266           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
1267 
1268           if (load_klass_or_mirror_patch_id ||
1269               stub_id == StubId::c1_load_appendix_patching_id) {
1270             relocInfo::relocType rtype =
1271               (stub_id == StubId::c1_load_klass_patching_id) ?
1272                                    relocInfo::metadata_type :
1273                                    relocInfo::oop_type;
            // update relocInfo to metadata or oop type
1275             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1276             assert(nm != nullptr, "invalid nmethod_pc");
1277 
            // The old patch site is now a move instruction, so update
            // the reloc info so that the embedded value gets updated
            // during future GCs.
1281             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1282             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1283                                                      relocInfo::none, rtype);
1284           }
1285 
1286         } else {
1287           ICache::invalidate_range(copy_buff, *byte_count);
1288           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1289         }
1290       }
1291     }
1292     // If we are patching in a non-perm oop, make sure the nmethod
1293     // is on the right list.
1294     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1295     guarantee(nm != nullptr, "only nmethods can contain non-perm oops");
1296 
1297     // Since we've patched some oops in the nmethod,
1298     // (re)register it with the heap.
1299     Universe::heap()->register_nmethod(nm);
1300   }
1301 JRT_END
1302 
1303 #else // DEOPTIMIZE_WHEN_PATCHING
1304 
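// Returns false when the bytecode references a klass whose resolution
// previously failed: in that case the nmethod need not be invalidated,
// since re-executing the bytecode in the interpreter will simply throw
// the recorded resolution error.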
1305 static bool is_patching_needed(JavaThread* current, StubId stub_id) {
1306   if (stub_id == StubId::c1_load_klass_patching_id ||
1307       stub_id == StubId::c1_load_mirror_patching_id) {
1308     // last java frame on stack
1309     vframeStream vfst(current, true);
1310     assert(!vfst.at_end(), "Java frame must exist");
1311 
1312     methodHandle caller_method(current, vfst.method());
1313     int bci = vfst.bci();
1314     Bytecodes::Code code = caller_method()->java_code_at(bci);
1315 
1316     switch (code) {
1317       case Bytecodes::_new:
1318       case Bytecodes::_anewarray:
1319       case Bytecodes::_multianewarray:
1320       case Bytecodes::_instanceof:
1321       case Bytecodes::_checkcast: {
1322         Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1323         constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1324         if (tag.is_unresolved_klass_in_error()) {
1325           return false; // throws resolution error
1326         }
1327         break;
1328       }
1329 
1330       default: break;
1331     }
1332   }
1333   return true;
1334 }
1335 
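// DEOPTIMIZE_WHEN_PATCHING variant: instead of patching the code in place,
// make the nmethod not entrant (when warranted) and deoptimize the caller
// frame so the bytecode is re-executed in the interpreter.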
1336 PROF_ENTRY(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, StubId stub_id))
1337 #ifndef PRODUCT
1338   if (PrintC1Statistics) {
1339     _patch_code_slowcase_cnt++;
1340   }
1341 #endif
1342 
  // Enable WXWrite: the function is called by a C1 stub as a runtime function
  // (see the other implementation above).
1345   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1346 
1347   if (TracePatching) {
1348     tty->print_cr("Deoptimizing because patch is needed");
1349   }
1350 
1351   RegisterMap reg_map(current,
1352                       RegisterMap::UpdateMap::skip,
1353                       RegisterMap::ProcessFrames::include,
1354                       RegisterMap::WalkContinuation::skip);
1355 
1356   frame runtime_frame = current->last_frame();
1357   frame caller_frame = runtime_frame.sender(&reg_map);
1358   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1359 
1360   if (is_patching_needed(current, stub_id)) {
1361     // Make sure the nmethod is invalidated, i.e. made not entrant.
1362     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1363     if (nm != nullptr) {
1364       nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING);
1365     }
1366   }
1367 
1368   Deoptimization::deoptimize_frame(current, caller_frame.id());
1369   // Return to the now deoptimized frame.
1370   postcond(caller_is_deopted(current));
1371 PROF_END
1372 
1373 #endif // DEOPTIMIZE_WHEN_PATCHING
1374 
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling
// a helper method which does the normal VM transition and, when it
// completes, we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
1382 //
1383 int Runtime1::move_klass_patching(JavaThread* current) {
  //
  // NOTE: we are still in Java
  //
1387   DEBUG_ONLY(NoHandleMark nhm;)
1388   {
1389     // Enter VM mode
1390     ResetNoHandleMark rnhm;
1391     patch_code(current, StubId::c1_load_klass_patching_id);
1392   }
  // Back in Java; use no oops and DON'T safepoint
1394 
1395   // Return true if calling code is deoptimized
1396 
1397   return caller_is_deopted(current);
1398 }
1399 
1400 int Runtime1::move_mirror_patching(JavaThread* current) {
  //
  // NOTE: we are still in Java
  //
1404   DEBUG_ONLY(NoHandleMark nhm;)
1405   {
1406     // Enter VM mode
1407     ResetNoHandleMark rnhm;
1408     patch_code(current, StubId::c1_load_mirror_patching_id);
1409   }
  // Back in Java; use no oops and DON'T safepoint
1411 
1412   // Return true if calling code is deoptimized
1413 
1414   return caller_is_deopted(current);
1415 }
1416 
1417 int Runtime1::move_appendix_patching(JavaThread* current) {
  //
  // NOTE: we are still in Java
  //
1421   DEBUG_ONLY(NoHandleMark nhm;)
1422   {
1423     // Enter VM mode
1424     ResetNoHandleMark rnhm;
1425     patch_code(current, StubId::c1_load_appendix_patching_id);
1426   }
  // Back in Java; use no oops and DON'T safepoint
1428 
1429   // Return true if calling code is deoptimized
1430 
1431   return caller_is_deopted(current);
1432 }
1433 
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling
// a helper method which does the normal VM transition and, when it
// completes, we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
1441 //
1442 int Runtime1::access_field_patching(JavaThread* current) {
1443   //
1444   // NOTE: we are still in Java
1445   //
1446   // Handles created in this function will be deleted by the
1447   // HandleMarkCleaner in the transition to the VM.
1448   NoHandleMark nhm;
1449   {
1450     // Enter VM mode
1451     ResetNoHandleMark rnhm;
1452     patch_code(current, StubId::c1_access_field_patching_id);
1453   }
  // Back in Java; use no oops and DON'T safepoint
1455 
1456   // Return true if calling code is deoptimized
1457 
1458   return caller_is_deopted(current);
1459 }
1460 
1461 
1462 JRT_LEAF_PROF_NO_THREAD(void, Runtime1, trace_block_entry, Runtime1::trace_block_entry(jint block_id))
1463   // for now we just print out the block id
1464   tty->print("%d ", block_id);
1465 JRT_END
1466 
1467 
1468 JRT_LEAF_PROF_NO_THREAD(int, Runtime1, is_instance_of, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We return int instead of bool; otherwise there may be a mismatch
  // between the C calling convention and the Java one.
  // E.g., on x86, GCC may clear only %al when returning a bool false, while
  // the JVM reads the whole %eax as the return value, so a false result
  // could be misinterpreted as true.
1474 
1475   assert(mirror != nullptr, "should null-check on mirror before calling");
1476   Klass* k = java_lang_Class::as_Klass(mirror);
1477   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1478 JRT_END
1479 
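// A runtime predicate inserted by C1 (e.g. by range check elimination)
// failed: invalidate the nmethod, record the trap in the method's MDO so
// recompilation can take it into account, and deoptimize the caller frame.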
1480 JRT_ENTRY_PROF(void, Runtime1, predicate_failed_trap, Runtime1::predicate_failed_trap(JavaThread* current))
1481   ResourceMark rm;
1482 
1483   RegisterMap reg_map(current,
1484                       RegisterMap::UpdateMap::skip,
1485                       RegisterMap::ProcessFrames::include,
1486                       RegisterMap::WalkContinuation::skip);
1487   frame runtime_frame = current->last_frame();
1488   frame caller_frame = runtime_frame.sender(&reg_map);
1489 
1490   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != nullptr, "no more nmethod?");
1492   nm->make_not_entrant(nmethod::InvalidationReason::C1_PREDICATE_FAILED_TRAP);
1493 
1494   methodHandle m(current, nm->method());
1495   MethodData* mdo = m->method_data();
1496 
1497   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1498     // Build an MDO.  Ignore errors like OutOfMemory;
1499     // that simply means we won't have an MDO to update.
1500     Method::build_profiling_method_data(m, THREAD);
1501     if (HAS_PENDING_EXCEPTION) {
1502       // Only metaspace OOM is expected. No Java code executed.
1503       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1504       CLEAR_PENDING_EXCEPTION;
1505     }
1506     mdo = m->method_data();
1507   }
1508 
1509   if (mdo != nullptr) {
1510     mdo->inc_trap_count(Deoptimization::Reason_none);
1511   }
1512 
1513   if (TracePredicateFailedTraps) {
1514     stringStream ss1, ss2;
1515     vframeStream vfst(current);
1516     Method* inlinee = vfst.method();
1517     inlinee->print_short_name(&ss1);
1518     m->print_short_name(&ss2);
1519     tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
1520   }
1521 
1523   Deoptimization::deoptimize_frame(current, caller_frame.id());
1524 
1525 JRT_END
1526 
1527 // Check exception if AbortVMOnException flag set
1528 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1529   ResourceMark rm;
1530   const char* message = nullptr;
1531   if (ex->is_a(vmClasses::Throwable_klass())) {
1532     oop msg = java_lang_Throwable::message(ex);
1533     if (msg != nullptr) {
1534       message = java_lang_String::as_utf8_string(msg);
1535     }
1536   }
1537   Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1538 JRT_END
1539 
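// Perf counters for the Runtime1 entry points above: DO_COUNTERS applies the
// given macro to each profiled entry, and INIT_COUNTER/PRINT_COUNTER expand
// to the per-entry timer and event-counter definitions and reporting.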
1540 #define DO_COUNTERS(macro) \
1541   macro(Runtime1, new_instance) \
1542   macro(Runtime1, new_type_array) \
1543   macro(Runtime1, new_object_array) \
1544   macro(Runtime1, new_multi_array) \
1545   macro(Runtime1, counter_overflow) \
1546   macro(Runtime1, exception_handler_for_pc_helper) \
1547   macro(Runtime1, monitorenter) \
1548   macro(Runtime1, monitorexit) \
1549   macro(Runtime1, deoptimize) \
1550   macro(Runtime1, is_instance_of) \
1551   macro(Runtime1, predicate_failed_trap) \
1552   macro(Runtime1, patch_code)
1553 
1554 #define INIT_COUNTER(sub, name) \
1555   NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
1556   NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
1557 
1558 void Runtime1::init_counters() {
  assert(CompilerConfig::is_c1_enabled(), "only called when C1 is enabled");
1560 
1561   if (UsePerfData) {
1562     EXCEPTION_MARK;
1563 
1564     DO_COUNTERS(INIT_COUNTER)
1565 
1566     if (HAS_PENDING_EXCEPTION) {
1567       vm_exit_during_initialization("Runtime1::init_counters() failed unexpectedly");
1568     }
1569   }
1570 }
1571 #undef INIT_COUNTER
1572 
1573 #define PRINT_COUNTER(sub, name) { \
1574   if (_perf_##sub##_##name##_count != nullptr) {  \
1575     jlong count = _perf_##sub##_##name##_count->get_value(); \
1576     if (count > 0) { \
1577       st->print_cr("  %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
1578                    _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
1579                    _perf_##sub##_##name##_timer->thread_counter_value_us(), \
1580                    count); \
1581     }}}
1582 
1583 
1584 void Runtime1::print_counters_on(outputStream* st) {
1585   if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c1_enabled()) {
1586     DO_COUNTERS(PRINT_COUNTER)
1587   } else {
1588     st->print_cr("  Runtime1: no info (%s is disabled)",
1589                  (!CompilerConfig::is_c1_enabled() ? "C1" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
1590   }
1591 }
1592 
1593 #undef PRINT_COUNTER
1594 #undef DO_COUNTERS
1595 
1596 #ifndef PRODUCT
1597 void Runtime1::print_statistics_on(outputStream* st) {
1598   st->print_cr("C1 Runtime statistics:");
1599   st->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1600   st->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1601   st->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1602   st->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1603   st->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1604   st->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1605   st->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1606   st->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1607   st->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1608   st->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1609   st->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1610   st->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1611   st->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1612   st->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1613 
1614   st->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1615   st->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1616   st->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1617   st->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1618   st->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1619   st->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1620   st->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1621 
  st->print_cr(" _throw_range_check_exception_count:            %u", _throw_range_check_exception_count);
  st->print_cr(" _throw_index_exception_count:                  %u", _throw_index_exception_count);
  st->print_cr(" _throw_div0_exception_count:                   %u", _throw_div0_exception_count);
  st->print_cr(" _throw_null_pointer_exception_count:           %u", _throw_null_pointer_exception_count);
  st->print_cr(" _throw_class_cast_exception_count:             %u", _throw_class_cast_exception_count);
  st->print_cr(" _throw_incompatible_class_change_error_count:  %u", _throw_incompatible_class_change_error_count);
  st->print_cr(" _throw_count:                                  %u", _throw_count);
1629 
1630   SharedRuntime::print_ic_miss_histogram_on(st);
1631   st->cr();
1632 }
1633 #endif // PRODUCT