1 /*
   2  * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/codeBuffer.hpp"
  26 #include "c1/c1_CodeStubs.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_LIRAssembler.hpp"
  29 #include "c1/c1_MacroAssembler.hpp"
  30 #include "c1/c1_Runtime1.hpp"
  31 #include "classfile/javaClasses.inline.hpp"
  32 #include "classfile/vmClasses.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/aotCodeCache.hpp"
  35 #include "code/codeBlob.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "code/scopeDesc.hpp"
  38 #include "code/vtableStubs.hpp"
  39 #include "compiler/compilationPolicy.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "compiler/oopMap.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/c1/barrierSetC1.hpp"
  44 #include "gc/shared/collectedHeap.hpp"
  45 #include "interpreter/bytecode.hpp"
  46 #include "interpreter/interpreter.hpp"
  47 #include "jfr/support/jfrIntrinsics.hpp"
  48 #include "logging/log.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/access.inline.hpp"
  53 #include "oops/arrayProperties.hpp"
  54 #include "oops/flatArrayKlass.hpp"
  55 #include "oops/flatArrayOop.inline.hpp"
  56 #include "oops/objArrayKlass.hpp"
  57 #include "oops/objArrayOop.inline.hpp"
  58 #include "oops/oop.inline.hpp"
  59 #include "oops/oopCast.inline.hpp"
  60 #include "prims/jvmtiExport.hpp"
  61 #include "runtime/atomicAccess.hpp"
  62 #include "runtime/fieldDescriptor.inline.hpp"
  63 #include "runtime/frame.inline.hpp"
  64 #include "runtime/handles.inline.hpp"
  65 #include "runtime/interfaceSupport.inline.hpp"
  66 #include "runtime/javaCalls.hpp"
  67 #include "runtime/sharedRuntime.hpp"
  68 #include "runtime/stackWatermarkSet.hpp"
  69 #include "runtime/stubInfo.hpp"
  70 #include "runtime/stubRoutines.hpp"
  71 #include "runtime/vframe.inline.hpp"
  72 #include "runtime/vframeArray.hpp"
  73 #include "runtime/vm_version.hpp"
  74 #include "utilities/copy.hpp"
  75 #include "utilities/events.hpp"
  76 
  77 
  78 // Implementation of StubAssembler
  79 
  80 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  81   _name = name;
  82   _must_gc_arguments = false;
  83   _frame_size = no_frame_size;
  84   _num_rt_args = 0;
  85   _stub_id = stub_id;
  86 }
  87 
  88 
  89 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  90   _name = name;
  91   _must_gc_arguments = must_gc_arguments;
  92 }
  93 
  94 
  95 void StubAssembler::set_frame_size(int size) {
  96   if (_frame_size == no_frame_size) {
  97     _frame_size = size;
  98   }
  99   assert(_frame_size == size, "can't change the frame size");
 100 }
 101 
 102 
 103 void StubAssembler::set_num_rt_args(int args) {
 104   if (_num_rt_args == 0) {
 105     _num_rt_args = args;
 106   }
 107   assert(_num_rt_args == args, "can't change the number of args");
 108 }
 109 
 110 // Implementation of Runtime1
 111 CodeBlob* Runtime1::_blobs[StubInfo::C1_STUB_COUNT];
 112 
 113 #ifndef PRODUCT
 114 // statistics
 115 uint Runtime1::_generic_arraycopystub_cnt = 0;
 116 uint Runtime1::_arraycopy_slowcase_cnt = 0;
 117 uint Runtime1::_arraycopy_checkcast_cnt = 0;
 118 uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
 119 uint Runtime1::_new_type_array_slowcase_cnt = 0;
 120 uint Runtime1::_new_object_array_slowcase_cnt = 0;
 121 uint Runtime1::_new_null_free_array_slowcase_cnt = 0;
 122 uint Runtime1::_new_instance_slowcase_cnt = 0;
 123 uint Runtime1::_new_multi_array_slowcase_cnt = 0;
 124 uint Runtime1::_load_flat_array_slowcase_cnt = 0;
 125 uint Runtime1::_store_flat_array_slowcase_cnt = 0;
 126 uint Runtime1::_substitutability_check_slowcase_cnt = 0;
 127 uint Runtime1::_buffer_inline_args_slowcase_cnt = 0;
 128 uint Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
 129 uint Runtime1::_monitorenter_slowcase_cnt = 0;
 130 uint Runtime1::_monitorexit_slowcase_cnt = 0;
 131 uint Runtime1::_patch_code_slowcase_cnt = 0;
 132 uint Runtime1::_throw_range_check_exception_count = 0;
 133 uint Runtime1::_throw_index_exception_count = 0;
 134 uint Runtime1::_throw_div0_exception_count = 0;
 135 uint Runtime1::_throw_null_pointer_exception_count = 0;
 136 uint Runtime1::_throw_class_cast_exception_count = 0;
 137 uint Runtime1::_throw_incompatible_class_change_error_count = 0;
 138 uint Runtime1::_throw_illegal_monitor_state_exception_count = 0;
 139 uint Runtime1::_throw_identity_exception_count = 0;
 140 uint Runtime1::_throw_count = 0;
 141 
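     // Counters for the type-specific arraycopy stubs; their addresses are handed out by arraycopy_count_address() below.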
 142 static uint _byte_arraycopy_stub_cnt = 0;
 143 static uint _short_arraycopy_stub_cnt = 0;
 144 static uint _int_arraycopy_stub_cnt = 0;
 145 static uint _long_arraycopy_stub_cnt = 0;
 146 static uint _oop_arraycopy_stub_cnt = 0;
 147 
 148 address Runtime1::arraycopy_count_address(BasicType type) {
 149   switch (type) {
 150   case T_BOOLEAN:
 151   case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
 152   case T_CHAR:
 153   case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
 154   case T_FLOAT:
 155   case T_INT:    return (address)&_int_arraycopy_stub_cnt;
 156   case T_DOUBLE:
 157   case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
 158   case T_ARRAY:
 159   case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
 160   default:
 161     ShouldNotReachHere();
 162     return nullptr;
 163   }
 164 }
 165 
 166 
 167 #endif
 168 
 169 // Simple helper to see if the caller of a runtime stub which
 170 // entered the VM has been deoptimized
 171 
 172 static bool caller_is_deopted(JavaThread* current) {
 173   RegisterMap reg_map(current,
 174                       RegisterMap::UpdateMap::skip,
 175                       RegisterMap::ProcessFrames::include,
 176                       RegisterMap::WalkContinuation::skip);
 177   frame runtime_frame = current->last_frame();
 178   frame caller_frame = runtime_frame.sender(&reg_map);
 179   assert(caller_frame.is_compiled_frame(), "must be compiled");
 180   return caller_frame.is_deoptimized_frame();
 181 }
 182 
 183 // Stress deoptimization
 184 static void deopt_caller(JavaThread* current) {
 185   if (!caller_is_deopted(current)) {
 186     RegisterMap reg_map(current,
 187                         RegisterMap::UpdateMap::skip,
 188                         RegisterMap::ProcessFrames::include,
 189                         RegisterMap::WalkContinuation::skip);
 190     frame runtime_frame = current->last_frame();
 191     frame caller_frame = runtime_frame.sender(&reg_map);
 192     Deoptimization::deoptimize_frame(current, caller_frame.id());
 193     assert(caller_is_deopted(current), "Must be deoptimized");
 194   }
 195 }
 196 
 197 class C1StubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 198  private:
 199   StubId _id;
 200  public:
 201   C1StubAssemblerCodeGenClosure(StubId id) : _id(id) {
 202     assert(StubInfo::is_c1(_id), "not a c1 stub id %s", StubInfo::name(_id));
 203   }
 204   virtual OopMapSet* generate_code(StubAssembler* sasm) {
 205     return Runtime1::generate_code_for(_id, sasm);
 206   }
 207 };
 208 
 209 CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
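       // Try to load a previously stored blob from the AOT code cache before generating a new one.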
 210   if (id != StubId::NO_STUBID) {
 211     CodeBlob* blob = AOTCodeCache::load_code_blob(AOTCodeEntry::C1Blob, StubInfo::blob(id));
 212     if (blob != nullptr) {
 213       return blob;
 214     }
 215   }
 216 
 217   ResourceMark rm;
 218   // create code buffer for code storage
 219   CodeBuffer code(buffer_blob);
 220 
 221   OopMapSet* oop_maps;
 222   int frame_size;
 223   bool must_gc_arguments;
 224 
 225   Compilation::setup_code_buffer(&code, 0);
 226 
 227   // create assembler for code generation
 228   StubAssembler* sasm = new StubAssembler(&code, name, (int)id);
 229   // generate code for runtime stub
 230   oop_maps = cl->generate_code(sasm);
 231   assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
 232          "if stub has an oop map it must have a valid frame size");
 233   assert(!expect_oop_map || oop_maps != nullptr, "must have an oopmap");
 234 
 235   // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
 236   sasm->align(BytesPerWord);
 237   // make sure all code is in code buffer
 238   sasm->flush();
 239 
 240   frame_size = sasm->frame_size();
 241   must_gc_arguments = sasm->must_gc_arguments();
 242   // create blob - distinguish a few special cases
 243   CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
 244                                                  &code,
 245                                                  CodeOffsets::frame_never_safe,
 246                                                  frame_size,
 247                                                  oop_maps,
 248                                                  must_gc_arguments,
 249                                                  false /* alloc_fail_is_fatal */ );
 250   if (blob != nullptr && (int)id >= 0) {
 251     AOTCodeCache::store_code_blob(*blob, AOTCodeEntry::C1Blob, StubInfo::blob(id));
 252   }
 253   return blob;
 254 }
 255 
 256 bool Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubId id) {
 257   assert(StubInfo::is_c1(id), "not a c1 stub %s", StubInfo::name(id));
 258   bool expect_oop_map = true;
 259 #ifdef ASSERT
 260   // Make sure that stubs that need oopmaps have them
 261   switch (id) {
 262     // These stubs don't need to have an oopmap
 263   case StubId::c1_dtrace_object_alloc_id:
 264   case StubId::c1_slow_subtype_check_id:
 265   case StubId::c1_fpu2long_stub_id:
 266   case StubId::c1_unwind_exception_id:
 267   case StubId::c1_counter_overflow_id:
 268   case StubId::c1_is_instance_of_id:
 269     expect_oop_map = false;
 270     break;
 271   default:
 272     break;
 273   }
 274 #endif
 275   C1StubAssemblerCodeGenClosure cl(id);
 276   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
 277   // install blob
 278   int idx = StubInfo::c1_offset(id);   // will assert on non-c1 id
 279   _blobs[idx] = blob;
 280   return blob != nullptr;
 281 }
 282 
 283 bool Runtime1::initialize(BufferBlob* blob) {
 284   // platform-dependent initialization
 285   initialize_pd();
 286   // iterate blobs in C1 group and generate a single stub per blob
 287   StubId id = StubInfo::stub_base(StubGroup::C1);
 288   StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
 289   for (; id != limit; id = StubInfo::next(id)) {
 290     if (!generate_blob_for(blob, id)) {
 291       return false;
 292     }
 293     if (id == StubId::c1_forward_exception_id) {
 294       // publish early c1 stubs at this point so later stubs can refer to them
 295       AOTCodeCache::init_early_c1_table();
 296     }
 297   }
 298   // printing
 299 #ifndef PRODUCT
 300   if (PrintSimpleStubs) {
 301     ResourceMark rm;
 302     id = StubInfo::stub_base(StubGroup::C1);
 303     for (; id != limit; id = StubInfo::next(id)) {
 304       CodeBlob* blob = blob_for(id);
 305       blob->print();
 306       if (blob->oop_maps() != nullptr) {
 307         blob->oop_maps()->print();
 308       }
 309     }
 310   }
 311 #endif
 312   BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
 313   return bs->generate_c1_runtime_stubs(blob);
 314 }
 315 
 316 CodeBlob* Runtime1::blob_for(StubId id) {
 317   int idx = StubInfo::c1_offset(id);   // will assert on non-c1 id
 318   return _blobs[idx];
 319 }
 320 
 321 
 322 const char* Runtime1::name_for(StubId id) {
 323   return StubInfo::name(id);
 324 }
 325 
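     // Map a runtime call target address back to a printable name.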
 326 const char* Runtime1::name_for_address(address entry) {
 327   // iterate stubs starting from C1 group base
 328   StubId id = StubInfo::stub_base(StubGroup::C1);
 329   StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
 330   for (; id != limit; id = StubInfo::next(id)) {
 331     if (entry == entry_for(id)) return StubInfo::name(id);
 332   }
 333 
 334 #define FUNCTION_CASE(a, f) \
 335   if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
 336 
 337   FUNCTION_CASE(entry, os::javaTimeMillis);
 338   FUNCTION_CASE(entry, os::javaTimeNanos);
 339   FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
 340   FUNCTION_CASE(entry, SharedRuntime::d2f);
 341   FUNCTION_CASE(entry, SharedRuntime::d2i);
 342   FUNCTION_CASE(entry, SharedRuntime::d2l);
 343   FUNCTION_CASE(entry, SharedRuntime::dcos);
 344   FUNCTION_CASE(entry, SharedRuntime::dexp);
 345   FUNCTION_CASE(entry, SharedRuntime::dlog);
 346   FUNCTION_CASE(entry, SharedRuntime::dlog10);
 347   FUNCTION_CASE(entry, SharedRuntime::dpow);
 348   FUNCTION_CASE(entry, SharedRuntime::drem);
 349   FUNCTION_CASE(entry, SharedRuntime::dsin);
 350   FUNCTION_CASE(entry, SharedRuntime::dtan);
 351   FUNCTION_CASE(entry, SharedRuntime::f2i);
 352   FUNCTION_CASE(entry, SharedRuntime::f2l);
 353   FUNCTION_CASE(entry, SharedRuntime::frem);
 354   FUNCTION_CASE(entry, SharedRuntime::l2d);
 355   FUNCTION_CASE(entry, SharedRuntime::l2f);
 356   FUNCTION_CASE(entry, SharedRuntime::ldiv);
 357   FUNCTION_CASE(entry, SharedRuntime::lmul);
 358   FUNCTION_CASE(entry, SharedRuntime::lrem);
 360   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
 361   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 362   FUNCTION_CASE(entry, is_instance_of);
 363   FUNCTION_CASE(entry, trace_block_entry);
 364 #ifdef JFR_HAVE_INTRINSICS
 365   FUNCTION_CASE(entry, JfrTime::time_function());
 366 #endif
 367   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 368   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 369   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 370   FUNCTION_CASE(entry, StubRoutines::dexp());
 371   FUNCTION_CASE(entry, StubRoutines::dlog());
 372   FUNCTION_CASE(entry, StubRoutines::dlog10());
 373   FUNCTION_CASE(entry, StubRoutines::dpow());
 374   FUNCTION_CASE(entry, StubRoutines::dsin());
 375   FUNCTION_CASE(entry, StubRoutines::dcos());
 376   FUNCTION_CASE(entry, StubRoutines::dtan());
 377   FUNCTION_CASE(entry, StubRoutines::dsinh());
 378   FUNCTION_CASE(entry, StubRoutines::dtanh());
 379   FUNCTION_CASE(entry, StubRoutines::dcbrt());
 380 
 381 #undef FUNCTION_CASE
 382 
 383   // Soft float adds more runtime names.
 384   return pd_name_for_address(entry);
 385 }
 386 
 387 static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {
 388 #ifndef PRODUCT
 389   if (PrintC1Statistics) {
 390     Runtime1::_new_instance_slowcase_cnt++;
 391   }
 392 #endif
 393   assert(klass->is_klass(), "not a class");
 394   Handle holder(current, klass->klass_holder()); // keep the klass alive
 395   InstanceKlass* h = InstanceKlass::cast(klass);
 396   h->check_valid_for_instantiation(true, CHECK);
 397   // make sure klass is initialized
 398   h->initialize(CHECK);
 399   // allocate instance and return via TLS
 400   oop obj = h->allocate_instance(CHECK);
 401   current->set_vm_result_oop(obj);
 402 }
 403 
 404 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
 405   allocate_instance(current, klass, CHECK);
 406 JRT_END
 407 
 408 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
 409 #ifndef PRODUCT
 410   if (PrintC1Statistics) {
 411     _new_type_array_slowcase_cnt++;
 412   }
 413 #endif
 414   // Note: no handle for klass is needed since it is not used
 415   //       anymore after new_typeArray() and no GC can happen before.
 416   //       (This may have to change if this code changes!)
 417   assert(klass->is_klass(), "not a class");
 418   BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
 419   oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
 420   current->set_vm_result_oop(obj);
 421   // This is pretty rare, but deoptimizing from here is stressful for the
 422   // deoptimization machinery, so force a deopt (under DeoptimizeALot) to stress that path.
 423   if (DeoptimizeALot) {
 424     deopt_caller(current);
 425   }
 426 
 427 JRT_END
 428 
 429 
 430 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
 431 #ifndef PRODUCT
 432   if (PrintC1Statistics) {
 433     _new_object_array_slowcase_cnt++;
 434   }
 435 #endif
 436   // Note: no handle for klass is needed since it is not used
 437   //       anymore after new_objArray() and no GC can happen before.
 438   //       (This may have to change if this code changes!)
 439   assert(array_klass->is_klass(), "not a class");
 440   Handle holder(current, array_klass->klass_holder()); // keep the klass alive
 441   Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
 442   objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
 443   current->set_vm_result_oop(obj);
 444   // This is pretty rare, but deoptimizing from here is stressful for the
 445   // deoptimization machinery, so force a deopt (under DeoptimizeALot) to stress that path.
 446   if (DeoptimizeALot) {
 447     deopt_caller(current);
 448   }
 449 JRT_END
 450 
 451 
 452 JRT_ENTRY(void, Runtime1::new_null_free_array(JavaThread* current, Klass* array_klass, jint length))
 453   NOT_PRODUCT(_new_null_free_array_slowcase_cnt++;)
 454   // TODO 8350865 This is dead code since 8325660 because null-free arrays can only be created via the factory methods that are not yet implemented in C1. Should probably be fixed by 8265122.
 455 
 456   // Note: no handle for klass is needed since it is not used
 457   //       anymore after new_objArray() and no GC can happen before.
 458   //       (This may have to change if this code changes!)
 459   assert(array_klass->is_klass(), "not a class");
 460   Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
 461   Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
 462   assert(elem_klass->is_inline_klass(), "must be");
 463   // Logically creates elements, ensure klass init
 464   elem_klass->initialize(CHECK);
 465 
 466   const ArrayProperties props = ArrayProperties::Default().with_null_restricted();
 467   arrayOop obj = oopFactory::new_objArray(elem_klass, length, props, CHECK);
 468 
 469   current->set_vm_result_oop(obj);
 470   // This is pretty rare, but deoptimizing from here is stressful for the
 471   // deoptimization machinery, so force a deopt (under DeoptimizeALot) to stress that path.
 472   if (DeoptimizeALot) {
 473     deopt_caller(current);
 474   }
 475 JRT_END
 476 
 477 
 478 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
 479 #ifndef PRODUCT
 480   if (PrintC1Statistics) {
 481     _new_multi_array_slowcase_cnt++;
 482   }
 483 #endif
 484   assert(klass->is_klass(), "not a class");
 485   assert(rank >= 1, "rank must be at least 1");
 486   Handle holder(current, klass->klass_holder()); // keep the klass alive
 487   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
 488   current->set_vm_result_oop(obj);
 489 JRT_END
 490 
 491 
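     // Record in the caller's MethodData that a flat (and possibly null-free) array was seen at the current array access bytecode.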
 492 static void profile_flat_array(JavaThread* current, bool load, bool null_free) {
 493   ResourceMark rm(current);
 494   vframeStream vfst(current, true);
 495   assert(!vfst.at_end(), "Java frame must exist");
 496   // Check if array access profiling is enabled
 497   if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
 498     return;
 499   }
 500   int bci = vfst.bci();
 501   Method* method = vfst.method();
 502   MethodData* md = method->method_data();
 503   if (md != nullptr) {
 504     // Lock to access ProfileData, and ensure lock is not broken by a safepoint
 505     MutexLocker ml(md->extra_data_lock(), Mutex::_no_safepoint_check_flag);
 506 
 507     ProfileData* data = md->bci_to_data(bci);
 508     assert(data != nullptr, "incorrect profiling entry");
 509     if (data->is_ArrayLoadData()) {
 510       assert(load, "should be an array load");
 511       ArrayLoadData* load_data = (ArrayLoadData*) data;
 512       load_data->set_flat_array();
 513       if (null_free) {
 514         load_data->set_null_free_array();
 515       }
 516     } else {
 517       assert(data->is_ArrayStoreData(), "");
 518       assert(!load, "should be an array store");
 519       ArrayStoreData* store_data = (ArrayStoreData*) data;
 520       store_data->set_flat_array();
 521       if (null_free) {
 522         store_data->set_null_free_array();
 523       }
 524     }
 525   }
 526 }
 527 
 528 JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* array, int index))
 529   assert(array->klass()->is_flatArray_klass(), "should not be called");
 530   profile_flat_array(current, true, array->is_null_free_array());
 531 
 532   NOT_PRODUCT(_load_flat_array_slowcase_cnt++;)
 533   assert(array->length() > 0 && index < array->length(), "already checked");
 534   flatArrayHandle vah(current, array);
 535   oop obj = array->obj_at(index, CHECK);
 536   current->set_vm_result_oop(obj);
 537 JRT_END
 538 
 539 JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, arrayOopDesc* array, int index, oopDesc* value))
 540   // TODO 8350865 We can be called here with a non-flat array because of LIR_Assembler::emit_opFlattenedArrayCheck
 541   if (array->is_flatArray()) {
 542     profile_flat_array(current, false, array->is_null_free_array());
 543   }
 544 
 545   NOT_PRODUCT(_store_flat_array_slowcase_cnt++;)
 546   if (value == nullptr && array->is_null_free_array()) {
 547     SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
 548   } else {
 549     // Here we know that we have a flat array
 550     oop_cast<flatArrayOop>(array)->obj_at_put(index, value, CHECK);
 551   }
 552 JRT_END
 553 
 554 JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
 555   NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
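       // Delegate to the static Java method ValueObjectMethods.isSubstitutable(Object, Object).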
 556   JavaCallArguments args;
 557   args.push_oop(Handle(THREAD, left));
 558   args.push_oop(Handle(THREAD, right));
 559   JavaValue result(T_BOOLEAN);
 560   JavaCalls::call_static(&result,
 561                          vmClasses::ValueObjectMethods_klass(),
 562                          vmSymbols::isSubstitutable_name(),
 563                          vmSymbols::object_object_boolean_signature(),
 564                          &args, CHECK_0);
 565   return result.get_jboolean() ? 1 : 0;
 566 JRT_END
 567 
 568 
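     // Debugging helper defined in debug.cpp; declared here for convenience when working in a native debugger.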
 569 extern "C" void ps();
 570 
 571 void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
 572   JavaThread* THREAD = current;
 573   methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
 574   oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
 575   current->set_vm_result_oop(obj);
 576 }
 577 
 578 JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
 579   NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
 580   buffer_inline_args_impl(current, method, true);
 581 JRT_END
 582 
 583 JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
 584   NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
 585   buffer_inline_args_impl(current, method, false);
 586 JRT_END
 587 
 588 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubId id))
 589   tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
 590 JRT_END
 591 
 592 
 593 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 594   ResourceMark rm(current);
 595   const char* klass_name = obj->klass()->external_name();
 596   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
 597 JRT_END
 598 
 599 
 600 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
 601 // associated with the top activation record. The inlinee method (which may have been inlined
 602 // into the enclosing method) is passed as an argument. In order to do that it is embedded in
 603 // the code as a constant.
 604 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
 605   nmethod* osr_nm = nullptr;
 606   methodHandle method(current, m);
 607 
 608   RegisterMap map(current,
 609                   RegisterMap::UpdateMap::skip,
 610                   RegisterMap::ProcessFrames::include,
 611                   RegisterMap::WalkContinuation::skip);
 612   frame fr = current->last_frame().sender(&map);
 613   nmethod* nm = (nmethod*) fr.cb();
 614   assert(nm != nullptr && nm->is_nmethod(), "Sanity check");
 615   methodHandle enclosing_method(current, nm->method());
 616 
 617   CompLevel level = (CompLevel)nm->comp_level();
 618   int bci = InvocationEntryBci;
 619   if (branch_bci != InvocationEntryBci) {
 620     // Compute destination bci
 621     address pc = method()->code_base() + branch_bci;
 622     Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
 623     int offset = 0;
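         // Conditional branches and goto carry a signed 16-bit offset; goto_w carries a 32-bit offset, both relative to the branch bci.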
 624     switch (branch) {
 625       case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
 626       case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
 627       case Bytecodes::_if_icmple: case Bytecodes::_ifle:
 628       case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
 629       case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
 630       case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
 631       case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
 632         offset = (int16_t)Bytes::get_Java_u2(pc + 1);
 633         break;
 634       case Bytecodes::_goto_w:
 635         offset = Bytes::get_Java_u4(pc + 1);
 636         break;
 637       default: ;
 638     }
 639     bci = branch_bci + offset;
 640   }
 641   osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
 642   return osr_nm;
 643 }
 644 
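     // counter_overflow() reports the counter overflow event to the compilation policy; if that
     // produces an OSR nmethod, the compiled caller frame is deoptimized below.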
 645 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
 646   nmethod* osr_nm;
 647   JRT_BLOCK_NO_ASYNC
 648     osr_nm = counter_overflow_helper(current, bci, method);
 649     if (osr_nm != nullptr) {
 650       RegisterMap map(current,
 651                       RegisterMap::UpdateMap::skip,
 652                       RegisterMap::ProcessFrames::include,
 653                       RegisterMap::WalkContinuation::skip);
 654       frame fr = current->last_frame().sender(&map);
 655       Deoptimization::deoptimize_frame(current, fr.id());
 656     }
 657   JRT_BLOCK_END
 658   return nullptr;
 659 JRT_END
 660 
 661 extern void vm_exit(int code);
 662 
 663 // Enter this method from compiled code handler below. This is where we transition
 664 // to VM mode. This is done as a helper routine so that the method called directly
 665 // from compiled code does not have to transition to VM. This allows the entry
 666 // method to see if the nmethod that we have just looked up a handler for has
 667 // been deoptimized while we were in the vm. This simplifies the assembly code
 668 // in the cpu directories.
 669 //
 670 // We are entering here from exception stub (via the entry method below)
 671 // If there is a compiled exception handler in this method, we will continue there;
 672 // otherwise we will unwind the stack and continue at the caller of the top-frame method.
 673 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
 674 // control the area where we can allow a safepoint. After we exit the safepoint area we can
 675 // check to see if the handler we are going to return is now in a nmethod that has
 676 // been deoptimized. If that is the case we return the deopt blob
 677 // unpack_with_exception entry instead. This makes life for the exception blob easier
 678 // because making that same check and diverting is painful from assembly language.
 679 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
 680   MACOS_AARCH64_ONLY(current->wx_enable_write());
 681   Handle exception(current, ex);
 682 
 683   // This function is called when we are about to throw an exception. Therefore,
 684   // we have to poll the stack watermark barrier to make sure that not yet safe
 685   // stack frames are made safe before returning into them.
 686   if (current->last_frame().cb() == Runtime1::blob_for(StubId::c1_handle_exception_from_callee_id)) {
 687     // The StubId::c1_handle_exception_from_callee_id handler is invoked after the
 688     // frame has been unwound. It instead builds its own stub frame, to call the
 689     // runtime. But the throwing frame has already been unwound here.
 690     StackWatermarkSet::after_unwind(current);
 691   }
 692 
 693   nm = CodeCache::find_nmethod(pc);
 694   assert(nm != nullptr, "this is not an nmethod");
 695   // Adjust the pc as needed.
 696   if (nm->is_deopt_pc(pc)) {
 697     RegisterMap map(current,
 698                     RegisterMap::UpdateMap::skip,
 699                     RegisterMap::ProcessFrames::include,
 700                     RegisterMap::WalkContinuation::skip);
 701     frame exception_frame = current->last_frame().sender(&map);
 702     // if the frame isn't deopted then pc must not correspond to the caller of last_frame
 703     assert(exception_frame.is_deoptimized_frame(), "must be deopted");
 704     pc = exception_frame.pc();
 705   }
 706   assert(exception.not_null(), "null exceptions should be handled by throw_exception");
 707   // Check that exception is a subclass of Throwable
 708   assert(exception->is_a(vmClasses::Throwable_klass()),
 709          "Exception not subclass of Throwable");
 710 
 711   // debugging support
 712   // tracing
 713   if (log_is_enabled(Info, exceptions)) {
 714     ResourceMark rm; // print_value_string
 715     stringStream tempst;
 716     assert(nm->method() != nullptr, "Unexpected null method()");
 717     tempst.print("C1 compiled method <%s>\n"
 718                  " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
 719                  nm->method()->print_value_string(), p2i(pc), p2i(current));
 720     Exceptions::log_exception(exception, tempst.freeze());
 721   }
 722   // for AbortVMOnException flag
 723   Exceptions::debug_check_abort(exception);
 724 
 725   // Check the stack guard pages and re-enable them if necessary and there is
 726   // enough space on the stack to do so.  Use fast exceptions only if the guard
 727   // pages are enabled.
 728   bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();
 729 
 730   if (JvmtiExport::can_post_on_exceptions()) {
 731     // To ensure correct notification of exception catches and throws
 732     // we have to deoptimize here.  If we attempted to notify the
 733     // catches and throws during this exception lookup it's possible
 734     // we could deoptimize on the way out of the VM and end back in
 735     // the interpreter at the throw site.  This would result in double
 736     // notifications since the interpreter would also notify about
 737     // these same catches and throws as it unwound the frame.
 738 
 739     RegisterMap reg_map(current,
 740                         RegisterMap::UpdateMap::include,
 741                         RegisterMap::ProcessFrames::include,
 742                         RegisterMap::WalkContinuation::skip);
 743     frame stub_frame = current->last_frame();
 744     frame caller_frame = stub_frame.sender(&reg_map);
 745 
 746     // We don't really want to deoptimize the nmethod itself since we
 747     // can actually continue in the exception handler ourselves but I
 748     // don't see an easy way to have the desired effect.
 749     Deoptimization::deoptimize_frame(current, caller_frame.id());
 750     assert(caller_is_deopted(current), "Must be deoptimized");
 751 
 752     return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
 753   }
 754 
 755   // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
 756   if (guard_pages_enabled) {
 757     address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
 758     if (fast_continuation != nullptr) {
 759       return fast_continuation;
 760     }
 761   }
 762 
 763   // If the stack guard pages are enabled, check whether there is a handler in
 764   // the current method.  Otherwise (guard pages disabled), force an unwind and
 765   // skip the exception cache update (i.e., just leave continuation as null).
 766   address continuation = nullptr;
 767   if (guard_pages_enabled) {
 768 
 769     // New exception handling mechanism can support inlined methods
 770     // with exception handlers since the mappings are from PC to PC
 771 
 772     // Clear out the exception oop and pc since looking up an
 773     // exception handler can cause class loading, which might throw an
 774     // exception and those fields are expected to be clear during
 775     // normal bytecode execution.
 776     current->clear_exception_oop_and_pc();
 777 
 778     bool recursive_exception = false;
 779     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
 780     // If an exception was thrown during exception dispatch, the exception oop may have changed
 781     current->set_exception_oop(exception());
 782     current->set_exception_pc(pc);
 783 
 784     // the exception cache is used only by non-implicit exceptions
 785     // Update the exception cache only if no other exception occurred
 786     // during the computation of the compiled exception handler.
 787     // Checking for exception oop equality is not
 788     // sufficient because some exceptions are pre-allocated and reused.
 789     if (continuation != nullptr && !recursive_exception) {
 790       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
 791     }
 792   }
 793 
 794   current->set_vm_result_oop(exception());
 795 
 796   if (log_is_enabled(Info, exceptions)) {
 797     ResourceMark rm;
 798     log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
 799                          " for exception thrown at PC " PTR_FORMAT,
 800                          p2i(current), p2i(continuation), p2i(pc));
 801   }
 802 
 803   return continuation;
 804 JRT_END
 805 
 806 // Enter this method from compiled code only if there is a Java exception handler
 807 // in the method handling the exception.
 808 // We are entering here from exception stub. We don't do a normal VM transition here.
 809 // We do it in a helper. This is so we can check to see if the nmethod we have just
 810 // searched for an exception handler has been deoptimized in the meantime.
 811 address Runtime1::exception_handler_for_pc(JavaThread* current) {
 812   oop exception = current->exception_oop();
 813   address pc = current->exception_pc();
 814   // Still in Java mode
 815   DEBUG_ONLY(NoHandleMark nhm);
 816   nmethod* nm = nullptr;
 817   address continuation = nullptr;
 818   {
 819     // Enter VM mode by calling the helper
 820     ResetNoHandleMark rnhm;
 821     continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
 822   }
 823   // Back in Java: use no oops, DON'T safepoint.
 824 
 825   // Now check to see if the nmethod we were called from is now deoptimized.
 826   // If so we must return to the deopt blob and deoptimize the nmethod
 827   if (nm != nullptr && caller_is_deopted(current)) {
 828     continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
 829   }
 830 
 831   assert(continuation != nullptr, "no handler found");
 832   return continuation;
 833 }
 834 
 835 
 836 JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
 837 #ifndef PRODUCT
 838   if (PrintC1Statistics) {
 839     _throw_range_check_exception_count++;
 840   }
 841 #endif
 842   const int len = 35;
 843   assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
 844   char message[2 * jintAsStringSize + len];
 845   os::snprintf_checked(message, sizeof(message), "Index %d out of bounds for length %d", index, a->length());
 846   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
 847 JRT_END
 848 
 849 
 850 JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
 851 #ifndef PRODUCT
 852   if (PrintC1Statistics) {
 853     _throw_index_exception_count++;
 854   }
 855 #endif
 856   char message[16];
 857   os::snprintf_checked(message, sizeof(message), "%d", index);
 858   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
 859 JRT_END
 860 
 861 
 862 JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
 863 #ifndef PRODUCT
 864   if (PrintC1Statistics) {
 865     _throw_div0_exception_count++;
 866   }
 867 #endif
 868   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 869 JRT_END
 870 
 871 
 872 JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
 873 #ifndef PRODUCT
 874   if (PrintC1Statistics) {
 875     _throw_null_pointer_exception_count++;
 876   }
 877 #endif
 878   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
 879 JRT_END
 880 
 881 
 882 JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
 883 #ifndef PRODUCT
 884   if (PrintC1Statistics) {
 885     _throw_class_cast_exception_count++;
 886   }
 887 #endif
 888   ResourceMark rm(current);
 889   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 890   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 891 JRT_END
 892 
 893 
 894 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
 895 #ifndef PRODUCT
 896   if (PrintC1Statistics) {
 897     _throw_incompatible_class_change_error_count++;
 898   }
 899 #endif
 900   ResourceMark rm(current);
 901   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
 902 JRT_END
 903 
 904 
 905 JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
 906   NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
 907   ResourceMark rm(current);
 908   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
 909 JRT_END
 910 
 911 JRT_ENTRY(void, Runtime1::throw_identity_exception(JavaThread* current, oopDesc* object))
 912   NOT_PRODUCT(_throw_identity_exception_count++;)
 913   ResourceMark rm(current);
 914   char* message = SharedRuntime::generate_identity_exception_message(current, object->klass());
 915   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IdentityException(), message);
 916 JRT_END
 917 
 918 JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
 919 #ifndef PRODUCT
 920   if (PrintC1Statistics) {
 921     _monitorenter_slowcase_cnt++;
 922   }
 923 #endif
 924   assert(obj == lock->obj(), "must match");
 925   SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
 926 JRT_END
 927 
 928 
 929 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 930   assert(current == JavaThread::current(), "pre-condition");
 931 #ifndef PRODUCT
 932   if (PrintC1Statistics) {
 933     _monitorexit_slowcase_cnt++;
 934   }
 935 #endif
 936   assert(current->last_Java_sp(), "last_Java_sp must be set");
 937   oop obj = lock->obj();
 938   assert(oopDesc::is_oop(obj), "must be an object");
 939   SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
 940 JRT_END
 941 
 942 // Cf. OptoRuntime::deoptimize_caller_frame
 943 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
 944   // Called from within the owner thread, so no need for safepoint
 945   RegisterMap reg_map(current,
 946                       RegisterMap::UpdateMap::skip,
 947                       RegisterMap::ProcessFrames::include,
 948                       RegisterMap::WalkContinuation::skip);
 949   frame stub_frame = current->last_frame();
 950   assert(stub_frame.is_runtime_frame(), "Sanity check");
 951   frame caller_frame = stub_frame.sender(&reg_map);
 952   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 953   assert(nm != nullptr, "Sanity check");
 954   methodHandle method(current, nm->method());
 955   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 956   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 957   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 958 
 959   if (action == Deoptimization::Action_make_not_entrant) {
 960     if (nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE)) {
 961       if (reason == Deoptimization::Reason_tenured) {
 962         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
 963         if (trap_mdo != nullptr) {
 964           trap_mdo->inc_tenure_traps();
 965         }
 966       }
 967     }
 968   }
 969 
 970   // Deoptimize the caller frame.
 971   Deoptimization::deoptimize_frame(current, caller_frame.id());
 972   // Return to the now deoptimized frame.
 973 JRT_END
 974 
 975 
 976 #ifndef DEOPTIMIZE_WHEN_PATCHING
 977 
 978 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
 979   Bytecode_field field_access(caller, bci);
 980   // This can be static or non-static field access
 981   Bytecodes::Code code       = field_access.code();
 982 
 983   // We must load class, initialize class and resolve the field
 984   fieldDescriptor result; // initialize class if needed
 985   constantPoolHandle constants(THREAD, caller->constants());
 986   LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
 987   return result.field_holder();
 988 }
 989 
 990 
 991 //
 992 // This routine patches sites where a class wasn't loaded or
 993 // initialized at the time the code was generated.  It handles
 994 // references to classes, fields and forcing of initialization.  Most
 995 // of the cases are straightforward and involve simply forcing
 996 // resolution of a class, rewriting the instruction stream with the
 997 // needed constant and replacing the call in this function with the
 998 // patched code.  The case for static fields is more complicated since
 999 // the thread which is in the process of initializing a class can
1000 // access its static fields but other threads can't, so the code
1001 // either has to deoptimize when this case is detected or execute a
1002 // check that the current thread is the initializing thread.
1004 //
1005 // Patches basically look like this:
1006 //
1007 //
1008 // patch_site: jmp patch stub     ;; will be patched
1009 // continue:   ...
1010 //             ...
1011 //             ...
1012 //             ...
1013 //
1014 // They have a stub which looks like this:
1015 //
1016 //             ;; patch body
1017 //             movl <const>, reg           (for class constants)
1018 //        <or> movl [reg1 + <const>], reg  (for field offsets)
1019 //        <or> movl reg, [reg1 + <const>]  (for field offsets)
1020 //             <being_init offset> <bytes to copy> <bytes to skip>
1021 // patch_stub: call Runtime1::patch_code (through a runtime stub)
1022 //             jmp patch_site
1023 //
1024 //
1025 // A normal patch is done by rewriting the patch body, usually a move,
1026 // and then copying it into place over top of the jmp instruction
1027 // being careful to flush caches and doing it in an MP-safe way.  The
1028 // constants following the patch body are used to find various pieces
1029 // of the patch relative to the call site for Runtime1::patch_code.
1030 // The case for getstatic and putstatic is more complicated because
1031 // getstatic and putstatic have special semantics when executing while
1032 // the class is being initialized.  getstatic/putstatic on a class
1033 // which is being_initialized may be executed by the initializing
1034 // thread but other threads have to block when they execute it.  This
1035 // is accomplished in compiled code by executing a test of the current
1036 // thread against the initializing thread of the class.  It's emitted
1037 // as boilerplate in their stub which allows the patched code to be
1038 // executed before it's copied back into the main body of the nmethod.
1039 //
1040 // being_init: get_thread(<tmp reg>)
1041 //             cmpl [reg1 + <init_thread_offset>], <tmp reg>
1042 //             jne patch_stub
1043 //             movl [reg1 + <const>], reg  (for field offsets)  <or>
1044 //             movl reg, [reg1 + <const>]  (for field offsets)
1045 //             jmp continue
1046 //             <being_init offset> <bytes to copy> <bytes to skip>
1047 // patch_stub: jmp Runtime1::patch_code (through a runtime stub)
1048 //             jmp patch_site
1049 //
1050 // If the class is being initialized the patch body is rewritten and
1051 // the patch site is rewritten to jump to being_init, instead of
1052 // patch_stub.  Whenever this code is executed it checks the current
1053 // thread against the initializing thread so other threads will enter
1054 // the runtime and end up blocked waiting for the class to finish
1055 // initializing inside the calls to resolve_field below.  The
1056 // initializing class will continue on its way.  Once the class is
1057 // fully_initialized, the initializing thread of the class becomes
1058 // null, so the next thread to execute this code will fail the test,
1059 // call into patch_code and complete the patching process by copying
1060 // the patch body back into the main part of the nmethod and resume
1061 // executing.
1062 
1063 // NB:
1064 //
1065 // Patchable instruction sequences inherently exhibit race conditions,
1066 // where thread A is patching an instruction at the same time thread B
1067 // is executing it.  The algorithms we use ensure that any observation
1068 // that B can make on any intermediate states during A's patching will
1069 // always end up with a correct outcome.  This is easiest if there are
1070 // few or no intermediate states.  (Some inline caches have two
1071 // related instructions that must be patched in tandem.  For those,
1072 // intermediate states seem to be unavoidable, but we will get the
1073 // right answer from all possible observation orders.)
1074 //
1075 // When patching the entry instruction at the head of a method, or a
1076 // linkable call instruction inside of a method, we try very hard to
1077 // use a patch sequence which executes as a single memory transaction.
1078 // This means, in practice, that when thread A patches an instruction,
1079 // it should patch a 32-bit or 64-bit word that somehow overlaps the
1080 // instruction or is contained in it.  We believe that memory hardware
1081 // will never break up such a word write, if it is naturally aligned
1082 // for the word being written.  We also know that some CPUs work very
1083 // hard to create atomic updates even of naturally unaligned words,
1084 // but we don't want to bet the farm on this always working.
1085 //
1086 // Therefore, if there is any chance of a race condition, we try to
1087 // patch only naturally aligned words, as single, full-word writes.
1088 
1089 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, StubId stub_id ))
1090 #ifndef PRODUCT
1091   if (PrintC1Statistics) {
1092     _patch_code_slowcase_cnt++;
1093   }
1094 #endif
1095 
1096   ResourceMark rm(current);
1097   RegisterMap reg_map(current,
1098                       RegisterMap::UpdateMap::skip,
1099                       RegisterMap::ProcessFrames::include,
1100                       RegisterMap::WalkContinuation::skip);
1101   frame runtime_frame = current->last_frame();
1102   frame caller_frame = runtime_frame.sender(&reg_map);
1103 
1104   // last java frame on stack
1105   vframeStream vfst(current, true);
1106   assert(!vfst.at_end(), "Java frame must exist");
1107 
1108   methodHandle caller_method(current, vfst.method());
1109   // Note that caller_method->code() may not be the same as caller_code because of OSRs
1110   // Note also that in the presence of inlining it is not guaranteed
1111   // that caller_method() == caller_code->method()
1112 
1113   int bci = vfst.bci();
1114   Bytecodes::Code code = caller_method()->java_code_at(bci);
1115 
1116   // this is used by assertions in the access_field_patching_id
1117   BasicType patch_field_type = T_ILLEGAL;
1118   bool deoptimize_for_volatile = false;
1119   bool deoptimize_for_atomic = false;
1120   bool deoptimize_for_null_free = false;
1121   bool deoptimize_for_flat = false;
1122   bool deoptimize_for_strict_static = false;
1123   int patch_field_offset = -1;
1124   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
1125   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
1126   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
1127   Handle appendix(current, nullptr); // oop needed by appendix_patching code
1128   bool load_klass_or_mirror_patch_id =
1129     (stub_id == StubId::c1_load_klass_patching_id || stub_id == StubId::c1_load_mirror_patching_id);
1130 
1131   if (stub_id == StubId::c1_access_field_patching_id) {
1132 
1133     Bytecode_field field_access(caller_method, bci);
1134     fieldDescriptor result; // initialize class if needed
1135     Bytecodes::Code code = field_access.code();
1136     constantPoolHandle constants(current, caller_method->constants());
1137     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
1138     patch_field_offset = result.offset();
1139 
1140     // If we're patching a field which is volatile then at compile time it
1141     // must not have been known to be volatile, so the generated code
1142     // isn't correct for a volatile reference.  The nmethod has to be
1143     // deoptimized so that the code can be regenerated correctly.
1144     // This check is only needed for access_field_patching since this
1145     // is the path for patching field offsets.  load_klass is only
1146     // used for patching references to oops which don't need special
1147     // handling in the volatile case.
1148 
1149     deoptimize_for_volatile = result.access_flags().is_volatile();
1150 
1151     // If we are patching a field which should be atomic, then
1152     // the generated code is not correct either, so force deoptimization.
1153     // We need to only cover T_LONG and T_DOUBLE fields, as we can
1154     // break access atomicity only for them.
1155 
1156     // Strictly speaking, the deoptimization on 64-bit platforms
1157     // is unnecessary, and T_LONG stores on 32-bit platforms need
1158     // to be handled by special patching code when AlwaysAtomicAccesses
1159     // becomes a product feature. At this point, we are still going
1160     // for the deoptimization for consistency against volatile
1161     // accesses.
1162 
1163     patch_field_type = result.field_type();
1164     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
1165 
1166     // The field we are patching is null-free. Deoptimize and regenerate
1167     // the compiled code if we patch a putfield/putstatic because it
1168     // does not contain the required null check.
1169     deoptimize_for_null_free = result.is_null_free_inline_type() && (field_access.is_putfield() || field_access.is_putstatic());
1170 
1171     // The field we are patching is flat. Deoptimize and regenerate
1172     // the compiled code which can't handle the layout of the flat
1173     // field because it was unknown at compile time.
1174     deoptimize_for_flat = result.is_flat();
1175 
1176     // Strict statics may require tracking if their class is not fully initialized.
1177     // For now we can bail out of the compiler and let the interpreter handle it.
1178     deoptimize_for_strict_static = result.is_strict_static_unset();
1179   } else if (load_klass_or_mirror_patch_id) {
1180     Klass* k = nullptr;
1181     switch (code) {
1182       case Bytecodes::_putstatic:
1183       case Bytecodes::_getstatic:
1184         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
1185           init_klass = klass;
1186           mirror = Handle(current, klass->java_mirror());
1187         }
1188         break;
1189       case Bytecodes::_new:
1190         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
1191           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
1192         }
1193         break;
1194       case Bytecodes::_multianewarray:
1195         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
1196           k = caller_method->constants()->klass_at(mna.index(), CHECK);
1197         }
1198         break;
1199       case Bytecodes::_instanceof:
1200         { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
1201           k = caller_method->constants()->klass_at(io.index(), CHECK);
1202         }
1203         break;
1204       case Bytecodes::_checkcast:
1205         { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
1206           k = caller_method->constants()->klass_at(cc.index(), CHECK);
1207         }
1208         break;
1209       case Bytecodes::_anewarray:
1210         { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
1211           Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
1212           k = ek->array_klass(CHECK);
1213           if (!k->is_typeArray_klass() && !k->is_refArray_klass() && !k->is_flatArray_klass()) {
1214             k = ObjArrayKlass::cast(k)->klass_with_properties(ArrayProperties::Default(), THREAD);
1215           }
1216           if (k->is_flatArray_klass()) {
1217             deoptimize_for_flat = true;
1218           }
1219         }
1220         break;
1221       case Bytecodes::_ldc:
1222       case Bytecodes::_ldc_w:
1223       case Bytecodes::_ldc2_w:
1224         {
1225           Bytecode_loadconstant cc(caller_method, bci);
1226           oop m = cc.resolve_constant(CHECK);
1227           mirror = Handle(current, m);
1228         }
1229         break;
1230       default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
1231     }
1232     load_klass = k;
1233   } else if (stub_id == StubId::c1_load_appendix_patching_id) {
1234     Bytecode_invoke bytecode(caller_method, bci);
1235     Bytecodes::Code bc = bytecode.invoke_code();
1236 
1237     CallInfo info;
1238     constantPoolHandle pool(current, caller_method->constants());
1239     int index = bytecode.index();
1240     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
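         // Resolution installs the appendix (the synthetic extra argument used by
         // invokehandle/invokedynamic call sites) in the constant pool cache;
         // fetch it here so the patched code can load it.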
1241     switch (bc) {
1242       case Bytecodes::_invokehandle: {
1243         ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
1244         appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
1245         break;
1246       }
1247       case Bytecodes::_invokedynamic: {
1248         appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
1249         break;
1250       }
1251       default: fatal("unexpected bytecode for load_appendix_patching_id");
1252     }
1253   } else {
1254     ShouldNotReachHere();
1255   }
1256 
1257   if (deoptimize_for_volatile  ||
1258       deoptimize_for_atomic    ||
1259       deoptimize_for_null_free ||
1260       deoptimize_for_flat      ||
1261       deoptimize_for_strict_static) {
1262     // At compile time we made assumptions about the field (e.g. that it
1263     // wasn't volatile or flat) that turn out to be wrong after resolution,
1264     // so we have to throw the compiled code out and let it be regenerated.
1265     if (TracePatching) {
1266       if (deoptimize_for_volatile) {
1267         tty->print_cr("Deoptimizing for patching volatile field reference");
1268       }
1269       if (deoptimize_for_atomic) {
1270         tty->print_cr("Deoptimizing for patching atomic field reference");
1271       }
1272       if (deoptimize_for_null_free) {
1273         tty->print_cr("Deoptimizing for patching null-free field reference");
1274       }
1275       if (deoptimize_for_flat) {
1276         tty->print_cr("Deoptimizing for patching flat field or array reference");
1277       }
1278       if (deoptimize_for_strict_static) {
1279         tty->print_cr("Deoptimizing for patching strict static field reference");
1280       }
1281     }
1282 
1283     // It's possible the nmethod was invalidated in the last
1284     // safepoint, but if it's still alive then make it not_entrant.
1285     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1286     if (nm != nullptr) {
1287       nm->make_not_entrant(nmethod::InvalidationReason::C1_CODEPATCH);
1288     }
1289 
1290     Deoptimization::deoptimize_frame(current, caller_frame.id());
1291 
1292     // Return to the now deoptimized frame.
1293   }
1294 
1295   // Now copy code back
1296 
1297   {
1298     MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1299     //
1300     // Deoptimization may have happened while we waited for the lock.
1301     // In that case we don't bother to do any patching; we just return
1302     // and let the deopt happen.
1303     if (!caller_is_deopted(current)) {
1304       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
1305       address instr_pc = jump->jump_destination();
1306       NativeInstruction* ni = nativeInstruction_at(instr_pc);
1307       if (ni->is_jump()) {
1308         // The jump has not been patched yet.
1309         // The jump destination is the slow case and therefore not part of the stubs
1310         // (stubs are only for StaticCalls).
1311 
1312         // format of buffer
1313         //    ....
1314         //    instr byte 0     <-- copy_buff
1315         //    instr byte 1
1316         //    ..
1317         //    instr byte n-1
1318         //      n
1319         //    ....             <-- call destination
1320 
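             // Decode the patch metadata recorded by the PatchingStub just before the
             // patch-info site: the number of instruction bytes to copy, the bytes to
             // skip back to the copy buffer, and the offset of the being-initialized
             // entry.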
1321         address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
1322         unsigned char* byte_count = (unsigned char*) (stub_location - 1);
1323         unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
1324         unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
1325         address copy_buff = stub_location - *byte_skip - *byte_count;
1326         address being_initialized_entry = stub_location - *being_initialized_entry_offset;
1327         if (TracePatching) {
1328           ttyLocker ttyl;
1329           tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
1330                         p2i(instr_pc), (stub_id == StubId::c1_access_field_patching_id) ? "field" : "klass");
1331           nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
1332           assert(caller_code != nullptr, "nmethod not found");
1333 
1334           // NOTE we use pc() not original_pc() because we already know they are
1335           // identical; otherwise we'd never have entered this block of code.
1336 
1337           const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
1338           assert(map != nullptr, "null check");
1339           map->print();
1340           tty->cr();
1341 
1342           Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1343         }
1344         // do_patch, set by the code below, says whether to copy the patch body back into the nmethod.
1345         bool do_patch = true;
1346         if (stub_id == StubId::c1_access_field_patching_id) {
1347           // The offset may not be correct if the class was not loaded at code generation time.
1348           // Set it now.
1349           NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
1350           assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
1351           assert(patch_field_offset >= 0, "illegal offset");
1352           n_move->add_offset_in_bytes(patch_field_offset);
1353         } else if (load_klass_or_mirror_patch_id) {
1354           // If a getstatic or putstatic is referencing a klass which
1355           // isn't fully initialized, the patch body isn't copied into
1356           // place until initialization is complete.  In this case the
1357           // patch site is set up so that any threads besides the
1358           // initializing thread are forced to come into the VM and
1359           // block.
1360           do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
1361                      InstanceKlass::cast(init_klass)->is_initialized();
1362           NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
1363           if (jump->jump_destination() == being_initialized_entry) {
1364             assert(do_patch == true, "initialization must be complete at this point");
1365           } else {
1366             // patch the instruction <move reg, klass>
1367             NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1368 
1369             assert(n_copy->data() == 0 ||
1370                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
1371                    "illegal init value");
1372             if (stub_id == StubId::c1_load_klass_patching_id) {
1373               assert(load_klass != nullptr, "klass not set");
1374               n_copy->set_data((intx) (load_klass));
1375             } else {
1376               // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
1377               n_copy->set_data(cast_from_oop<intx>(mirror()));
1378             }
1379 
1380             if (TracePatching) {
1381               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1382             }
1383           }
1384         } else if (stub_id == StubId::c1_load_appendix_patching_id) {
1385           NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1386           assert(n_copy->data() == 0 ||
1387                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
1388                  "illegal init value");
1389           n_copy->set_data(cast_from_oop<intx>(appendix()));
1390 
1391           if (TracePatching) {
1392             Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1393           }
1394         } else {
1395           ShouldNotReachHere();
1396         }
1397 
1398         if (do_patch) {
1399           // replace instructions
1400           // first replace the tail, then the call
1401 #ifdef ARM
1402           if ((load_klass_or_mirror_patch_id ||
1403               stub_id == StubId::c1_load_appendix_patching_id) &&
1404               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1405             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1406             address addr = nullptr;
1407             assert(nm != nullptr, "invalid nmethod_pc");
1408             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1409             while (mds.next()) {
1410               if (mds.type() == relocInfo::oop_type) {
1411                 assert(stub_id == StubId::c1_load_mirror_patching_id ||
1412                        stub_id == StubId::c1_load_appendix_patching_id, "wrong stub id");
1413                 oop_Relocation* r = mds.oop_reloc();
1414                 addr = (address)r->oop_addr();
1415                 break;
1416               } else if (mds.type() == relocInfo::metadata_type) {
1417                 assert(stub_id == StubId::c1_load_klass_patching_id, "wrong stub id");
1418                 metadata_Relocation* r = mds.metadata_reloc();
1419                 addr = (address)r->metadata_addr();
1420                 break;
1421               }
1422             }
1423             assert(addr != nullptr, "metadata relocation must exist");
1424             copy_buff -= *byte_count;
1425             NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
1426             n_copy2->set_pc_relative_offset(addr, instr_pc);
1427           }
1428 #endif
1429 
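               // Copy the patch body back into the nmethod: first copy everything after
               // the initial jump-sized chunk byte by byte, then atomically replace the
               // jump itself so other threads never execute a partially patched site.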
1430           for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
1431             address ptr = copy_buff + i;
1432             int a_byte = (*ptr) & 0xFF;
1433             address dst = instr_pc + i;
1434             *(unsigned char*)dst = (unsigned char) a_byte;
1435           }
1436           ICache::invalidate_range(instr_pc, *byte_count);
1437           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
1438 
1439           if (load_klass_or_mirror_patch_id ||
1440               stub_id == StubId::c1_load_appendix_patching_id) {
1441             relocInfo::relocType rtype =
1442               (stub_id == StubId::c1_load_klass_patching_id) ?
1443                                    relocInfo::metadata_type :
1444                                    relocInfo::oop_type;
1445             // update the relocInfo at the patch site to the proper type
1446             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1447             assert(nm != nullptr, "invalid nmethod_pc");
1448 
1449             // The old patch site is now a move instruction, so update
1450             // the reloc info so that the embedded value will get updated
1451             // during future GCs.
1452             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1453             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1454                                                      relocInfo::none, rtype);
1455           }
1456 
1457         } else {
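               // Initialization of the klass is not complete yet, so don't patch.
               // Redirect the site to the being-initialized entry so that threads
               // other than the initializing thread come into the VM and block.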
1458           ICache::invalidate_range(copy_buff, *byte_count);
1459           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1460         }
1461       }
1462     }
1463     // If we are patching in a non-perm oop, make sure the nmethod
1464     // is on the right list.
1465     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1466     guarantee(nm != nullptr, "only nmethods can contain non-perm oops");
1467 
1468     // Since we've patched some oops in the nmethod,
1469     // (re)register it with the heap.
1470     Universe::heap()->register_nmethod(nm);
1471   }
1472 JRT_END
1473 
1474 #else // DEOPTIMIZE_WHEN_PATCHING
1475 
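     // Returns false only when the klass constant referenced at the patch site has
     // already failed resolution (its constant pool tag is marked in error); in that
     // case the caller skips invalidating the nmethod and simply deoptimizes, letting
     // the resolution error be thrown.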
1476 static bool is_patching_needed(JavaThread* current, StubId stub_id) {
1477   if (stub_id == StubId::c1_load_klass_patching_id ||
1478       stub_id == StubId::c1_load_mirror_patching_id) {
1479     // last java frame on stack
1480     vframeStream vfst(current, true);
1481     assert(!vfst.at_end(), "Java frame must exist");
1482 
1483     methodHandle caller_method(current, vfst.method());
1484     int bci = vfst.bci();
1485     Bytecodes::Code code = caller_method()->java_code_at(bci);
1486 
1487     switch (code) {
1488       case Bytecodes::_new:
1489       case Bytecodes::_anewarray:
1490       case Bytecodes::_multianewarray:
1491       case Bytecodes::_instanceof:
1492       case Bytecodes::_checkcast: {
1493         Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1494         constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1495         if (tag.is_unresolved_klass_in_error()) {
1496           return false; // throws resolution error
1497         }
1498         break;
1499       }
1500 
1501       default: break;
1502     }
1503   }
1504   return true;
1505 }
1506 
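     // Under DEOPTIMIZE_WHEN_PATCHING we never patch compiled code in place: the
     // caller nmethod is made not entrant (unless resolution already failed) and its
     // frame is deoptimized, so the bytecode is re-executed in the interpreter.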
1507 void Runtime1::patch_code(JavaThread* current, StubId stub_id) {
1508 #ifndef PRODUCT
1509   if (PrintC1Statistics) {
1510     _patch_code_slowcase_cnt++;
1511   }
1512 #endif
1513 
1514   // Enable WXWrite: this function is called by a C1 stub as a runtime function
1515   // (see the other implementation above).
1516   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1517 
1518   if (TracePatching) {
1519     tty->print_cr("Deoptimizing because patch is needed");
1520   }
1521 
1522   RegisterMap reg_map(current,
1523                       RegisterMap::UpdateMap::skip,
1524                       RegisterMap::ProcessFrames::include,
1525                       RegisterMap::WalkContinuation::skip);
1526 
1527   frame runtime_frame = current->last_frame();
1528   frame caller_frame = runtime_frame.sender(&reg_map);
1529   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1530 
1531   if (is_patching_needed(current, stub_id)) {
1532     // Make sure the nmethod is invalidated, i.e. made not entrant.
1533     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1534     if (nm != nullptr) {
1535       nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING);
1536     }
1537   }
1538 
1539   Deoptimization::deoptimize_frame(current, caller_frame.id());
1540   // Return to the now deoptimized frame.
1541   postcond(caller_is_deopted(current));
1542 }
1543 
1544 #endif // DEOPTIMIZE_WHEN_PATCHING
1545 
1546 // Entry point for compiled code. We want to patch an nmethod.
1547 // We don't do a normal VM transition here because we want to
1548 // know, after the patching is complete and any safepoint(s) are taken,
1549 // whether the calling nmethod was deoptimized. We do this by calling a
1550 // helper method which does the normal VM transition, and when it
1551 // completes we can check for deoptimization. This simplifies the
1552 // assembly code in the cpu directories.
1553 //
1554 int Runtime1::move_klass_patching(JavaThread* current) {
1555 //
1556 // NOTE: we are still in Java
1557 //
1558   DEBUG_ONLY(NoHandleMark nhm;)
1559   {
1560     // Enter VM mode
1561     ResetNoHandleMark rnhm;
1562     patch_code(current, StubId::c1_load_klass_patching_id);
1563   }
1564   // Back in Java; use no oops, DON'T safepoint.
1565 
1566   // Return true if calling code is deoptimized
1567 
1568   return caller_is_deopted(current);
1569 }
1570 
1571 int Runtime1::move_mirror_patching(JavaThread* current) {
1572 //
1573 // NOTE: we are still in Java
1574 //
1575   DEBUG_ONLY(NoHandleMark nhm;)
1576   {
1577     // Enter VM mode
1578     ResetNoHandleMark rnhm;
1579     patch_code(current, StubId::c1_load_mirror_patching_id);
1580   }
1581   // Back in Java; use no oops, DON'T safepoint.
1582 
1583   // Return true if calling code is deoptimized
1584 
1585   return caller_is_deopted(current);
1586 }
1587 
1588 int Runtime1::move_appendix_patching(JavaThread* current) {
1589 //
1590 // NOTE: we are still in Java
1591 //
1592   DEBUG_ONLY(NoHandleMark nhm;)
1593   {
1594     // Enter VM mode
1595     ResetNoHandleMark rnhm;
1596     patch_code(current, StubId::c1_load_appendix_patching_id);
1597   }
1598   // Back in Java; use no oops, DON'T safepoint.
1599 
1600   // Return true if calling code is deoptimized
1601 
1602   return caller_is_deopted(current);
1603 }
1604 
1605 // Entry point for compiled code. We want to patch an nmethod.
1606 // We don't do a normal VM transition here because we want to
1607 // know, after the patching is complete and any safepoint(s) are taken,
1608 // whether the calling nmethod was deoptimized. We do this by calling a
1609 // helper method which does the normal VM transition, and when it
1610 // completes we can check for deoptimization. This simplifies the
1611 // assembly code in the cpu directories.
1612 //
1613 int Runtime1::access_field_patching(JavaThread* current) {
1614   //
1615   // NOTE: we are still in Java
1616   //
1617   // Handles created in this function will be deleted by the
1618   // HandleMarkCleaner in the transition to the VM.
1619   NoHandleMark nhm;
1620   {
1621     // Enter VM mode
1622     ResetNoHandleMark rnhm;
1623     patch_code(current, StubId::c1_access_field_patching_id);
1624   }
1625   // Back in Java; use no oops, DON'T safepoint.
1626 
1627   // Return true if calling code is deoptimized
1628 
1629   return caller_is_deopted(current);
1630 }
1631 
1632 
1633 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1634   // for now we just print out the block id
1635   tty->print("%d ", block_id);
1636 JRT_END
1637 
1638 
1639 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1640   // We have to return int instead of bool; otherwise there may be a mismatch
1641   // between the C calling convention and the Java one.
1642   // e.g., on x86, GCC may clear only %al when returning a bool false, but
1643   // the JVM reads the whole %eax as the return value and may therefore
1644   // misinterpret the return value as a boolean true.
1645 
1646   assert(mirror != nullptr, "should null-check on mirror before calling");
1647   Klass* k = java_lang_Class::as_Klass(mirror);
1648   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1649 JRT_END
1650 
1651 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1652   ResourceMark rm;
1653 
1654   RegisterMap reg_map(current,
1655                       RegisterMap::UpdateMap::skip,
1656                       RegisterMap::ProcessFrames::include,
1657                       RegisterMap::WalkContinuation::skip);
1658   frame runtime_frame = current->last_frame();
1659   frame caller_frame = runtime_frame.sender(&reg_map);
1660 
1661   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1662   assert(nm != nullptr, "no more nmethod?");
1663   nm->make_not_entrant(nmethod::InvalidationReason::C1_PREDICATE_FAILED_TRAP);
1664 
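       // Record the failed predicate in the method's MDO (building one if needed)
       // so the failure is reflected in the method's profiling data.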
1665   methodHandle m(current, nm->method());
1666   MethodData* mdo = m->method_data();
1667 
1668   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1669     // Build an MDO.  Ignore errors like OutOfMemory;
1670     // that simply means we won't have an MDO to update.
1671     Method::build_profiling_method_data(m, THREAD);
1672     if (HAS_PENDING_EXCEPTION) {
1673       // Only metaspace OOM is expected. No Java code executed.
1674       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1675       CLEAR_PENDING_EXCEPTION;
1676     }
1677     mdo = m->method_data();
1678   }
1679 
1680   if (mdo != nullptr) {
1681     mdo->inc_trap_count(Deoptimization::Reason_none);
1682   }
1683 
1684   if (TracePredicateFailedTraps) {
1685     stringStream ss1, ss2;
1686     vframeStream vfst(current);
1687     Method* inlinee = vfst.method();
1688     inlinee->print_short_name(&ss1);
1689     m->print_short_name(&ss2);
1690     tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
1691   }
1692 
1693 
1694   Deoptimization::deoptimize_frame(current, caller_frame.id());
1695 
1696 JRT_END
1697 
1698 // Check exception if AbortVMOnException flag set
1699 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1700   ResourceMark rm;
1701   const char* message = nullptr;
1702   if (ex->is_a(vmClasses::Throwable_klass())) {
1703     oop msg = java_lang_Throwable::message(ex);
1704     if (msg != nullptr) {
1705       message = java_lang_String::as_utf8_string(msg);
1706     }
1707   }
1708   Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1709 JRT_END
1710 
1711 #ifndef PRODUCT
1712 void Runtime1::print_statistics() {
1713   tty->print_cr("C1 Runtime statistics:");
1714   tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1715   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1716   tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1717   tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1718   tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1719   tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1720   tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1721   tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1722   tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1723   tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1724   tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1725   tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1726   tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1727   tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1728 
1729   tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1730   tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1731   tty->print_cr(" _new_null_free_array_slowcase_cnt: %u", _new_null_free_array_slowcase_cnt);
1732   tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1733   tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1734   tty->print_cr(" _load_flat_array_slowcase_cnt:   %u", _load_flat_array_slowcase_cnt);
1735   tty->print_cr(" _store_flat_array_slowcase_cnt:  %u", _store_flat_array_slowcase_cnt);
1736   tty->print_cr(" _substitutability_check_slowcase_cnt: %u", _substitutability_check_slowcase_cnt);
1737   tty->print_cr(" _buffer_inline_args_slowcase_cnt:%u", _buffer_inline_args_slowcase_cnt);
1738   tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt:%u", _buffer_inline_args_no_receiver_slowcase_cnt);
1739 
1740   tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1741   tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1742   tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1743 
1744   tty->print_cr(" _throw_range_check_exception_count:            %u", _throw_range_check_exception_count);
1745   tty->print_cr(" _throw_index_exception_count:                  %u", _throw_index_exception_count);
1746   tty->print_cr(" _throw_div0_exception_count:                   %u", _throw_div0_exception_count);
1747   tty->print_cr(" _throw_null_pointer_exception_count:           %u", _throw_null_pointer_exception_count);
1748   tty->print_cr(" _throw_class_cast_exception_count:             %u", _throw_class_cast_exception_count);
1749   tty->print_cr(" _throw_incompatible_class_change_error_count:  %u", _throw_incompatible_class_change_error_count);
1750   tty->print_cr(" _throw_illegal_monitor_state_exception_count:  %u", _throw_illegal_monitor_state_exception_count);
1751   tty->print_cr(" _throw_identity_exception_count:               %u", _throw_identity_exception_count);
1752   tty->print_cr(" _throw_count:                                  %u", _throw_count);
1753 
1754   SharedRuntime::print_ic_miss_histogram();
1755   tty->cr();
1756 }
1757 #endif // PRODUCT