1 /*
   2  * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "asm/codeBuffer.hpp"
  26 #include "c1/c1_CodeStubs.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_LIRAssembler.hpp"
  29 #include "c1/c1_MacroAssembler.hpp"
  30 #include "c1/c1_Runtime1.hpp"
  31 #include "classfile/javaClasses.inline.hpp"
  32 #include "classfile/vmClasses.hpp"
  33 #include "classfile/vmSymbols.hpp"
  34 #include "code/codeBlob.hpp"
  35 #include "code/compiledIC.hpp"
  36 #include "code/scopeDesc.hpp"
  37 #include "code/vtableStubs.hpp"
  38 #include "compiler/compilationPolicy.hpp"
  39 #include "compiler/disassembler.hpp"
  40 #include "compiler/oopMap.hpp"
  41 #include "gc/shared/barrierSet.hpp"
  42 #include "gc/shared/c1/barrierSetC1.hpp"
  43 #include "gc/shared/collectedHeap.hpp"
  44 #include "interpreter/bytecode.hpp"
  45 #include "interpreter/interpreter.hpp"
  46 #include "jfr/support/jfrIntrinsics.hpp"
  47 #include "logging/log.hpp"
  48 #include "memory/oopFactory.hpp"
  49 #include "memory/resourceArea.hpp"
  50 #include "memory/universe.hpp"
  51 #include "oops/access.inline.hpp"
  52 #include "oops/flatArrayKlass.hpp"
  53 #include "oops/flatArrayOop.inline.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/objArrayOop.inline.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/fieldDescriptor.inline.hpp"
  60 #include "runtime/frame.inline.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/interfaceSupport.inline.hpp"
  63 #include "runtime/javaCalls.hpp"
  64 #include "runtime/sharedRuntime.hpp"
  65 #include "runtime/stackWatermarkSet.hpp"
  66 #include "runtime/stubRoutines.hpp"
  67 #include "runtime/vframe.inline.hpp"
  68 #include "runtime/vframeArray.hpp"
  69 #include "runtime/vm_version.hpp"
  70 #include "utilities/copy.hpp"
  71 #include "utilities/events.hpp"
  72 
  73 
  74 // Implementation of StubAssembler
  75 
  76 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  77   _name = name;
  78   _must_gc_arguments = false;
  79   _frame_size = no_frame_size;
  80   _num_rt_args = 0;
  81   _stub_id = stub_id;
  82 }
  83 
  84 
  85 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  86   _name = name;
  87   _must_gc_arguments = must_gc_arguments;
  88 }
  89 
  90 
  91 void StubAssembler::set_frame_size(int size) {
  92   if (_frame_size == no_frame_size) {
  93     _frame_size = size;
  94   }
  95   assert(_frame_size == size, "can't change the frame size");
  96 }
  97 
  98 
  99 void StubAssembler::set_num_rt_args(int args) {
 100   if (_num_rt_args == 0) {
 101     _num_rt_args = args;
 102   }
 103   assert(_num_rt_args == args, "can't change the number of args");
 104 }
 105 
 106 // Implementation of Runtime1
 107 
 108 CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS];
 109 
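// Build the table of human-readable blob names from the stub id list, e.g. for
// a stub id such as new_instance_id the entry is "C1 Runtime new_instance_id_blob".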
 110 #define C1_BLOB_NAME_DEFINE(name)  "C1 Runtime " # name "_blob",
 111 const char *Runtime1::_blob_names[] = {
 112   C1_STUBS_DO(C1_BLOB_NAME_DEFINE)
 113 };
#undef C1_BLOB_NAME_DEFINE
 115 
 116 #ifndef PRODUCT
 117 // statistics
 118 uint Runtime1::_generic_arraycopystub_cnt = 0;
 119 uint Runtime1::_arraycopy_slowcase_cnt = 0;
 120 uint Runtime1::_arraycopy_checkcast_cnt = 0;
 121 uint Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
 122 uint Runtime1::_new_type_array_slowcase_cnt = 0;
 123 uint Runtime1::_new_object_array_slowcase_cnt = 0;
 124 uint Runtime1::_new_null_free_array_slowcase_cnt = 0;
 125 uint Runtime1::_new_instance_slowcase_cnt = 0;
 126 uint Runtime1::_new_multi_array_slowcase_cnt = 0;
 127 uint Runtime1::_load_flat_array_slowcase_cnt = 0;
 128 uint Runtime1::_store_flat_array_slowcase_cnt = 0;
 129 uint Runtime1::_substitutability_check_slowcase_cnt = 0;
 130 uint Runtime1::_buffer_inline_args_slowcase_cnt = 0;
 131 uint Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
 132 uint Runtime1::_monitorenter_slowcase_cnt = 0;
 133 uint Runtime1::_monitorexit_slowcase_cnt = 0;
 134 uint Runtime1::_patch_code_slowcase_cnt = 0;
 135 uint Runtime1::_throw_range_check_exception_count = 0;
 136 uint Runtime1::_throw_index_exception_count = 0;
 137 uint Runtime1::_throw_div0_exception_count = 0;
 138 uint Runtime1::_throw_null_pointer_exception_count = 0;
 139 uint Runtime1::_throw_class_cast_exception_count = 0;
 140 uint Runtime1::_throw_incompatible_class_change_error_count = 0;
 141 uint Runtime1::_throw_illegal_monitor_state_exception_count = 0;
 142 uint Runtime1::_throw_identity_exception_count = 0;
 143 uint Runtime1::_throw_count = 0;
 144 
 145 static uint _byte_arraycopy_stub_cnt = 0;
 146 static uint _short_arraycopy_stub_cnt = 0;
 147 static uint _int_arraycopy_stub_cnt = 0;
 148 static uint _long_arraycopy_stub_cnt = 0;
 149 static uint _oop_arraycopy_stub_cnt = 0;
 150 
 151 address Runtime1::arraycopy_count_address(BasicType type) {
 152   switch (type) {
 153   case T_BOOLEAN:
 154   case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
 155   case T_CHAR:
 156   case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
 157   case T_FLOAT:
 158   case T_INT:    return (address)&_int_arraycopy_stub_cnt;
 159   case T_DOUBLE:
 160   case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
 161   case T_ARRAY:
 162   case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
 163   default:
 164     ShouldNotReachHere();
 165     return nullptr;
 166   }
 167 }
 168 
 169 
 170 #endif
 171 
 172 // Simple helper to see if the caller of a runtime stub which
 173 // entered the VM has been deoptimized
 174 
 175 static bool caller_is_deopted(JavaThread* current) {
 176   RegisterMap reg_map(current,
 177                       RegisterMap::UpdateMap::skip,
 178                       RegisterMap::ProcessFrames::include,
 179                       RegisterMap::WalkContinuation::skip);
 180   frame runtime_frame = current->last_frame();
 181   frame caller_frame = runtime_frame.sender(&reg_map);
 182   assert(caller_frame.is_compiled_frame(), "must be compiled");
 183   return caller_frame.is_deoptimized_frame();
 184 }
 185 
 186 // Stress deoptimization
 187 static void deopt_caller(JavaThread* current) {
 188   if (!caller_is_deopted(current)) {
 189     RegisterMap reg_map(current,
 190                         RegisterMap::UpdateMap::skip,
 191                         RegisterMap::ProcessFrames::include,
 192                         RegisterMap::WalkContinuation::skip);
 193     frame runtime_frame = current->last_frame();
 194     frame caller_frame = runtime_frame.sender(&reg_map);
 195     Deoptimization::deoptimize_frame(current, caller_frame.id());
 196     assert(caller_is_deopted(current), "Must be deoptimized");
 197   }
 198 }
 199 
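// Adapter closure: wraps a C1StubId so that the generic blob generation code
// below can call back into Runtime1::generate_code_for() for that stub.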
 200 class C1StubIdStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 201  private:
 202   C1StubId _id;
 203  public:
 204   C1StubIdStubAssemblerCodeGenClosure(C1StubId id) : _id(id) {}
 205   virtual OopMapSet* generate_code(StubAssembler* sasm) {
 206     return Runtime1::generate_code_for(_id, sasm);
 207   }
 208 };
 209 
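// Generate a single runtime stub into a RuntimeStub blob. Returns nullptr if the
// blob could not be allocated in the code cache (allocation failure is not fatal
// here); the caller is responsible for propagating the failure.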
 210 CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
 211   ResourceMark rm;
 212   // create code buffer for code storage
 213   CodeBuffer code(buffer_blob);
 214 
 215   OopMapSet* oop_maps;
 216   int frame_size;
 217   bool must_gc_arguments;
 218 
 219   Compilation::setup_code_buffer(&code, 0);
 220 
 221   // create assembler for code generation
 222   StubAssembler* sasm = new StubAssembler(&code, name, (int)id);
 223   // generate code for runtime stub
 224   oop_maps = cl->generate_code(sasm);
 225   assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
 226          "if stub has an oop map it must have a valid frame size");
 227   assert(!expect_oop_map || oop_maps != nullptr, "must have an oopmap");
 228 
 229   // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
 230   sasm->align(BytesPerWord);
 231   // make sure all code is in code buffer
 232   sasm->flush();
 233 
 234   frame_size = sasm->frame_size();
 235   must_gc_arguments = sasm->must_gc_arguments();
 236   // create blob - distinguish a few special cases
 237   CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
 238                                                  &code,
 239                                                  CodeOffsets::frame_never_safe,
 240                                                  frame_size,
 241                                                  oop_maps,
 242                                                  must_gc_arguments,
 243                                                  false /* alloc_fail_is_fatal */ );
 244   return blob;
 245 }
 246 
 247 bool Runtime1::generate_blob_for(BufferBlob* buffer_blob, C1StubId id) {
 248   assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
 249   bool expect_oop_map = true;
 250 #ifdef ASSERT
 251   // Make sure that stubs that need oopmaps have them
 252   switch (id) {
 253     // These stubs don't need to have an oopmap
 254   case C1StubId::dtrace_object_alloc_id:
 255   case C1StubId::slow_subtype_check_id:
 256   case C1StubId::fpu2long_stub_id:
 257   case C1StubId::unwind_exception_id:
 258   case C1StubId::counter_overflow_id:
 259   case C1StubId::is_instance_of_id:
 260     expect_oop_map = false;
 261     break;
 262   default:
 263     break;
 264   }
 265 #endif
 266   C1StubIdStubAssemblerCodeGenClosure cl(id);
 267   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
 268   // install blob
 269   _blobs[(int)id] = blob;
 270   return blob != nullptr;
 271 }
 272 
 273 bool Runtime1::initialize(BufferBlob* blob) {
 274   // platform-dependent initialization
 275   initialize_pd();
 276   // generate stubs
 277   int limit = (int)C1StubId::NUM_STUBIDS;
 278   for (int id = 0; id < limit; id++) {
 279     if (!generate_blob_for(blob, (C1StubId) id)) {
 280       return false;
 281     }
 282   }
 283   // printing
 284 #ifndef PRODUCT
 285   if (PrintSimpleStubs) {
 286     ResourceMark rm;
 287     for (int id = 0; id < limit; id++) {
 288       _blobs[id]->print();
 289       if (_blobs[id]->oop_maps() != nullptr) {
 290         _blobs[id]->oop_maps()->print();
 291       }
 292     }
 293   }
 294 #endif
 295   BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
 296   return bs->generate_c1_runtime_stubs(blob);
 297 }
 298 
 299 CodeBlob* Runtime1::blob_for(C1StubId id) {
 300   assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
 301   return _blobs[(int)id];
 302 }
 303 
 304 
 305 const char* Runtime1::name_for(C1StubId id) {
 306   assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
 307   return _blob_names[(int)id];
 308 }
 309 
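// Map a runtime entry point back to a printable name for disassembly and debug
// output: first check the C1 stub blobs, then a fixed list of shared runtime and
// stub routine entries, and finally any platform-specific names.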
 310 const char* Runtime1::name_for_address(address entry) {
 311   int limit = (int)C1StubId::NUM_STUBIDS;
 312   for (int i = 0; i < limit; i++) {
 313     C1StubId id = (C1StubId)i;
 314     if (entry == entry_for(id)) return name_for(id);
 315   }
 316 
 317 #define FUNCTION_CASE(a, f) \
 318   if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
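// For example, FUNCTION_CASE(entry, SharedRuntime::d2f) expands to
//   if ((intptr_t)entry == CAST_FROM_FN_PTR(intptr_t, SharedRuntime::d2f))  return "SharedRuntime::d2f";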
 319 
 320   FUNCTION_CASE(entry, os::javaTimeMillis);
 321   FUNCTION_CASE(entry, os::javaTimeNanos);
 322   FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
 323   FUNCTION_CASE(entry, SharedRuntime::d2f);
 324   FUNCTION_CASE(entry, SharedRuntime::d2i);
 325   FUNCTION_CASE(entry, SharedRuntime::d2l);
 326   FUNCTION_CASE(entry, SharedRuntime::dcos);
 327   FUNCTION_CASE(entry, SharedRuntime::dexp);
 328   FUNCTION_CASE(entry, SharedRuntime::dlog);
 329   FUNCTION_CASE(entry, SharedRuntime::dlog10);
 330   FUNCTION_CASE(entry, SharedRuntime::dpow);
 331   FUNCTION_CASE(entry, SharedRuntime::drem);
 332   FUNCTION_CASE(entry, SharedRuntime::dsin);
 333   FUNCTION_CASE(entry, SharedRuntime::dtan);
 334   FUNCTION_CASE(entry, SharedRuntime::f2i);
 335   FUNCTION_CASE(entry, SharedRuntime::f2l);
 336   FUNCTION_CASE(entry, SharedRuntime::frem);
 337   FUNCTION_CASE(entry, SharedRuntime::l2d);
 338   FUNCTION_CASE(entry, SharedRuntime::l2f);
 339   FUNCTION_CASE(entry, SharedRuntime::ldiv);
 340   FUNCTION_CASE(entry, SharedRuntime::lmul);
 341   FUNCTION_CASE(entry, SharedRuntime::lrem);
 343   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
 344   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 345   FUNCTION_CASE(entry, is_instance_of);
 346   FUNCTION_CASE(entry, trace_block_entry);
 347 #ifdef JFR_HAVE_INTRINSICS
 348   FUNCTION_CASE(entry, JfrTime::time_function());
 349 #endif
 350   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 351   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 352   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 353   FUNCTION_CASE(entry, StubRoutines::dexp());
 354   FUNCTION_CASE(entry, StubRoutines::dlog());
 355   FUNCTION_CASE(entry, StubRoutines::dlog10());
 356   FUNCTION_CASE(entry, StubRoutines::dpow());
 357   FUNCTION_CASE(entry, StubRoutines::dsin());
 358   FUNCTION_CASE(entry, StubRoutines::dcos());
 359   FUNCTION_CASE(entry, StubRoutines::dtan());
 360   FUNCTION_CASE(entry, StubRoutines::dtanh());
 361 
 362 #undef FUNCTION_CASE
 363 
 364   // Soft float adds more runtime names.
 365   return pd_name_for_address(entry);
 366 }
 367 
 368 static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {
 369 #ifndef PRODUCT
 370   if (PrintC1Statistics) {
 371     Runtime1::_new_instance_slowcase_cnt++;
 372   }
 373 #endif
 374   assert(klass->is_klass(), "not a class");
 375   Handle holder(current, klass->klass_holder()); // keep the klass alive
 376   InstanceKlass* h = InstanceKlass::cast(klass);
 377   h->check_valid_for_instantiation(true, CHECK);
 378   // make sure klass is initialized
 379   h->initialize(CHECK);
 380   // allocate instance and return via TLS
 381   oop obj = h->allocate_instance(CHECK);
 382   current->set_vm_result(obj);
}
 384 
 385 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
 386   allocate_instance(current, klass, CHECK);
 387 JRT_END
 388 
 389 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
 390 #ifndef PRODUCT
 391   if (PrintC1Statistics) {
 392     _new_type_array_slowcase_cnt++;
 393   }
 394 #endif
  // Note: no handle for klass needed since it is not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
 398   assert(klass->is_klass(), "not a class");
 399   BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
 400   oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
 401   current->set_vm_result(obj);
  // This is pretty rare, but this runtime patch is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
 404   if (DeoptimizeALot) {
 405     deopt_caller(current);
 406   }
 407 
 408 JRT_END
 409 
 410 
 411 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
 412 #ifndef PRODUCT
 413   if (PrintC1Statistics) {
 414     _new_object_array_slowcase_cnt++;
 415   }
 416 #endif
  // Note: no handle for klass needed since it is not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
 420   assert(array_klass->is_klass(), "not a class");
 421   Handle holder(current, array_klass->klass_holder()); // keep the klass alive
 422   Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
 423   objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
 424   current->set_vm_result(obj);
  // This is pretty rare, but this runtime patch is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
 427   if (DeoptimizeALot) {
 428     deopt_caller(current);
 429   }
 430 JRT_END
 431 
 432 
 433 JRT_ENTRY(void, Runtime1::new_null_free_array(JavaThread* current, Klass* array_klass, jint length))
 434   NOT_PRODUCT(_new_null_free_array_slowcase_cnt++;)
 435   // TODO 8350865 This is dead code since 8325660 because null-free arrays can only be created via the factory methods that are not yet implemented in C1. Should probably be fixed by 8265122.
 436 
  // Note: no handle for klass needed since it is not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
 440   assert(array_klass->is_klass(), "not a class");
 441   Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
 442   Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
 443   assert(elem_klass->is_inline_klass(), "must be");
 444   InlineKlass* vk = InlineKlass::cast(elem_klass);
 445   // Logically creates elements, ensure klass init
 446   elem_klass->initialize(CHECK);
  arrayOop obj = nullptr;
 448   if (UseArrayFlattening && vk->has_non_atomic_layout()) {
 449     obj = oopFactory::new_flatArray(elem_klass, length, LayoutKind::NON_ATOMIC_FLAT, CHECK);
 450   } else {
 451     obj = oopFactory::new_null_free_objArray(elem_klass, length, CHECK);
 452   }
 453   current->set_vm_result(obj);
  // This is pretty rare, but this runtime patch is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
 456   if (DeoptimizeALot) {
 457     deopt_caller(current);
 458   }
 459 JRT_END
 460 
 461 
 462 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
 463 #ifndef PRODUCT
 464   if (PrintC1Statistics) {
 465     _new_multi_array_slowcase_cnt++;
 466   }
 467 #endif
 468   assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be at least 1");
 470   Handle holder(current, klass->klass_holder()); // keep the klass alive
 471   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
 472   current->set_vm_result(obj);
 473 JRT_END
 474 
 475 
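// Record in the caller's MethodData that the array access at the current bci saw
// a flat (and possibly null-free) array. This is only done when the caller is
// compiled at the full-profile tier and C1UpdateMethodData is enabled; later
// compilations can consult this profile.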
 476 static void profile_flat_array(JavaThread* current, bool load, bool null_free) {
 477   ResourceMark rm(current);
 478   vframeStream vfst(current, true);
 479   assert(!vfst.at_end(), "Java frame must exist");
 480   // Check if array access profiling is enabled
 481   if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
 482     return;
 483   }
 484   int bci = vfst.bci();
 485   Method* method = vfst.method();
 486   MethodData* md = method->method_data();
 487   if (md != nullptr) {
 488     // Lock to access ProfileData, and ensure lock is not broken by a safepoint
 489     MutexLocker ml(md->extra_data_lock(), Mutex::_no_safepoint_check_flag);
 490 
 491     ProfileData* data = md->bci_to_data(bci);
 492     assert(data != nullptr, "incorrect profiling entry");
 493     if (data->is_ArrayLoadData()) {
 494       assert(load, "should be an array load");
 495       ArrayLoadData* load_data = (ArrayLoadData*) data;
 496       load_data->set_flat_array();
 497       if (null_free) {
 498         load_data->set_null_free_array();
 499       }
 500     } else {
 501       assert(data->is_ArrayStoreData(), "");
 502       assert(!load, "should be an array store");
 503       ArrayStoreData* store_data = (ArrayStoreData*) data;
 504       store_data->set_flat_array();
 505       if (null_free) {
 506         store_data->set_null_free_array();
 507       }
 508     }
 509   }
 510 }
 511 
 512 JRT_ENTRY(void, Runtime1::load_flat_array(JavaThread* current, flatArrayOopDesc* array, int index))
 513   assert(array->klass()->is_flatArray_klass(), "should not be called");
 514   profile_flat_array(current, true, array->is_null_free_array());
 515 
 516   NOT_PRODUCT(_load_flat_array_slowcase_cnt++;)
 517   assert(array->length() > 0 && index < array->length(), "already checked");
 518   flatArrayHandle vah(current, array);
 519   oop obj = array->read_value_from_flat_array(index, CHECK);
 520   current->set_vm_result(obj);
 521 JRT_END
 522 
 523 JRT_ENTRY(void, Runtime1::store_flat_array(JavaThread* current, flatArrayOopDesc* array, int index, oopDesc* value))
  // TODO 8350865 We can call here with a non-flat array because of LIR_Assembler::emit_opFlattenedArrayCheck
 525   if (array->klass()->is_flatArray_klass()) {
 526     profile_flat_array(current, false, array->is_null_free_array());
 527   }
 528 
 529   NOT_PRODUCT(_store_flat_array_slowcase_cnt++;)
 530   if (value == nullptr && array->is_null_free_array()) {
 531     SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
 532   } else {
 533     assert(array->klass()->is_flatArray_klass(), "should not be called");
 534     array->write_value_to_flat_array(value, index, CHECK);
 535   }
 536 JRT_END
 537 
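// Slow path of the substitutability check on value objects (acmp): dispatch to the
// Java-level ValueObjectMethods.isSubstitutable() and return 1 if the operands are
// substitutable, 0 otherwise.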
 538 JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
 539   NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
 540   JavaCallArguments args;
 541   args.push_oop(Handle(THREAD, left));
 542   args.push_oop(Handle(THREAD, right));
 543   JavaValue result(T_BOOLEAN);
 544   JavaCalls::call_static(&result,
 545                          vmClasses::ValueObjectMethods_klass(),
 546                          vmSymbols::isSubstitutable_name(),
 547                          vmSymbols::object_object_boolean_signature(),
 548                          &args, CHECK_0);
 549   return result.get_jboolean() ? 1 : 0;
 550 JRT_END
 551 
 552 
 553 extern "C" void ps();
 554 
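// Helper for the buffer_inline_args entry points below: allocates heap buffers for
// the scalarized inline-type arguments of method m (optionally including the
// receiver) and passes the result back via the thread-local vm_result.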
 555 void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
 556   JavaThread* THREAD = current;
 557   methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
 558   oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
 559   current->set_vm_result(obj);
 560 }
 561 
 562 JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
 563   NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
 564   buffer_inline_args_impl(current, method, true);
 565 JRT_END
 566 
 567 JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
 568   NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
 569   buffer_inline_args_impl(current, method, false);
 570 JRT_END
 571 
 572 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
 573   tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
 574 JRT_END
 575 
 576 
 577 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 578   ResourceMark rm(current);
 579   const char* klass_name = obj->klass()->external_name();
 580   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
 581 JRT_END
 582 
 583 
 584 // counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee method (which may be inlined into the
// enclosing method) is passed as an argument. In order to do that it is embedded in the code as
// a constant.
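// For example, an _ifeq at bci 10 whose 16-bit branch offset is -8 yields a
// destination bci of 10 + (-8) = 2, i.e. the loop header whose backedge
// counter overflowed.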
 588 static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
 589   nmethod* osr_nm = nullptr;
 590   methodHandle method(current, m);
 591 
 592   RegisterMap map(current,
 593                   RegisterMap::UpdateMap::skip,
 594                   RegisterMap::ProcessFrames::include,
 595                   RegisterMap::WalkContinuation::skip);
  frame fr = current->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != nullptr && nm->is_nmethod(), "Sanity check");
 599   methodHandle enclosing_method(current, nm->method());
 600 
 601   CompLevel level = (CompLevel)nm->comp_level();
 602   int bci = InvocationEntryBci;
 603   if (branch_bci != InvocationEntryBci) {
 604     // Compute destination bci
 605     address pc = method()->code_base() + branch_bci;
 606     Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
 607     int offset = 0;
 608     switch (branch) {
 609       case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
 610       case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
 611       case Bytecodes::_if_icmple: case Bytecodes::_ifle:
 612       case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
 613       case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
 614       case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
 615       case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
 616         offset = (int16_t)Bytes::get_Java_u2(pc + 1);
 617         break;
 618       case Bytecodes::_goto_w:
 619         offset = Bytes::get_Java_u4(pc + 1);
 620         break;
 621       default: ;
 622     }
 623     bci = branch_bci + offset;
 624   }
 625   osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
 626   return osr_nm;
 627 }
 628 
 629 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
 630   nmethod* osr_nm;
 631   JRT_BLOCK
 632     osr_nm = counter_overflow_helper(current, bci, method);
 633     if (osr_nm != nullptr) {
 634       RegisterMap map(current,
 635                       RegisterMap::UpdateMap::skip,
 636                       RegisterMap::ProcessFrames::include,
 637                       RegisterMap::WalkContinuation::skip);
      frame fr = current->last_frame().sender(&map);
 639       Deoptimization::deoptimize_frame(current, fr.id());
 640     }
 641   JRT_BLOCK_END
 642   return nullptr;
 643 JRT_END
 644 
 645 extern void vm_exit(int code);
 646 
// Enter this method from the compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return to is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob's
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
 663 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
 664   // Reset method handle flag.
 665   current->set_is_method_handle_return(false);
 666 
 667   Handle exception(current, ex);
 668 
 669   // This function is called when we are about to throw an exception. Therefore,
 670   // we have to poll the stack watermark barrier to make sure that not yet safe
 671   // stack frames are made safe before returning into them.
 672   if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) {
 673     // The C1StubId::handle_exception_from_callee_id handler is invoked after the
 674     // frame has been unwound. It instead builds its own stub frame, to call the
 675     // runtime. But the throwing frame has already been unwound here.
 676     StackWatermarkSet::after_unwind(current);
 677   }
 678 
 679   nm = CodeCache::find_nmethod(pc);
 680   assert(nm != nullptr, "this is not an nmethod");
  // Adjust the pc as needed.
 682   if (nm->is_deopt_pc(pc)) {
 683     RegisterMap map(current,
 684                     RegisterMap::UpdateMap::skip,
 685                     RegisterMap::ProcessFrames::include,
 686                     RegisterMap::WalkContinuation::skip);
 687     frame exception_frame = current->last_frame().sender(&map);
 688     // if the frame isn't deopted then pc must not correspond to the caller of last_frame
 689     assert(exception_frame.is_deoptimized_frame(), "must be deopted");
 690     pc = exception_frame.pc();
 691   }
 692   assert(exception.not_null(), "null exceptions should be handled by throw_exception");
 693   // Check that exception is a subclass of Throwable
 694   assert(exception->is_a(vmClasses::Throwable_klass()),
 695          "Exception not subclass of Throwable");
 696 
 697   // debugging support
 698   // tracing
 699   if (log_is_enabled(Info, exceptions)) {
 700     ResourceMark rm; // print_value_string
 701     stringStream tempst;
 702     assert(nm->method() != nullptr, "Unexpected null method()");
 703     tempst.print("C1 compiled method <%s>\n"
                 " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
 705                  nm->method()->print_value_string(), p2i(pc), p2i(current));
 706     Exceptions::log_exception(exception, tempst.freeze());
 707   }
 708   // for AbortVMOnException flag
 709   Exceptions::debug_check_abort(exception);
 710 
 711   // Check the stack guard pages and re-enable them if necessary and there is
 712   // enough space on the stack to do so.  Use fast exceptions only if the guard
 713   // pages are enabled.
 714   bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();
 715 
 716   if (JvmtiExport::can_post_on_exceptions()) {
 717     // To ensure correct notification of exception catches and throws
 718     // we have to deoptimize here.  If we attempted to notify the
 719     // catches and throws during this exception lookup it's possible
 720     // we could deoptimize on the way out of the VM and end back in
 721     // the interpreter at the throw site.  This would result in double
 722     // notifications since the interpreter would also notify about
 723     // these same catches and throws as it unwound the frame.
 724 
 725     RegisterMap reg_map(current,
 726                         RegisterMap::UpdateMap::include,
 727                         RegisterMap::ProcessFrames::include,
 728                         RegisterMap::WalkContinuation::skip);
 729     frame stub_frame = current->last_frame();
 730     frame caller_frame = stub_frame.sender(&reg_map);
 731 
 732     // We don't really want to deoptimize the nmethod itself since we
 733     // can actually continue in the exception handler ourselves but I
 734     // don't see an easy way to have the desired effect.
 735     Deoptimization::deoptimize_frame(current, caller_frame.id());
 736     assert(caller_is_deopted(current), "Must be deoptimized");
 737 
 738     return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
 739   }
 740 
 741   // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
 742   if (guard_pages_enabled) {
 743     address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
 744     if (fast_continuation != nullptr) {
 745       // Set flag if return address is a method handle call site.
 746       current->set_is_method_handle_return(nm->is_method_handle_return(pc));
 747       return fast_continuation;
 748     }
 749   }
 750 
 751   // If the stack guard pages are enabled, check whether there is a handler in
 752   // the current method.  Otherwise (guard pages disabled), force an unwind and
 753   // skip the exception cache update (i.e., just leave continuation as null).
 754   address continuation = nullptr;
 755   if (guard_pages_enabled) {
 756 
 757     // New exception handling mechanism can support inlined methods
 758     // with exception handlers since the mappings are from PC to PC
 759 
 760     // Clear out the exception oop and pc since looking up an
 761     // exception handler can cause class loading, which might throw an
 762     // exception and those fields are expected to be clear during
 763     // normal bytecode execution.
 764     current->clear_exception_oop_and_pc();
 765 
 766     bool recursive_exception = false;
 767     continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
 768     // If an exception was thrown during exception dispatch, the exception oop may have changed
 769     current->set_exception_oop(exception());
 770     current->set_exception_pc(pc);
 771 
 772     // the exception cache is used only by non-implicit exceptions
 773     // Update the exception cache only when there didn't happen
 774     // another exception during the computation of the compiled
 775     // exception handler. Checking for exception oop equality is not
 776     // sufficient because some exceptions are pre-allocated and reused.
 777     if (continuation != nullptr && !recursive_exception) {
 778       nm->add_handler_for_exception_and_pc(exception, pc, continuation);
 779     }
 780   }
 781 
 782   current->set_vm_result(exception());
 783   // Set flag if return address is a method handle call site.
 784   current->set_is_method_handle_return(nm->is_method_handle_return(pc));
 785 
 786   if (log_is_enabled(Info, exceptions)) {
 787     ResourceMark rm;
 788     log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
 789                          " for exception thrown at PC " PTR_FORMAT,
 790                          p2i(current), p2i(continuation), p2i(pc));
 791   }
 792 
 793   return continuation;
 794 JRT_END
 795 
 796 // Enter this method from compiled code only if there is a Java exception handler
 797 // in the method handling the exception.
 798 // We are entering here from exception stub. We don't do a normal VM transition here.
 799 // We do it in a helper. This is so we can check to see if the nmethod we have just
 800 // searched for an exception handler has been deoptimized in the meantime.
 801 address Runtime1::exception_handler_for_pc(JavaThread* current) {
 802   oop exception = current->exception_oop();
 803   address pc = current->exception_pc();
 804   // Still in Java mode
 805   DEBUG_ONLY(NoHandleMark nhm);
 806   nmethod* nm = nullptr;
 807   address continuation = nullptr;
 808   {
 809     // Enter VM mode by calling the helper
 810     ResetNoHandleMark rnhm;
 811     continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
 812   }
 813   // Back in JAVA, use no oops DON'T safepoint
 814 
 815   // Now check to see if the nmethod we were called from is now deoptimized.
 816   // If so we must return to the deopt blob and deoptimize the nmethod
 817   if (nm != nullptr && caller_is_deopted(current)) {
 818     continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
 819   }
 820 
 821   assert(continuation != nullptr, "no handler found");
 822   return continuation;
 823 }
 824 
 825 
 826 JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
 827 #ifndef PRODUCT
 828   if (PrintC1Statistics) {
 829     _throw_range_check_exception_count++;
 830   }
 831 #endif
 832   const int len = 35;
 833   assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
 834   char message[2 * jintAsStringSize + len];
 835   os::snprintf_checked(message, sizeof(message), "Index %d out of bounds for length %d", index, a->length());
 836   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
 837 JRT_END
 838 
 839 
 840 JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
 841 #ifndef PRODUCT
 842   if (PrintC1Statistics) {
 843     _throw_index_exception_count++;
 844   }
 845 #endif
 846   char message[16];
 847   os::snprintf_checked(message, sizeof(message), "%d", index);
 848   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
 849 JRT_END
 850 
 851 
 852 JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
 853 #ifndef PRODUCT
 854   if (PrintC1Statistics) {
 855     _throw_div0_exception_count++;
 856   }
 857 #endif
 858   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 859 JRT_END
 860 
 861 
 862 JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
 863 #ifndef PRODUCT
 864   if (PrintC1Statistics) {
 865     _throw_null_pointer_exception_count++;
 866   }
 867 #endif
 868   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
 869 JRT_END
 870 
 871 
 872 JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
 873 #ifndef PRODUCT
 874   if (PrintC1Statistics) {
 875     _throw_class_cast_exception_count++;
 876   }
 877 #endif
 878   ResourceMark rm(current);
 879   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 880   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 881 JRT_END
 882 
 883 
 884 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
 885 #ifndef PRODUCT
 886   if (PrintC1Statistics) {
 887     _throw_incompatible_class_change_error_count++;
 888   }
 889 #endif
 890   ResourceMark rm(current);
 891   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
 892 JRT_END
 893 
 894 
 895 JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
 896   NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
 897   ResourceMark rm(current);
 898   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
 899 JRT_END
 900 
 901 JRT_ENTRY(void, Runtime1::throw_identity_exception(JavaThread* current, oopDesc* object))
 902   NOT_PRODUCT(_throw_identity_exception_count++;)
 903   ResourceMark rm(current);
 904   char* message = SharedRuntime::generate_identity_exception_message(current, object->klass());
 905   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IdentityException(), message);
 906 JRT_END
 907 
 908 JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
 909 #ifndef PRODUCT
 910   if (PrintC1Statistics) {
 911     _monitorenter_slowcase_cnt++;
 912   }
 913 #endif
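  // Under LM_MONITOR the compiled code does not store the object into the
  // BasicObjectLock, so fill it in here before entering the monitor.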
 914   if (LockingMode == LM_MONITOR) {
 915     lock->set_obj(obj);
 916   }
 917   assert(obj == lock->obj(), "must match");
 918   SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
 919 JRT_END
 920 
 921 
 922 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 923   assert(current == JavaThread::current(), "pre-condition");
 924 #ifndef PRODUCT
 925   if (PrintC1Statistics) {
 926     _monitorexit_slowcase_cnt++;
 927   }
 928 #endif
 929   assert(current->last_Java_sp(), "last_Java_sp must be set");
 930   oop obj = lock->obj();
 931   assert(oopDesc::is_oop(obj), "must be null or an object");
 932   SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
 933 JRT_END
 934 
 935 // Cf. OptoRuntime::deoptimize_caller_frame
 936 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
 937   // Called from within the owner thread, so no need for safepoint
 938   RegisterMap reg_map(current,
 939                       RegisterMap::UpdateMap::skip,
 940                       RegisterMap::ProcessFrames::include,
 941                       RegisterMap::WalkContinuation::skip);
 942   frame stub_frame = current->last_frame();
 943   assert(stub_frame.is_runtime_frame(), "Sanity check");
 944   frame caller_frame = stub_frame.sender(&reg_map);
 945   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 946   assert(nm != nullptr, "Sanity check");
 947   methodHandle method(current, nm->method());
 948   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 949   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 950   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 951 
 952   if (action == Deoptimization::Action_make_not_entrant) {
 953     if (nm->make_not_entrant("C1 deoptimize")) {
 954       if (reason == Deoptimization::Reason_tenured) {
 955         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
 956         if (trap_mdo != nullptr) {
 957           trap_mdo->inc_tenure_traps();
 958         }
 959       }
 960     }
 961   }
 962 
 963   // Deoptimize the caller frame.
 964   Deoptimization::deoptimize_frame(current, caller_frame.id());
 965   // Return to the now deoptimized frame.
 966 JRT_END
 967 
 968 
 969 #ifndef DEOPTIMIZE_WHEN_PATCHING
 970 
 971 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
 972   Bytecode_field field_access(caller, bci);
 973   // This can be static or non-static field access
 974   Bytecodes::Code code       = field_access.code();
 975 
 976   // We must load class, initialize class and resolve the field
 977   fieldDescriptor result; // initialize class if needed
 978   constantPoolHandle constants(THREAD, caller->constants());
 979   LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
 980   return result.field_holder();
 981 }
 982 
 983 
//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.  The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.
//
 998 // Patches basically look like this:
 999 //
1000 //
1001 // patch_site: jmp patch stub     ;; will be patched
1002 // continue:   ...
1003 //             ...
1004 //             ...
1005 //             ...
1006 //
1007 // They have a stub which looks like this:
1008 //
1009 //             ;; patch body
1010 //             movl <const>, reg           (for class constants)
1011 //        <or> movl [reg1 + <const>], reg  (for field offsets)
1012 //        <or> movl reg, [reg1 + <const>]  (for field offsets)
1013 //             <being_init offset> <bytes to copy> <bytes to skip>
1014 // patch_stub: call Runtime1::patch_code (through a runtime stub)
1015 //             jmp patch_site
1016 //
1017 //
1018 // A normal patch is done by rewriting the patch body, usually a move,
1019 // and then copying it into place over top of the jmp instruction
1020 // being careful to flush caches and doing it in an MP-safe way.  The
1021 // constants following the patch body are used to find various pieces
1022 // of the patch relative to the call site for Runtime1::patch_code.
1023 // The case for getstatic and putstatic is more complicated because
1024 // getstatic and putstatic have special semantics when executing while
1025 // the class is being initialized.  getstatic/putstatic on a class
1026 // which is being_initialized may be executed by the initializing
1027 // thread but other threads have to block when they execute it.  This
1028 // is accomplished in compiled code by executing a test of the current
1029 // thread against the initializing thread of the class.  It's emitted
1030 // as boilerplate in their stub which allows the patched code to be
1031 // executed before it's copied back into the main body of the nmethod.
1032 //
// being_init: get_thread(<tmp reg>)
1034 //             cmpl [reg1 + <init_thread_offset>], <tmp reg>
1035 //             jne patch_stub
1036 //             movl [reg1 + <const>], reg  (for field offsets)  <or>
1037 //             movl reg, [reg1 + <const>]  (for field offsets)
1038 //             jmp continue
1039 //             <being_init offset> <bytes to copy> <bytes to skip>
1040 // patch_stub: jmp Runtime1::patch_code (through a runtime stub)
1041 //             jmp patch_site
1042 //
1043 // If the class is being initialized the patch body is rewritten and
1044 // the patch site is rewritten to jump to being_init, instead of
1045 // patch_stub.  Whenever this code is executed it checks the current
1046 // thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing class will continue on its way.  Once the class is
// fully_initialized, the initializing thread of the class becomes
1051 // null, so the next thread to execute this code will fail the test,
1052 // call into patch_code and complete the patching process by copying
1053 // the patch body back into the main part of the nmethod and resume
1054 // executing.
1055 
1056 // NB:
1057 //
1058 // Patchable instruction sequences inherently exhibit race conditions,
1059 // where thread A is patching an instruction at the same time thread B
1060 // is executing it.  The algorithms we use ensure that any observation
1061 // that B can make on any intermediate states during A's patching will
1062 // always end up with a correct outcome.  This is easiest if there are
1063 // few or no intermediate states.  (Some inline caches have two
1064 // related instructions that must be patched in tandem.  For those,
1065 // intermediate states seem to be unavoidable, but we will get the
1066 // right answer from all possible observation orders.)
1067 //
1068 // When patching the entry instruction at the head of a method, or a
1069 // linkable call instruction inside of a method, we try very hard to
1070 // use a patch sequence which executes as a single memory transaction.
1071 // This means, in practice, that when thread A patches an instruction,
1072 // it should patch a 32-bit or 64-bit word that somehow overlaps the
1073 // instruction or is contained in it.  We believe that memory hardware
1074 // will never break up such a word write, if it is naturally aligned
1075 // for the word being written.  We also know that some CPUs work very
1076 // hard to create atomic updates even of naturally unaligned words,
1077 // but we don't want to bet the farm on this always working.
1078 //
1079 // Therefore, if there is any chance of a race condition, we try to
1080 // patch only naturally aligned words, as single, full-word writes.
1081 
1082 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
1083 #ifndef PRODUCT
1084   if (PrintC1Statistics) {
1085     _patch_code_slowcase_cnt++;
1086   }
1087 #endif
1088 
1089   ResourceMark rm(current);
1090   RegisterMap reg_map(current,
1091                       RegisterMap::UpdateMap::skip,
1092                       RegisterMap::ProcessFrames::include,
1093                       RegisterMap::WalkContinuation::skip);
1094   frame runtime_frame = current->last_frame();
1095   frame caller_frame = runtime_frame.sender(&reg_map);
1096 
1097   // last java frame on stack
1098   vframeStream vfst(current, true);
1099   assert(!vfst.at_end(), "Java frame must exist");
1100 
1101   methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be the same as caller_code because of OSRs
1103   // Note also that in the presence of inlining it is not guaranteed
1104   // that caller_method() == caller_code->method()
1105 
1106   int bci = vfst.bci();
1107   Bytecodes::Code code = caller_method()->java_code_at(bci);
1108 
1109   // this is used by assertions in the access_field_patching_id
1110   BasicType patch_field_type = T_ILLEGAL;
1111   bool deoptimize_for_volatile = false;
1112   bool deoptimize_for_atomic = false;
1113   bool deoptimize_for_null_free = false;
1114   bool deoptimize_for_flat = false;
1115   int patch_field_offset = -1;
1116   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
1117   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
1118   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
1119   Handle appendix(current, nullptr); // oop needed by appendix_patching code
1120   bool load_klass_or_mirror_patch_id =
1121     (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
1122 
1123   if (stub_id == C1StubId::access_field_patching_id) {
1124 
1125     Bytecode_field field_access(caller_method, bci);
1126     fieldDescriptor result; // initialize class if needed
1127     Bytecodes::Code code = field_access.code();
1128     constantPoolHandle constants(current, caller_method->constants());
1129     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
1130     patch_field_offset = result.offset();
1131 
    // If we're patching a field which is volatile then at compile time it
    // must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
1135     // deoptimized so that the code can be regenerated correctly.
1136     // This check is only needed for access_field_patching since this
1137     // is the path for patching field offsets.  load_klass is only
1138     // used for patching references to oops which don't need special
1139     // handling in the volatile case.
1140 
1141     deoptimize_for_volatile = result.access_flags().is_volatile();
1142 
    // If we are patching a field which should be accessed atomically, then
    // the generated code is not correct either, so force deoptimization.
1145     // We need to only cover T_LONG and T_DOUBLE fields, as we can
1146     // break access atomicity only for them.
1147 
1148     // Strictly speaking, the deoptimization on 64-bit platforms
1149     // is unnecessary, and T_LONG stores on 32-bit platforms need
1150     // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency with volatile
    // accesses.
1154 
1155     patch_field_type = result.field_type();
1156     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
1157 
1158     // The field we are patching is null-free. Deoptimize and regenerate
1159     // the compiled code if we patch a putfield/putstatic because it
1160     // does not contain the required null check.
1161     deoptimize_for_null_free = result.is_null_free_inline_type() && (field_access.is_putfield() || field_access.is_putstatic());
1162 
1163     // The field we are patching is flat. Deoptimize and regenerate
1164     // the compiled code which can't handle the layout of the flat
1165     // field because it was unknown at compile time.
1166     deoptimize_for_flat = result.is_flat();
1167 
1168   } else if (load_klass_or_mirror_patch_id) {
1169     Klass* k = nullptr;
1170     switch (code) {
1171       case Bytecodes::_putstatic:
1172       case Bytecodes::_getstatic:
1173         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
1174           init_klass = klass;
1175           mirror = Handle(current, klass->java_mirror());
1176         }
1177         break;
1178       case Bytecodes::_new:
1179         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
1180           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
1181         }
1182         break;
1183       case Bytecodes::_multianewarray:
1184         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
1185           k = caller_method->constants()->klass_at(mna.index(), CHECK);
1186         }
1187         break;
1188       case Bytecodes::_instanceof:
1189         { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
1190           k = caller_method->constants()->klass_at(io.index(), CHECK);
1191         }
1192         break;
1193       case Bytecodes::_checkcast:
1194         { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
1195           k = caller_method->constants()->klass_at(cc.index(), CHECK);
1196         }
1197         break;
1198       case Bytecodes::_anewarray:
1199         { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
1200           Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
1201           k = ek->array_klass(CHECK);
1202         }
1203         break;
1204       case Bytecodes::_ldc:
1205       case Bytecodes::_ldc_w:
1206       case Bytecodes::_ldc2_w:
1207         {
1208           Bytecode_loadconstant cc(caller_method, bci);
1209           oop m = cc.resolve_constant(CHECK);
1210           mirror = Handle(current, m);
1211         }
1212         break;
1213       default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
1214     }
1215     load_klass = k;
1216   } else if (stub_id == C1StubId::load_appendix_patching_id) {
1217     Bytecode_invoke bytecode(caller_method, bci);
1218     Bytecodes::Code bc = bytecode.invoke_code();
1219 
1220     CallInfo info;
1221     constantPoolHandle pool(current, caller_method->constants());
1222     int index = bytecode.index();
1223     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
1224     switch (bc) {
1225       case Bytecodes::_invokehandle: {
1226         ResolvedMethodEntry* entry = pool->cache()->set_method_handle(index, info);
1227         appendix = Handle(current, pool->cache()->appendix_if_resolved(entry));
1228         break;
1229       }
1230       case Bytecodes::_invokedynamic: {
1231         appendix = Handle(current, pool->cache()->set_dynamic_call(info, index));
1232         break;
1233       }
1234       default: fatal("unexpected bytecode for load_appendix_patching_id");
1235     }
1236   } else {
1237     ShouldNotReachHere();
1238   }
1239 
1240   if (deoptimize_for_volatile || deoptimize_for_atomic || deoptimize_for_null_free || deoptimize_for_flat) {
    // At compile time we assumed the field wasn't volatile, atomic, null-free
    // or flat, but after resolving it we found that it is, so we have to throw
    // the compiled code out and let it be regenerated.
1244     if (TracePatching) {
1245       if (deoptimize_for_volatile) {
1246         tty->print_cr("Deoptimizing for patching volatile field reference");
1247       }
1248       if (deoptimize_for_atomic) {
1249         tty->print_cr("Deoptimizing for patching atomic field reference");
1250       }
1251       if (deoptimize_for_null_free) {
1252         tty->print_cr("Deoptimizing for patching null-free field reference");
1253       }
1254       if (deoptimize_for_flat) {
1255         tty->print_cr("Deoptimizing for patching flat field reference");
1256       }
1257     }
1258 
1259     // It's possible the nmethod was invalidated in the last
1260     // safepoint, but if it's still alive then make it not_entrant.
1261     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1262     if (nm != nullptr) {
1263       nm->make_not_entrant("C1 code patch");
1264     }
1265 
1266     Deoptimization::deoptimize_frame(current, caller_frame.id());
1267 
1268     // Return to the now deoptimized frame.
1269   }
1270 
1271   // Now copy code back
1272 
1273   {
1274     MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1275     //
1276     // Deoptimization may have happened while we waited for the lock.
1277     // In that case we don't bother to do any patching; we just return
1278     // and let the deopt happen.
1279     if (!caller_is_deopted(current)) {
1280       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
1281       address instr_pc = jump->jump_destination();
1282       NativeInstruction* ni = nativeInstruction_at(instr_pc);
1283       if (ni->is_jump()) {
1284         // The jump has not been patched yet.
1285         // The jump destination is the slow case and therefore not part of the stubs
1286         // (stubs are only for StaticCalls).
1287 
1288         // format of buffer
1289         //    ....
1290         //    instr byte 0     <-- copy_buff
1291         //    instr byte 1
1292         //    ..
1293         //    instr byte n-1
1294         //      n
1295         //    ....             <-- call destination
1296 
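             // The three bytes just below stub_location encode, from the highest
             // address down: the length of the patch body (byte_count), the padding
             // between the body and the patch info (byte_skip), and the distance
             // from stub_location back to the being-initialized entry.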
1297         address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
1298         unsigned char* byte_count = (unsigned char*) (stub_location - 1);
1299         unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
1300         unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
1301         address copy_buff = stub_location - *byte_skip - *byte_count;
1302         address being_initialized_entry = stub_location - *being_initialized_entry_offset;
1303         if (TracePatching) {
1304           ttyLocker ttyl;
1305           tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
1306                         p2i(instr_pc), (stub_id == C1StubId::access_field_patching_id) ? "field" : "klass");
1307           nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
1308           assert(caller_code != nullptr, "nmethod not found");
1309 
1310           // NOTE: we use pc(), not original_pc(), because we already know they are
1311           // identical; otherwise we'd never have entered this block of code.
1312 
1313           const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
1314           assert(map != nullptr, "null check");
1315           map->print();
1316           tty->cr();
1317 
1318           Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1319         }
1320         // do_patch is computed below and says whether to copy the patch body back into the nmethod.
1321         bool do_patch = true;
1322         if (stub_id == C1StubId::access_field_patching_id) {
1323           // The offset may not be correct if the class was not loaded at code generation time.
1324           // Set it now.
1325           NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
1326           assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
1327           assert(patch_field_offset >= 0, "illegal offset");
1328           n_move->add_offset_in_bytes(patch_field_offset);
1329         } else if (load_klass_or_mirror_patch_id) {
1330           // If a getstatic or putstatic is referencing a klass which
1331           // isn't fully initialized, the patch body isn't copied into
1332           // place until initialization is complete.  In this case the
1333           // patch site is set up so that any threads besides the
1334           // initializing thread are forced to come into the VM and
1335           // block.
1336           do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
1337                      InstanceKlass::cast(init_klass)->is_initialized();
1338           NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
1339           if (jump->jump_destination() == being_initialized_entry) {
1340             assert(do_patch == true, "initialization must be complete at this point");
1341           } else {
1342             // patch the instruction <move reg, klass>
1343             NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1344 
1345             assert(n_copy->data() == 0 ||
1346                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
1347                    "illegal init value");
1348             if (stub_id == C1StubId::load_klass_patching_id) {
1349               assert(load_klass != nullptr, "klass not set");
1350               n_copy->set_data((intx) (load_klass));
1351             } else {
1352               // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
1353               n_copy->set_data(cast_from_oop<intx>(mirror()));
1354             }
1355 
1356             if (TracePatching) {
1357               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1358             }
1359           }
1360         } else if (stub_id == C1StubId::load_appendix_patching_id) {
1361           NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1362           assert(n_copy->data() == 0 ||
1363                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
1364                  "illegal init value");
1365           n_copy->set_data(cast_from_oop<intx>(appendix()));
1366 
1367           if (TracePatching) {
1368             Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1369           }
1370         } else {
1371           ShouldNotReachHere();
1372         }
1373 
1374         if (do_patch) {
1375           // replace instructions
1376           // first replace the tail, then the call
1377 #ifdef ARM
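               // On ARM the constant may be loaded PC-relatively; if so, rewrite the
               // offset in the copy buffer for the final location (instr_pc), using
               // the oop/metadata slot recorded in the nmethod's relocation info.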
1378           if((load_klass_or_mirror_patch_id ||
1379               stub_id == C1StubId::load_appendix_patching_id) &&
1380               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1381             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1382             address addr = nullptr;
1383             assert(nm != nullptr, "invalid nmethod_pc");
1384             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1385             while (mds.next()) {
1386               if (mds.type() == relocInfo::oop_type) {
1387                 assert(stub_id == C1StubId::load_mirror_patching_id ||
1388                        stub_id == C1StubId::load_appendix_patching_id, "wrong stub id");
1389                 oop_Relocation* r = mds.oop_reloc();
1390                 addr = (address)r->oop_addr();
1391                 break;
1392               } else if (mds.type() == relocInfo::metadata_type) {
1393                 assert(stub_id == C1StubId::load_klass_patching_id, "wrong stub id");
1394                 metadata_Relocation* r = mds.metadata_reloc();
1395                 addr = (address)r->metadata_addr();
1396                 break;
1397               }
1398             }
1399             assert(addr != nullptr, "metadata relocation must exist");
1400             copy_buff -= *byte_count;
1401             NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
1402             n_copy2->set_pc_relative_offset(addr, instr_pc);
1403           }
1404 #endif
1405 
1406           for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
1407             address ptr = copy_buff + i;
1408             int a_byte = (*ptr) & 0xFF;
1409             address dst = instr_pc + i;
1410             *(unsigned char*)dst = (unsigned char) a_byte;
1411           }
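               // Make the copied tail visible, then atomically replace the head of
               // the patch site (the jump) so that other threads see either the old
               // jump or the fully patched instruction.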
1412           ICache::invalidate_range(instr_pc, *byte_count);
1413           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
1414 
1415           if (load_klass_or_mirror_patch_id ||
1416               stub_id == C1StubId::load_appendix_patching_id) {
1417             relocInfo::relocType rtype =
1418               (stub_id == C1StubId::load_klass_patching_id) ?
1419                                    relocInfo::metadata_type :
1420                                    relocInfo::oop_type;
1421             // update the relocInfo at the patch site to the new type (metadata or oop)
1422             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1423             assert(nm != nullptr, "invalid nmethod_pc");
1424 
1425             // The old patch site is now a move instruction so update
1426             // the reloc info so that it will get updated during
1427             // future GCs.
1428             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1429             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1430                                                      relocInfo::none, rtype);
1431           }
1432 
1433         } else {
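               // Patching is deferred because the holder class is still being
               // initialized: leave a jump to the being-initialized entry so other
               // threads keep re-entering the runtime until initialization is done.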
1434           ICache::invalidate_range(copy_buff, *byte_count);
1435           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1436         }
1437       }
1438     }
1439     // If we are patching in a non-perm oop, make sure the nmethod
1440     // is on the right list.
1441     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1442     guarantee(nm != nullptr, "only nmethods can contain non-perm oops");
1443 
1444     // Since we've patched some oops in the nmethod,
1445     // (re)register it with the heap.
1446     Universe::heap()->register_nmethod(nm);
1447   }
1448 JRT_END
1449 
1450 #else // DEOPTIMIZE_WHEN_PATCHING
1451 
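     // Returns false if the klass referenced at the patch site is already known to
     // fail resolution (its constant pool tag is in the error state). In that case
     // the nmethod is not invalidated; the deoptimized frame will simply throw the
     // resolution error when the bytecode re-executes in the interpreter.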
1452 static bool is_patching_needed(JavaThread* current, C1StubId stub_id) {
1453   if (stub_id == C1StubId::load_klass_patching_id ||
1454       stub_id == C1StubId::load_mirror_patching_id) {
1455     // last java frame on stack
1456     vframeStream vfst(current, true);
1457     assert(!vfst.at_end(), "Java frame must exist");
1458 
1459     methodHandle caller_method(current, vfst.method());
1460     int bci = vfst.bci();
1461     Bytecodes::Code code = caller_method()->java_code_at(bci);
1462 
1463     switch (code) {
1464       case Bytecodes::_new:
1465       case Bytecodes::_anewarray:
1466       case Bytecodes::_multianewarray:
1467       case Bytecodes::_instanceof:
1468       case Bytecodes::_checkcast: {
1469         Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1470         constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1471         if (tag.is_unresolved_klass_in_error()) {
1472           return false; // throws resolution error
1473         }
1474         break;
1475       }
1476 
1477       default: break;
1478     }
1479   }
1480   return true;
1481 }
1482 
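     // Simplified strategy used when DEOPTIMIZE_WHEN_PATCHING is defined: instead
     // of patching the compiled code in place, invalidate it (when the constant can
     // actually be resolved) and deoptimize the caller frame so that the bytecode
     // re-executes in the interpreter.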
1483 void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
1484 #ifndef PRODUCT
1485   if (PrintC1Statistics) {
1486     _patch_code_slowcase_cnt++;
1487   }
1488 #endif
1489 
1490   // Enable WXWrite: the function is called by c1 stub as a runtime function
1491   // (see another implementation above).
1492   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1493 
1494   if (TracePatching) {
1495     tty->print_cr("Deoptimizing because patch is needed");
1496   }
1497 
1498   RegisterMap reg_map(current,
1499                       RegisterMap::UpdateMap::skip,
1500                       RegisterMap::ProcessFrames::include,
1501                       RegisterMap::WalkContinuation::skip);
1502 
1503   frame runtime_frame = current->last_frame();
1504   frame caller_frame = runtime_frame.sender(&reg_map);
1505   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1506 
1507   if (is_patching_needed(current, stub_id)) {
1508     // Make sure the nmethod is invalidated, i.e. made not entrant.
1509     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1510     if (nm != nullptr) {
1511       nm->make_not_entrant("C1 deoptimize for patching");
1512     }
1513   }
1514 
1515   Deoptimization::deoptimize_frame(current, caller_frame.id());
1516   // Return to the now deoptimized frame.
1517   postcond(caller_is_deopted(current));
1518 }
1519 
1520 #endif // DEOPTIMIZE_WHEN_PATCHING
1521 
1522 // Entry point for compiled code. We want to patch an nmethod.
1523 // We don't do a normal VM transition here because we want to
1524 // know, after the patching is complete and any safepoint(s) are taken,
1525 // whether the calling nmethod was deoptimized. We do this by calling a
1526 // helper method which does the normal VM transition and when it
1527 // completes we can check for deoptimization. This simplifies the
1528 // assembly code in the cpu directories.
1529 //
1530 int Runtime1::move_klass_patching(JavaThread* current) {
1531 //
1532 // NOTE: we are still in Java
1533 //
1534   debug_only(NoHandleMark nhm;)
1535   {
1536     // Enter VM mode
1537     ResetNoHandleMark rnhm;
1538     patch_code(current, C1StubId::load_klass_patching_id);
1539   }
1540   // Back in Java; use no oops and DON'T safepoint.
1541 
1542   // Return true if calling code is deoptimized
1543 
1544   return caller_is_deopted(current);
1545 }
1546 
1547 int Runtime1::move_mirror_patching(JavaThread* current) {
1548 //
1549 // NOTE: we are still in Java
1550 //
1551   debug_only(NoHandleMark nhm;)
1552   {
1553     // Enter VM mode
1554     ResetNoHandleMark rnhm;
1555     patch_code(current, C1StubId::load_mirror_patching_id);
1556   }
1557   // Back in Java; use no oops and DON'T safepoint.
1558 
1559   // Return true if calling code is deoptimized
1560 
1561   return caller_is_deopted(current);
1562 }
1563 
1564 int Runtime1::move_appendix_patching(JavaThread* current) {
1565 //
1566 // NOTE: we are still in Java
1567 //
1568   debug_only(NoHandleMark nhm;)
1569   {
1570     // Enter VM mode
1571     ResetNoHandleMark rnhm;
1572     patch_code(current, C1StubId::load_appendix_patching_id);
1573   }
1574   // Back in Java; use no oops and DON'T safepoint.
1575 
1576   // Return true if calling code is deoptimized
1577 
1578   return caller_is_deopted(current);
1579 }
1580 
1581 // Entry point for compiled code. We want to patch an nmethod.
1582 // We don't do a normal VM transition here because we want to
1583 // know, after the patching is complete and any safepoint(s) are taken,
1584 // whether the calling nmethod was deoptimized. We do this by calling a
1585 // helper method which does the normal VM transition and when it
1586 // completes we can check for deoptimization. This simplifies the
1587 // assembly code in the cpu directories.
1588 //
1589 int Runtime1::access_field_patching(JavaThread* current) {
1590   //
1591   // NOTE: we are still in Java
1592   //
1593   // Handles created in this function will be deleted by the
1594   // HandleMarkCleaner in the transition to the VM.
1595   NoHandleMark nhm;
1596   {
1597     // Enter VM mode
1598     ResetNoHandleMark rnhm;
1599     patch_code(current, C1StubId::access_field_patching_id);
1600   }
1601   // Back in Java; use no oops and DON'T safepoint.
1602 
1603   // Return true if calling code is deoptimized
1604 
1605   return caller_is_deopted(current);
1606 }
1607 
1608 
1609 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1610   // for now we just print out the block id
1611   tty->print("%d ", block_id);
1612 JRT_END
1613 
1614 
1615 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1616   // We had to return int instead of bool; otherwise there may be a mismatch
1617   // between the C calling convention and the Java one.
1618   // E.g., on x86, GCC may clear only %al when returning a bool false, but
1619   // the JVM reads the whole %eax as the return value, so stale upper bits
1620   // could make it look like a boolean true.
1621 
1622   assert(mirror != nullptr, "should null-check on mirror before calling");
1623   Klass* k = java_lang_Class::as_Klass(mirror);
1624   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1625 JRT_END
1626 
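     // A C1 predicate (e.g. one emitted by range check elimination) failed at run
     // time: throw away the compiled code, bump the trap count in the method's MDO
     // so later compilations can take the failure into account, and deoptimize the
     // caller frame.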
1627 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1628   ResourceMark rm;
1629 
1630   RegisterMap reg_map(current,
1631                       RegisterMap::UpdateMap::skip,
1632                       RegisterMap::ProcessFrames::include,
1633                       RegisterMap::WalkContinuation::skip);
1634   frame runtime_frame = current->last_frame();
1635   frame caller_frame = runtime_frame.sender(&reg_map);
1636 
1637   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1638   assert(nm != nullptr, "no more nmethod?");
1639   nm->make_not_entrant("C1 predicate failed trap");
1640 
1641   methodHandle m(current, nm->method());
1642   MethodData* mdo = m->method_data();
1643 
1644   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1645     // Build an MDO.  Ignore errors like OutOfMemory;
1646     // that simply means we won't have an MDO to update.
1647     Method::build_profiling_method_data(m, THREAD);
1648     if (HAS_PENDING_EXCEPTION) {
1649       // Only metaspace OOM is expected. No Java code executed.
1650       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1651       CLEAR_PENDING_EXCEPTION;
1652     }
1653     mdo = m->method_data();
1654   }
1655 
1656   if (mdo != nullptr) {
1657     mdo->inc_trap_count(Deoptimization::Reason_none);
1658   }
1659 
1660   if (TracePredicateFailedTraps) {
1661     stringStream ss1, ss2;
1662     vframeStream vfst(current);
1663     Method* inlinee = vfst.method();
1664     inlinee->print_short_name(&ss1);
1665     m->print_short_name(&ss2);
1666     tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
1667   }
1668 
1669 
1670   Deoptimization::deoptimize_frame(current, caller_frame.id());
1671 
1672 JRT_END
1673 
1674 // Check exception if AbortVMOnException flag set
1675 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1676   ResourceMark rm;
1677   const char* message = nullptr;
1678   if (ex->is_a(vmClasses::Throwable_klass())) {
1679     oop msg = java_lang_Throwable::message(ex);
1680     if (msg != nullptr) {
1681       message = java_lang_String::as_utf8_string(msg);
1682     }
1683   }
1684   Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1685 JRT_END
1686 
1687 #ifndef PRODUCT
1688 void Runtime1::print_statistics() {
1689   tty->print_cr("C1 Runtime statistics:");
1690   tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1691   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1692   tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1693   tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1694   tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1695   tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1696   tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1697   tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1698   tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1699   tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1700   tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1701   tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1702   tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1703   tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1704 
1705   tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1706   tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1707   tty->print_cr(" _new_null_free_array_slowcase_cnt: %u", _new_null_free_array_slowcase_cnt);
1708   tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1709   tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1710   tty->print_cr(" _load_flat_array_slowcase_cnt:   %u", _load_flat_array_slowcase_cnt);
1711   tty->print_cr(" _store_flat_array_slowcase_cnt:  %u", _store_flat_array_slowcase_cnt);
1712   tty->print_cr(" _substitutability_check_slowcase_cnt: %u", _substitutability_check_slowcase_cnt);
1713   tty->print_cr(" _buffer_inline_args_slowcase_cnt:%u", _buffer_inline_args_slowcase_cnt);
1714   tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt:%u", _buffer_inline_args_no_receiver_slowcase_cnt);
1715 
1716   tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1717   tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1718   tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1719 
1720   tty->print_cr(" _throw_range_check_exception_count:            %u", _throw_range_check_exception_count);
1721   tty->print_cr(" _throw_index_exception_count:                  %u", _throw_index_exception_count);
1722   tty->print_cr(" _throw_div0_exception_count:                   %u", _throw_div0_exception_count);
1723   tty->print_cr(" _throw_null_pointer_exception_count:           %u", _throw_null_pointer_exception_count);
1724   tty->print_cr(" _throw_class_cast_exception_count:             %u", _throw_class_cast_exception_count);
1725   tty->print_cr(" _throw_incompatible_class_change_error_count:  %u", _throw_incompatible_class_change_error_count);
1726   tty->print_cr(" _throw_illegal_monitor_state_exception_count:  %u", _throw_illegal_monitor_state_exception_count);
1727   tty->print_cr(" _throw_identity_exception_count:               %u", _throw_identity_exception_count);
1728   tty->print_cr(" _throw_count:                                  %u", _throw_count);
1729 
1730   SharedRuntime::print_ic_miss_histogram();
1731   tty->cr();
1732 }
1733 #endif // PRODUCT