/*
 * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/flatArrayOop.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}
// Implementation of Runtime1

CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_flat_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_load_flattened_array_slowcase_cnt = 0;
int Runtime1::_store_flattened_array_slowcase_cnt = 0;
int Runtime1::_substitutability_check_slowcase_cnt = 0;
int Runtime1::_buffer_inline_args_slowcase_cnt = 0;
int Runtime1::_buffer_inline_args_no_receiver_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_illegal_monitor_state_exception_count = 0;
int Runtime1::_throw_count = 0;

static int _byte_arraycopy_stub_cnt = 0;
static int _short_arraycopy_stub_cnt = 0;
static int _int_arraycopy_stub_cnt = 0;
static int _long_arraycopy_stub_cnt = 0;
static int _oop_arraycopy_stub_cnt = 0;

address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}


#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted(JavaThread* current) {
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller(JavaThread* current) {
  if (!caller_is_deopted(current)) {
    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::skip,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame runtime_frame = current->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");
  }
}

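// Simple closure that forwards code generation for a given stub id
// to Runtime1::generate_code_for().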
class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 private:
  Runtime1::StubID _id;
 public:
  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    return Runtime1::generate_code_for(_id, sasm);
  }
};

CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  OopMapSet* oop_maps;
  int frame_size;
  bool must_gc_arguments;

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
  // generate code for runtime stub
  oop_maps = cl->generate_code(sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");
  assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");

  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();

  frame_size = sasm->frame_size();
  must_gc_arguments = sasm->must_gc_arguments();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 frame_size,
                                                 oop_maps,
                                                 must_gc_arguments);
  assert(blob != NULL, "blob must exist");
  return blob;
}

void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  bool expect_oop_map = true;
#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  StubIDStubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  _blobs[id] = blob;
}

void Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
  BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
  bs->generate_c1_runtime_stubs(blob);
}

CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

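// Match 'entry' against a known C function and, if it matches, return
// that function's name.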
#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JfrTime::time_function());
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}

static void allocate_instance(JavaThread* current, Klass* klass, TRAPS) {
#ifndef PRODUCT
  if (PrintC1Statistics) {
    Runtime1::_new_instance_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  current->set_vm_result(obj);
}

JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
  allocate_instance(current, klass, CHECK);
JRT_END

// Same as new_instance, but throws an InstantiationError for inline klasses
JRT_ENTRY(void, Runtime1::new_instance_no_inline(JavaThread* current, Klass* klass))
  if (klass->is_inline_klass()) {
    SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_InstantiationError());
  } else {
    allocate_instance(current, klass, CHECK);
  }
JRT_END

JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since it is not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // Entering here is pretty rare, but deoptimizing at this point is
  // stressful to the deoptimization machinery, so force a deopt to
  // stress that path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END


JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since it is not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // Entering here is pretty rare, but deoptimizing at this point is
  // stressful to the deoptimization machinery, so force a deopt to
  // stress that path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY(void, Runtime1::new_flat_array(JavaThread* current, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_flat_array_slowcase_cnt++;)

  // Note: no handle for klass needed since it is not used
  //       anymore after new_valueArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ArrayKlass::cast(array_klass)->element_klass();
  assert(elem_klass->is_inline_klass(), "must be");
  // Logically creates elements, ensure klass init
  elem_klass->initialize(CHECK);
  arrayOop obj = oopFactory::new_valueArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // Entering here is pretty rare, but deoptimizing at this point is
  // stressful to the deoptimization machinery, so force a deopt to
  // stress that path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END


static void profile_flat_array(JavaThread* current) {
  ResourceMark rm(current);
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");
  // Check if array access profiling is enabled
  if (vfst.nm()->comp_level() != CompLevel_full_profile || !C1UpdateMethodData) {
    return;
  }
  int bci = vfst.bci();
  Method* method = vfst.method();
  MethodData* md = method->method_data();
  if (md != NULL) {
    ProfileData* data = md->bci_to_data(bci);
    assert(data != NULL && data->is_ArrayLoadStoreData(), "incorrect profiling entry");
    ArrayLoadStoreData* load_store = (ArrayLoadStoreData*)data;
    load_store->set_flat_array();
  }
}

JRT_ENTRY(void, Runtime1::load_flattened_array(JavaThread* current, flatArrayOopDesc* array, int index))
  assert(array->klass()->is_flatArray_klass(), "should not be called");
  profile_flat_array(current);

  NOT_PRODUCT(_load_flattened_array_slowcase_cnt++;)
  assert(array->length() > 0 && index < array->length(), "already checked");
  flatArrayHandle vah(current, array);
  oop obj = flatArrayOopDesc::value_alloc_copy_from_index(vah, index, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::store_flattened_array(JavaThread* current, flatArrayOopDesc* array, int index, oopDesc* value))
  if (array->klass()->is_flatArray_klass()) {
    profile_flat_array(current);
  }

  NOT_PRODUCT(_store_flattened_array_slowcase_cnt++;)
  if (value == NULL) {
    assert(array->klass()->is_flatArray_klass() || array->klass()->is_null_free_array_klass(), "should not be called");
    SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
  } else {
    assert(array->klass()->is_flatArray_klass(), "should not be called");
    array->value_copy_to_index(value, index);
  }
JRT_END


JRT_ENTRY(int, Runtime1::substitutability_check(JavaThread* current, oopDesc* left, oopDesc* right))
  NOT_PRODUCT(_substitutability_check_slowcase_cnt++;)
  JavaCallArguments args;
  args.push_oop(Handle(THREAD, left));
  args.push_oop(Handle(THREAD, right));
  JavaValue result(T_BOOLEAN);
  JavaCalls::call_static(&result,
                         vmClasses::PrimitiveObjectMethods_klass(),
                         vmSymbols::isSubstitutable_name(),
                         vmSymbols::object_object_boolean_signature(),
                         &args, CHECK_0);
  return result.get_jboolean() ? 1 : 0;
JRT_END


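// Debugging helper defined in debug.cpp; prints the current thread's
// stack and is handy to call from a native debugger.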
extern "C" void ps();

void Runtime1::buffer_inline_args_impl(JavaThread* current, Method* m, bool allocate_receiver) {
  JavaThread* THREAD = current;
  methodHandle method(current, m); // We are inside the verified_entry or verified_inline_ro_entry of this method.
  oop obj = SharedRuntime::allocate_inline_types_impl(current, method, allocate_receiver, CHECK);
  current->set_vm_result(obj);
}

JRT_ENTRY(void, Runtime1::buffer_inline_args(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, true);
JRT_END

JRT_ENTRY(void, Runtime1::buffer_inline_args_no_receiver(JavaThread* current, Method* method))
  NOT_PRODUCT(_buffer_inline_args_no_receiver_slowcase_cnt++;)
  buffer_inline_args_impl(current, method, false);
JRT_END

JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END


// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The method that was possibly inlined into the
// enclosing method is passed as an argument; to make that possible, it is embedded in the
// compiled code as a constant.
static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(current, m);

  RegisterMap map(current,
                  RegisterMap::UpdateMap::skip,
                  RegisterMap::ProcessFrames::include,
                  RegisterMap::WalkContinuation::skip);
  frame fr = current->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(current, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
  osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
  return osr_nm;
}

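// Called when the invocation or backedge counter of a C1-compiled method
// overflows. If the compilation policy produces an OSR nmethod, the caller
// frame is deoptimized so execution can continue in the OSR code.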
JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(current, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = current->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(current, fr.id());
    }
  JRT_BLOCK_END
  return NULL;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from exception stub (via the entry method below)
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  current->set_is_method_handle_return(false);

  Handle exception(current, ex);

  // This function is called when we are about to throw an exception. Therefore,
  // we have to poll the stack watermark barrier to make sure that not yet safe
  // stack frames are made safe before returning into them.
  if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
    // frame has been unwound. It instead builds its own stub frame, to call the
    // runtime. But the throwing frame has already been unwound here.
    StackWatermarkSet::after_unwind(current);
  }

  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(current,
                    RegisterMap::UpdateMap::skip,
                    RegisterMap::ProcessFrames::include,
                    RegisterMap::WalkContinuation::skip);
    frame exception_frame = current->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  // Check that exception is a subclass of Throwable
  assert(exception->is_a(vmClasses::Throwable_klass()),
         "Exception not subclass of Throwable");

  // debugging support
  // tracing
  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm; // print_value_string
    stringStream tempst;
    assert(nm->method() != NULL, "Unexpected NULL method()");
    tempst.print("C1 compiled method <%s>\n"
                 " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                 nm->method()->print_value_string(), p2i(pc), p2i(current));
    Exceptions::log_exception(exception, tempst.freeze());
  }
  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

  // Check the stack guard pages and re-enable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(current,
                        RegisterMap::UpdateMap::include,
                        RegisterMap::ProcessFrames::include,
                        RegisterMap::WalkContinuation::skip);
    frame stub_frame = current->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      current->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    current->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    current->set_exception_oop(exception());
    current->set_exception_pc(pc);
    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only if another exception did not
    // occur during the computation of the compiled exception handler
    // (checking for exception oop equality is not sufficient because
    // some exceptions are pre-allocated and reused).
    if (continuation != NULL && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  current->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  current->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(current), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* current) {
  oop exception = current->exception_oop();
  address pc = current->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(NoHandleMark nhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted(current)) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}


JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_range_check_exception_count++;
  }
#endif
  const int len = 35;
  assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
  char message[2 * jintAsStringSize + len];
  sprintf(message, "Index %d out of bounds for length %d", index, a->length());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_index_exception_count++;
  }
#endif
  char message[16];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_div0_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_null_pointer_exception_count++;
  }
#endif
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_class_cast_exception_count++;
  }
#endif
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_ENTRY(void, Runtime1::throw_illegal_monitor_state_exception(JavaThread* current))
  NOT_PRODUCT(_throw_illegal_monitor_state_exception_count++;)
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IllegalMonitorStateException());
JRT_END

JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
  if (UseHeavyMonitors) {
    lock->set_obj(obj);
  }
  assert(obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
JRT_END


JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorexit_slowcase_cnt++;
  }
#endif
  assert(current->last_Java_sp(), "last_Java_sp must be set");
  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be NULL or an object");
  SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != NULL, "Sanity check");
  methodHandle method(current, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant()) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != NULL) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


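// Platforms that define DEOPTIMIZE_WHEN_PATCHING deoptimize instead of
// patching compiled code in place, so the patching support below is only
// compiled in when that macro is not set.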
#ifndef DEOPTIMIZE_WHEN_PATCHING

static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code       = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.  The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.  The
// current implementation takes the latter approach.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way.  The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized.  getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it.  This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class.  It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized, the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub.  Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing class will continue on its way.  Once the class is
// fully_initialized, the initializing thread of the class becomes
// NULL, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.

// NB:
//
// Patchable instruction sequences inherently exhibit race conditions,
// where thread A is patching an instruction at the same time thread B
// is executing it.  The algorithms we use ensure that any observation
// that B can make on any intermediate states during A's patching will
// always end up with a correct outcome.  This is easiest if there are
// few or no intermediate states.  (Some inline caches have two
// related instructions that must be patched in tandem.  For those,
// intermediate states seem to be unavoidable, but we will get the
// right answer from all possible observation orders.)
//
// When patching the entry instruction at the head of a method, or a
// linkable call instruction inside of a method, we try very hard to
// use a patch sequence which executes as a single memory transaction.
// This means, in practice, that when thread A patches an instruction,
// it should patch a 32-bit or 64-bit word that somehow overlaps the
// instruction or is contained in it.  We believe that memory hardware
// will never break up such a word write, if it is naturally aligned
// for the word being written.  We also know that some CPUs work very
// hard to create atomic updates even of naturally unaligned words,
// but we don't want to bet the farm on this always working.
//
// Therefore, if there is any chance of a race condition, we try to
// patch only naturally aligned words, as single, full-word writes.

JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be the same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  Klass* init_klass = NULL; // klass needed by load_klass_patching code
  Klass* load_klass = NULL; // klass needed by load_klass_patching code
  Handle mirror(current, NULL);                    // oop needed by load_mirror_patching code
  Handle appendix(current, NULL);                  // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(current, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();
    assert(!result.is_inlined(), "Can not patch access to flattened field");

    // If we're patching a field which is volatile, then at compile time it
    // must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets.  load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, so force deoptimization.
    // We only need to cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency against volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = klass;
          mirror = Handle(current, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_aconst_init:
        { Bytecode_aconst_init baconst_init(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(baconst_init.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
          if (k->name()->is_Q_array_signature()) {
            // Logically creates elements, ensure klass init
            k->initialize(CHECK);
          }
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
      case Bytecodes::_ldc2_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(current, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = k;
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(current, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
        cpce->set_method_handle(pool, info);
        appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
        break;
      }
      case Bytecodes::_invokedynamic: {
        ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
        cpce->set_dynamic_call(pool, info);
        appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(current, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLocker ml_patch(current, Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted(current)) {
1248       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
1249       address instr_pc = jump->jump_destination();
1250       NativeInstruction* ni = nativeInstruction_at(instr_pc);
1251       if (ni->is_jump() ) {
1252         // the jump has not been patched yet
1253         // The jump destination is slow case and therefore not part of the stubs
1254         // (stubs are only for StaticCalls)
1255 
1256         // format of buffer
1257         //    ....
1258         //    instr byte 0     <-- copy_buff
1259         //    instr byte 1
1260         //    ..
1261         //    instr byte n-1
1262         //      n
1263         //    ....             <-- call destination
1264 
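        // The three bytes immediately below the patch-info address record the
        // buffer geometry: *byte_count is the length of the copied instruction,
        // *byte_skip is the distance from the end of the copy buffer to the
        // patch-info address, and *being_initialized_entry_offset is the
        // distance back to the being_initialized entry.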
1265         address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
1266         unsigned char* byte_count = (unsigned char*) (stub_location - 1);
1267         unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
1268         unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
1269         address copy_buff = stub_location - *byte_skip - *byte_count;
1270         address being_initialized_entry = stub_location - *being_initialized_entry_offset;
1271         if (TracePatching) {
1272           ttyLocker ttyl;
1273           tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
1274                         p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
1275           nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
1276           assert(caller_code != NULL, "nmethod not found");
1277 
          // NOTE: we use pc(), not original_pc(), because we already know they
          // are identical; otherwise we'd never have entered this block of code.
1280 
1281           const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
1282           assert(map != NULL, "null check");
1283           map->print();
1284           tty->cr();
1285 
1286           Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1287         }
        // do_patch says whether to copy the patch body back into the nmethod;
        // the code below may clear it while a klass is still being initialized.
1289         bool do_patch = true;
1290         if (stub_id == Runtime1::access_field_patching_id) {
1291           // The offset may not be correct if the class was not loaded at code generation time.
1292           // Set it now.
1293           NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
1294           assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
1295           assert(patch_field_offset >= 0, "illegal offset");
1296           n_move->add_offset_in_bytes(patch_field_offset);
1297         } else if (load_klass_or_mirror_patch_id) {
1298           // If a getstatic or putstatic is referencing a klass which
1299           // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete.  In this case the
          // patch site is set up so that any threads besides the
1302           // initializing thread are forced to come into the VM and
1303           // block.
1304           do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
1305                      InstanceKlass::cast(init_klass)->is_initialized();
1306           NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
1307           if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch, "initialization must be complete at this point");
1309           } else {
1310             // patch the instruction <move reg, klass>
1311             NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1312 
1313             assert(n_copy->data() == 0 ||
1314                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
1315                    "illegal init value");
1316             if (stub_id == Runtime1::load_klass_patching_id) {
1317               assert(load_klass != NULL, "klass not set");
1318               n_copy->set_data((intx) (load_klass));
1319             } else {
1320               // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
1321               n_copy->set_data(cast_from_oop<intx>(mirror()));
1322             }
1323 
1324             if (TracePatching) {
1325               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1326             }
1327           }
1328         } else if (stub_id == Runtime1::load_appendix_patching_id) {
1329           NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1330           assert(n_copy->data() == 0 ||
1331                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
1332                  "illegal init value");
1333           n_copy->set_data(cast_from_oop<intx>(appendix()));
1334 
1335           if (TracePatching) {
1336             Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1337           }
1338         } else {
1339           ShouldNotReachHere();
1340         }
1341 
1342         if (do_patch) {
          // Replace the instructions: first the tail, then the initial
          // jump, which is swapped in MT-safely below.
1345 #ifdef ARM
          if ((load_klass_or_mirror_patch_id ||
1347               stub_id == Runtime1::load_appendix_patching_id) &&
1348               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1349             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1350             address addr = NULL;
1351             assert(nm != NULL, "invalid nmethod_pc");
1352             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1353             while (mds.next()) {
1354               if (mds.type() == relocInfo::oop_type) {
1355                 assert(stub_id == Runtime1::load_mirror_patching_id ||
1356                        stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1357                 oop_Relocation* r = mds.oop_reloc();
1358                 addr = (address)r->oop_addr();
1359                 break;
1360               } else if (mds.type() == relocInfo::metadata_type) {
1361                 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1362                 metadata_Relocation* r = mds.metadata_reloc();
1363                 addr = (address)r->metadata_addr();
1364                 break;
1365               }
1366             }
1367             assert(addr != NULL, "metadata relocation must exist");
1368             copy_buff -= *byte_count;
1369             NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
1370             n_copy2->set_pc_relative_offset(addr, instr_pc);
1371           }
1372 #endif
1373 
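          // Copy the patch body behind the initial jump first; the jump itself
          // is replaced atomically below via replace_mt_safe(), so racing
          // threads never execute a partially patched site.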
1374           for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
1375             address ptr = copy_buff + i;
1376             int a_byte = (*ptr) & 0xFF;
1377             address dst = instr_pc + i;
1378             *(unsigned char*)dst = (unsigned char) a_byte;
1379           }
1380           ICache::invalidate_range(instr_pc, *byte_count);
1381           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
1382 
1383           if (load_klass_or_mirror_patch_id ||
1384               stub_id == Runtime1::load_appendix_patching_id) {
1385             relocInfo::relocType rtype =
1386               (stub_id == Runtime1::load_klass_patching_id) ?
1387                                    relocInfo::metadata_type :
1388                                    relocInfo::oop_type;
            // update the relocInfo to the resolved constant's type
1390             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1391             assert(nm != NULL, "invalid nmethod_pc");
1392 
1393             // The old patch site is now a move instruction so update
1394             // the reloc info so that it will get updated during
1395             // future GCs.
1396             RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
1397             relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
1398                                                      relocInfo::none, rtype);
1399           }
1400 
1401         } else {
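          // Initialization of the referenced klass is still in progress:
          // (re)install the jump to the being_initialized entry so other
          // threads come into the VM and block until it completes.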
1402           ICache::invalidate_range(copy_buff, *byte_count);
1403           NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
1404         }
1405       }
1406     }
1407   }
1408 
1409   // If we are patching in a non-perm oop, make sure the nmethod
1410   // is on the right list.
1411   {
    MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1413     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1414     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
1415 
1416     // Since we've patched some oops in the nmethod,
1417     // (re)register it with the heap.
1418     Universe::heap()->register_nmethod(nm);
1419   }
1420 JRT_END
1421 
1422 #else // DEOPTIMIZE_WHEN_PATCHING
1423 
1424 static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
1425   if (stub_id == Runtime1::load_klass_patching_id ||
1426       stub_id == Runtime1::load_mirror_patching_id) {
    // Last Java frame on the stack.
1428     vframeStream vfst(current, true);
1429     assert(!vfst.at_end(), "Java frame must exist");
1430 
1431     methodHandle caller_method(current, vfst.method());
1432     int bci = vfst.bci();
1433     Bytecodes::Code code = caller_method()->java_code_at(bci);
1434 
1435     switch (code) {
1436       case Bytecodes::_new:
1437       case Bytecodes::_anewarray:
1438       case Bytecodes::_multianewarray:
1439       case Bytecodes::_instanceof:
1440       case Bytecodes::_checkcast: {
1441         Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1442         constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1443         if (tag.is_unresolved_klass_in_error()) {
1444           return false; // throws resolution error
1445         }
1446         break;
1447       }
1448 
1449       default: break;
1450     }
1451   }
1452   return true;
1453 }
1454 
1455 void Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id) {
1456 #ifndef PRODUCT
1457   if (PrintC1Statistics) {
1458     _patch_code_slowcase_cnt++;
1459   }
1460 #endif
1461 
1462   // Enable WXWrite: the function is called by c1 stub as a runtime function
1463   // (see another implementation above).
1464   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1465 
1466   if (TracePatching) {
1467     tty->print_cr("Deoptimizing because patch is needed");
1468   }
1469 
1470   RegisterMap reg_map(current,
1471                       RegisterMap::UpdateMap::skip,
1472                       RegisterMap::ProcessFrames::include,
1473                       RegisterMap::WalkContinuation::skip);
1474 
1475   frame runtime_frame = current->last_frame();
1476   frame caller_frame = runtime_frame.sender(&reg_map);
1477   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1478 
1479   if (is_patching_needed(current, stub_id)) {
1480     // Make sure the nmethod is invalidated, i.e. made not entrant.
1481     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1482     if (nm != NULL) {
1483       nm->make_not_entrant();
1484     }
1485   }
1486 
1487   Deoptimization::deoptimize_frame(current, caller_frame.id());
1488   // Return to the now deoptimized frame.
1489   postcond(caller_is_deopted(current));
1490 }
1491 
1492 #endif // DEOPTIMIZE_WHEN_PATCHING
1493 
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) have been taken,
// whether the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and, when it
// completes, checking for deoptimization. This simplifies the
// assembly code in the cpu directories.
1501 //
1502 int Runtime1::move_klass_patching(JavaThread* current) {
1503 //
1504 // NOTE: we are still in Java
1505 //
1506   debug_only(NoHandleMark nhm;)
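  // No handles may be created while the thread is still in Java; the
  // ResetNoHandleMark below re-enables them for the VM transition.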
1507   {
1508     // Enter VM mode
1509     ResetNoHandleMark rnhm;
1510     patch_code(current, load_klass_patching_id);
1511   }
  // Back in Java; use no oops, DON'T safepoint.

  // Return true (non-zero) if the calling code was deoptimized.
1515 
1516   return caller_is_deopted(current);
1517 }
1518 
1519 int Runtime1::move_mirror_patching(JavaThread* current) {
1520 //
1521 // NOTE: we are still in Java
1522 //
1523   debug_only(NoHandleMark nhm;)
1524   {
1525     // Enter VM mode
1526     ResetNoHandleMark rnhm;
1527     patch_code(current, load_mirror_patching_id);
1528   }
  // Back in Java; use no oops, DON'T safepoint.

  // Return true (non-zero) if the calling code was deoptimized.
1532 
1533   return caller_is_deopted(current);
1534 }
1535 
1536 int Runtime1::move_appendix_patching(JavaThread* current) {
1537 //
1538 // NOTE: we are still in Java
1539 //
1540   debug_only(NoHandleMark nhm;)
1541   {
1542     // Enter VM mode
1543     ResetNoHandleMark rnhm;
1544     patch_code(current, load_appendix_patching_id);
1545   }
  // Back in Java; use no oops, DON'T safepoint.

  // Return true (non-zero) if the calling code was deoptimized.
1549 
1550   return caller_is_deopted(current);
1551 }
1552 
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) have been taken,
// whether the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and, when it
// completes, checking for deoptimization. This simplifies the
// assembly code in the cpu directories.
1560 //
1561 int Runtime1::access_field_patching(JavaThread* current) {
1562   //
1563   // NOTE: we are still in Java
1564   //
1565   // Handles created in this function will be deleted by the
1566   // HandleMarkCleaner in the transition to the VM.
1567   NoHandleMark nhm;
1568   {
1569     // Enter VM mode
1570     ResetNoHandleMark rnhm;
1571     patch_code(current, access_field_patching_id);
1572   }
  // Back in Java; use no oops, DON'T safepoint.

  // Return true (non-zero) if the calling code was deoptimized.
1576 
1577   return caller_is_deopted(current);
1578 }
1579 
1580 
1581 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // For now we just print out the block id.
1583   tty->print("%d ", block_id);
1584 JRT_END
1585 
1586 
1587 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We have to return an int instead of a bool; otherwise there may be a
  // mismatch between the C calling convention and the Java one.
  // E.g., on x86, GCC may clear only %al when returning a bool false, but
  // the JVM reads the whole %eax as the return value and may misinterpret
  // it as a boolean true.
1593 
1594   assert(mirror != NULL, "should null-check on mirror before calling");
1595   Klass* k = java_lang_Class::as_Klass(mirror);
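  // A null obj, or a primitive mirror (for which k == NULL), answers
  // false, matching instanceof semantics.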
1596   return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
1597 JRT_END
1598 
1599 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1600   ResourceMark rm;
1601 
1602   RegisterMap reg_map(current,
1603                       RegisterMap::UpdateMap::skip,
1604                       RegisterMap::ProcessFrames::include,
1605                       RegisterMap::WalkContinuation::skip);
1606   frame runtime_frame = current->last_frame();
1607   frame caller_frame = runtime_frame.sender(&reg_map);
1608 
1609   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no more nmethod?");
1611   nm->make_not_entrant();
1612 
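  // Bump the trap count in the method's MDO (building one on demand) so
  // the failed predicate is visible to later compilation decisions.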
1613   methodHandle m(current, nm->method());
1614   MethodData* mdo = m->method_data();
1615 
1616   if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
1617     // Build an MDO.  Ignore errors like OutOfMemory;
1618     // that simply means we won't have an MDO to update.
1619     Method::build_profiling_method_data(m, THREAD);
1620     if (HAS_PENDING_EXCEPTION) {
1621       // Only metaspace OOM is expected. No Java code executed.
1622       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1623       CLEAR_PENDING_EXCEPTION;
1624     }
1625     mdo = m->method_data();
1626   }
1627 
1628   if (mdo != NULL) {
1629     mdo->inc_trap_count(Deoptimization::Reason_none);
1630   }
1631 
1632   if (TracePredicateFailedTraps) {
1633     stringStream ss1, ss2;
1634     vframeStream vfst(current);
1635     Method* inlinee = vfst.method();
1636     inlinee->print_short_name(&ss1);
1637     m->print_short_name(&ss2);
1638     tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.freeze(), vfst.bci(), ss2.freeze(), p2i(caller_frame.pc()));
1639   }
1640 
1641 
1642   Deoptimization::deoptimize_frame(current, caller_frame.id());
1643 
1644 JRT_END
1645 
1646 #ifndef PRODUCT
1647 void Runtime1::print_statistics() {
1648   tty->print_cr("C1 Runtime statistics:");
1649   tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
1650   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
1651   tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
1652   tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
1653   tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
1654   tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
1655   tty->print_cr(" _byte_arraycopy_cnt:             %d", _byte_arraycopy_stub_cnt);
1656   tty->print_cr(" _short_arraycopy_cnt:            %d", _short_arraycopy_stub_cnt);
1657   tty->print_cr(" _int_arraycopy_cnt:              %d", _int_arraycopy_stub_cnt);
1658   tty->print_cr(" _long_arraycopy_cnt:             %d", _long_arraycopy_stub_cnt);
1659   tty->print_cr(" _oop_arraycopy_cnt:              %d", _oop_arraycopy_stub_cnt);
1660   tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
1661   tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt: %d", _arraycopy_checkcast_attempt_cnt);
1663 
1664   tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
1665   tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
1666   tty->print_cr(" _new_flat_array_slowcase_cnt:    %d", _new_flat_array_slowcase_cnt);
1667   tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
1668   tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
1669   tty->print_cr(" _load_flattened_array_slowcase_cnt:   %d", _load_flattened_array_slowcase_cnt);
1670   tty->print_cr(" _store_flattened_array_slowcase_cnt:  %d", _store_flattened_array_slowcase_cnt);
1671   tty->print_cr(" _substitutability_check_slowcase_cnt: %d", _substitutability_check_slowcase_cnt);
  tty->print_cr(" _buffer_inline_args_slowcase_cnt: %d", _buffer_inline_args_slowcase_cnt);
  tty->print_cr(" _buffer_inline_args_no_receiver_slowcase_cnt: %d", _buffer_inline_args_no_receiver_slowcase_cnt);
1674 
1675   tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
1676   tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
1677   tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);
1678 
  tty->print_cr(" _throw_range_check_exception_count:            %d", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_illegal_monitor_state_exception_count:  %d", _throw_illegal_monitor_state_exception_count);
  tty->print_cr(" _throw_count:                                  %d", _throw_count);
1687 
1688   SharedRuntime::print_ic_miss_histogram();
1689   tty->cr();
1690 }
1691 #endif // PRODUCT