/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}

// Implementation of Runtime1

CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_count = 0;

static int _byte_arraycopy_stub_cnt = 0;
static int _short_arraycopy_stub_cnt = 0;
static int _int_arraycopy_stub_cnt = 0;
static int _long_arraycopy_stub_cnt = 0;
static int _oop_arraycopy_stub_cnt = 0;

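// Note: same-width element types share a counter (T_FLOAT is counted with
// T_INT, T_DOUBLE with T_LONG, and so on); the returned address is intended
// to be incremented directly by the generated arraycopy stubs.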
address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_stub_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_stub_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_stub_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_stub_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_stub_cnt;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}


#endif

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted(JavaThread* current) {
  RegisterMap reg_map(current, false);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
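// (invoked from the allocation slow paths below when DeoptimizeALot is set)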
static void deopt_caller(JavaThread* current) {
  if (!caller_is_deopted(current)) {
    RegisterMap reg_map(current, false);
    frame runtime_frame = current->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");
  }
}

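// Adapter that captures a StubID so that the generic generate_blob() driver
// below can call back into Runtime1::generate_code_for() for that stub.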
class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
 private:
  Runtime1::StubID _id;
 public:
  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    return Runtime1::generate_code_for(_id, sasm);
  }
};

CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  OopMapSet* oop_maps;
  int frame_size;
  bool must_gc_arguments;

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
  // generate code for runtime stub
  oop_maps = cl->generate_code(sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");
  assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");

  // align so printing shows nops instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();

  frame_size = sasm->frame_size();
  must_gc_arguments = sasm->must_gc_arguments();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 frame_size,
                                                 oop_maps,
                                                 must_gc_arguments);
  assert(blob != NULL, "blob must exist");
  return blob;
}

void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  bool expect_oop_map = true;
#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
#if defined(PPC32)
  case handle_exception_nofpu_id:
#endif
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  StubIDStubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  _blobs[id] = blob;
}

void Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
  BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
  bs->generate_c1_runtime_stubs(blob);
}

CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f
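// e.g. FUNCTION_CASE(entry, SharedRuntime::d2f) expands to:
//   if ((intptr_t)entry == CAST_FROM_FN_PTR(intptr_t, SharedRuntime::d2f))  return "SharedRuntime::d2f";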

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef JFR_HAVE_INTRINSICS
  FUNCTION_CASE(entry, JFR_TIME_FUNCTION);
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}


JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare, but this runtime path is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END


JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_object_array_slowcase_cnt++;)

  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare, but this runtime path is stressful to deoptimization
  // if we deoptimize here, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END


// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The inlinee method (which may be inlined into the
// enclosing method) is passed as an argument; to make that possible it is embedded in the code as
// a constant.
static nmethod* counter_overflow_helper(JavaThread* current, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(current, m);

  RegisterMap map(current, false);
  frame fr = current->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(current, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
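    // e.g. a backward _goto at branch_bci 30 whose signed 16-bit operand
    // decodes to -12 targets bci 18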
  }
  osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
  return osr_nm;
}

JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(current, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(current, false);
      frame fr = current->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(current, fr.id());
    }
  JRT_BLOCK_END
  return NULL;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return to is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  current->set_is_method_handle_return(false);

  Handle exception(current, ex);

  // This function is called when we are about to throw an exception. Therefore,
  // we have to poll the stack watermark barrier to make sure that not yet safe
  // stack frames are made safe before returning into them.
  if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
    // frame has been unwound. It instead builds its own stub frame, to call the
    // runtime. But the throwing frame has already been unwound here.
    StackWatermarkSet::after_unwind(current);
  }

  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(current, false);
    frame exception_frame = current->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  // Check that exception is a subclass of Throwable
  assert(exception->is_a(vmClasses::Throwable_klass()),
         "Exception not subclass of Throwable");

  // debugging support
  // tracing
  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    stringStream tempst;
    assert(nm->method() != NULL, "Unexpected NULL method()");
    tempst.print("C1 compiled method <%s>\n"
                 " at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT,
                 nm->method()->print_value_string(), p2i(pc), p2i(current));
    Exceptions::log_exception(exception, tempst.as_string());
  }
  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = current->stack_overflow_state()->reguard_stack_if_needed();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(current);
    frame stub_frame = current->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(current, caller_frame.id());
    assert(caller_is_deopted(current), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      current->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    current->clear_exception_oop_and_pc();

    bool recursive_exception = false;
    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false, recursive_exception);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    current->set_exception_oop(exception());
    current->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only when no other exception happened
    // during the computation of the compiled exception handler.
    // Checking for exception oop equality is not sufficient because
    // some exceptions are pre-allocated and reused.
    if (continuation != NULL && !recursive_exception) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  current->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  current->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (log_is_enabled(Info, exceptions)) {
    ResourceMark rm;
    log_info(exceptions)("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT
                         " for exception thrown at PC " PTR_FORMAT,
                         p2i(current), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* current) {
  oop exception = current->exception_oop();
  address pc = current->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(NoHandleMark nhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(current, exception, pc, nm);
  }
  // Back in Java: use no oops, DON'T safepoint.

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted(current)) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}


JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* current, int index, arrayOopDesc* a))
  NOT_PRODUCT(_throw_range_check_exception_count++;)
  const int len = 35;
  assert(len < strlen("Index %d out of bounds for length %d"), "Must allocate more space for message.");
  char message[2 * jintAsStringSize + len];
  sprintf(message, "Index %d out of bounds for length %d", index, a->length());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* current, int index))
  NOT_PRODUCT(_throw_index_exception_count++;)
  char message[16];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* current))
  NOT_PRODUCT(_throw_div0_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* current))
  NOT_PRODUCT(_throw_null_pointer_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* current, oopDesc* object))
  NOT_PRODUCT(_throw_class_cast_exception_count++;)
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
  NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
  if (LockingMode == LM_MONITOR) {
    lock->set_obj(obj);
  }
  assert(LockingMode == LM_LIGHTWEIGHT || obj == lock->obj(), "must match");
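  // Under LM_LIGHTWEIGHT the BasicLock's displaced header is unused, so no
  // lock slot is passed to the helper (hence the NULL argument below).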
  SharedRuntime::monitor_enter_helper(obj, LockingMode == LM_LIGHTWEIGHT ? NULL : lock->lock(), current);
JRT_END


JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
  assert(current->last_Java_sp(), "last_Java_sp must be set");
  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be NULL or an object");
  SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(current, false);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != NULL, "Sanity check");
  methodHandle method(current, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant()) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != NULL) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


#ifndef DEOPTIMIZE_WHEN_PATCHING

static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code       = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code.  The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.  The
// current implementation emits such a check (see below).
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way.  The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized.  getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it.  This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class.  It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub.  Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing class will continue on its way.  Once the class is
// fully_initialized, the initializing_thread of the class becomes
// NULL, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.

// NB:
//
// Patchable instruction sequences inherently exhibit race conditions,
// where thread A is patching an instruction at the same time thread B
// is executing it.  The algorithms we use ensure that any observation
// that B can make on any intermediate states during A's patching will
// always end up with a correct outcome.  This is easiest if there are
// few or no intermediate states.  (Some inline caches have two
// related instructions that must be patched in tandem.  For those,
// intermediate states seem to be unavoidable, but we will get the
// right answer from all possible observation orders.)
//
// When patching the entry instruction at the head of a method, or a
// linkable call instruction inside of a method, we try very hard to
// use a patch sequence which executes as a single memory transaction.
// This means, in practice, that when thread A patches an instruction,
// it should patch a 32-bit or 64-bit word that somehow overlaps the
// instruction or is contained in it.  We believe that memory hardware
// will never break up such a word write, if it is naturally aligned
// for the word being written.  We also know that some CPUs work very
// hard to create atomic updates even of naturally unaligned words,
// but we don't want to bet the farm on this always working.
//
// Therefore, if there is any chance of a race condition, we try to
// patch only naturally aligned words, as single, full-word writes.

JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id))
  NOT_PRODUCT(_patch_code_slowcase_cnt++;)

  ResourceMark rm(current);
  RegisterMap reg_map(current, false);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  Klass* init_klass = NULL; // klass needed by load_klass_patching code
  Klass* load_klass = NULL; // klass needed by load_klass_patching code
  Handle mirror(current, NULL);                    // oop needed by load_mirror_patching code
  Handle appendix(current, NULL);                  // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(current, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time
    // it must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets.  load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either, so force deoptimization.
    // We only need to cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    // to be handled by special patching code when AlwaysAtomicAccesses
    // becomes a product feature. At this point, we are still going
    // for the deoptimization for consistency with volatile
    // accesses.

    patch_field_type = result.field_type();
    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));

  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = klass;
          mirror = Handle(current, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(current, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    load_klass = k;
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(current, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
        cpce->set_method_handle(pool, info);
        appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
        break;
      }
      case Bytecodes::_invokedynamic: {
        ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
        cpce->set_dynamic_call(pool, info);
        appendix = Handle(current, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    // At compile time we assumed the field wasn't volatile/atomic but after
    // loading it turns out it was volatile/atomic so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      if (deoptimize_for_volatile) {
        tty->print_cr("Deoptimizing for patching volatile field reference");
      }
      if (deoptimize_for_atomic) {
        tty->print_cr("Deoptimizing for patching atomic field reference");
      }
    }

    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(current, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted(current)) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // the jump has not been patched yet
        // The jump destination is slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;
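        // e.g. with *byte_count == 5 and *byte_skip == 3 the patch body
        // (copy_buff) starts 8 bytes below stub_location, while the three
        // bytes immediately below stub_location hold byte_count, byte_skip
        // and the being_initialized_entry offset, in that order going down.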
        if (TracePatching) {
          ttyLocker ttyl;
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != NULL, "nmethod not found");

          // NOTE we use pc() not original_pc() because we already know they are
          // identical otherwise we'd have never entered this block of code

          const ImmutableOopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != NULL, "null check");
          map->print();
          tty->cr();

          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
        bool do_patch = true;
        if (stub_id == Runtime1::access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete.  In this case the
          // patch site is setup so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass)->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass != NULL, "klass not set");
              n_copy->set_data((intx) (load_klass));
            } else {
              assert(mirror() != NULL, "klass not set");
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }

#if defined(PPC32)
        if (load_klass_or_mirror_patch_id ||
            stub_id == Runtime1::load_appendix_patching_id) {
          // Update the location in the nmethod with the proper
          // metadata.  When the code was generated, a NULL was stuffed
          // in the metadata table and that table needs to be updated to
          // have the right value.  On Intel the value is kept
          // directly in the instruction instead of in the metadata
          // table, so set_data above effectively updated the value.
1143           nmethod* nm = CodeCache::find_nmethod(instr_pc);
1144           assert(nm != NULL, "invalid nmethod_pc");
1145           RelocIterator mds(nm, copy_buff, copy_buff + 1);
1146           bool found = false;
1147           while (mds.next() && !found) {
1148             if (mds.type() == relocInfo::oop_type) {
1149               assert(stub_id == Runtime1::load_mirror_patching_id ||
1150                      stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1151               oop_Relocation* r = mds.oop_reloc();
1152               oop* oop_adr = r->oop_addr();
1153               *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
1154               r->fix_oop_relocation();
1155               found = true;
1156             } else if (mds.type() == relocInfo::metadata_type) {
1157               assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1158               metadata_Relocation* r = mds.metadata_reloc();
1159               Metadata** metadata_adr = r->metadata_addr();
1160               *metadata_adr = load_klass;
1161               r->fix_metadata_relocation();
1162               found = true;
1163             }
1164           }
1165           assert(found, "the metadata must exist!");
1166         }
1167 #endif
1168         if (do_patch) {
1169           // replace instructions
1170           // first replace the tail, then the call
1171 #ifdef ARM
1172           if((load_klass_or_mirror_patch_id ||
1173               stub_id == Runtime1::load_appendix_patching_id) &&
1174               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1175             nmethod* nm = CodeCache::find_nmethod(instr_pc);
1176             address addr = NULL;
1177             assert(nm != NULL, "invalid nmethod_pc");
1178             RelocIterator mds(nm, copy_buff, copy_buff + 1);
1179             while (mds.next()) {
1180               if (mds.type() == relocInfo::oop_type) {
1181                 assert(stub_id == Runtime1::load_mirror_patching_id ||
1182                        stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1183                 oop_Relocation* r = mds.oop_reloc();
1184                 addr = (address)r->oop_addr();
1185                 break;
1186               } else if (mds.type() == relocInfo::metadata_type) {
1187                 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1188                 metadata_Relocation* r = mds.metadata_reloc();
1189                 addr = (address)r->metadata_addr();
1190                 break;
1191               }
1192             }
1193             assert(addr != NULL, "metadata relocation must exist");
1194             copy_buff -= *byte_count;
1195             NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
1196             n_copy2->set_pc_relative_offset(addr, instr_pc);
1197           }
1198 #endif
1199 
          for (int i = NativeGeneralJump::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);

          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            // update the relocInfo to oop or metadata type
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != NULL, "invalid nmethod_pc");

            // The old patch site is now a move instruction so update
            // the reloc info so that it will get updated during
            // future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
#ifdef PPC32
            {
              address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
              RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
              relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                       relocInfo::none, rtype);
            }
#endif
          }

        } else {
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  {
    MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END

#else // DEOPTIMIZE_WHEN_PATCHING

static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
  if (stub_id == Runtime1::load_klass_patching_id ||
      stub_id == Runtime1::load_mirror_patching_id) {
    // last Java frame on the stack
    vframeStream vfst(current, true);
    assert(!vfst.at_end(), "Java frame must exist");

    methodHandle caller_method(current, vfst.method());
    int bci = vfst.bci();
    Bytecodes::Code code = caller_method()->java_code_at(bci);

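    // For the bytecodes below, the constant pool entry may already be
    // tagged as a resolution error; resolving it again will rethrow the
    // recorded error, so no patching is needed and the nmethod need not
    // be invalidated.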
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast: {
        Bytecode bc(caller_method(), caller_method->bcp_from(bci));
        constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
        if (tag.is_unresolved_klass_in_error()) {
          return false; // throws resolution error
        }
        break;
      }

      default: break;
    }
  }
  return true;
}

void Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id) {
  NOT_PRODUCT(_patch_code_slowcase_cnt++);

  // Enable WXWrite: this function is called from a C1 stub as a runtime
  // function (see the other implementation of patch_code above).
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));

  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  RegisterMap reg_map(current, false);

  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "Wrong frame type");

  if (is_patching_needed(current, stub_id)) {
    // Make sure the nmethod is invalidated, i.e. made not entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
  postcond(caller_is_deopted(current));
}

#endif // DEOPTIMIZE_WHEN_PATCHING


// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) have been taken,
// whether the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
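// Roughly, a calling stub behaves like the following sketch (illustrative
// only; the actual instruction sequences are generated in the per-cpu
// stub code):
//
//   call move_klass_patching     ; transitions into the VM via patch_code
//   test result, result          ; result = caller_is_deopted(current)
//   jnz  jump_to_deopt_blob      ; caller deoptimized: don't return to it
//   ret                          ; otherwise re-execute the patched site
//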
int Runtime1::move_klass_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_klass_patching_id);
  }
  // Back in Java; do not use oops and do not safepoint.

  // Return true if the calling code was deoptimized.

  return caller_is_deopted(current);
}

int Runtime1::move_mirror_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_mirror_patching_id);
  }
  // Back in Java; do not use oops and do not safepoint.

  // Return true if the calling code was deoptimized.

  return caller_is_deopted(current);
}

int Runtime1::move_appendix_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_appendix_patching_id);
  }
  // Back in Java; do not use oops and do not safepoint.

  // Return true if the calling code was deoptimized.

  return caller_is_deopted(current);
}

// Entry point for compiled code. We want to patch an nmethod.
// See the comment before move_klass_patching above for why we avoid a
// normal VM transition here.
//
int Runtime1::access_field_patching(JavaThread* current) {
  //
  // NOTE: we are still in Java
  //
  // Handles created in this function will be deleted by the
  // HandleMarkCleaner in the transition to the VM.
  NoHandleMark nhm;
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, access_field_patching_id);
  }
  // Back in Java; do not use oops and do not safepoint.

  // Return true if the calling code was deoptimized.

  return caller_is_deopted(current);
}


JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END


JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We have to return int instead of bool; otherwise there may be a mismatch
  // between the C calling convention and the Java one.
  // E.g., on x86, GCC may clear only %al when returning a bool false, while
  // the JVM reads the whole %eax as the return value, and so may
  // misinterpret a false result as true.
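  //
  // A hypothetical sketch of the failure mode if this returned bool:
  //
  //   ; C callee returning false
  //   mov  $0, %al           ; GCC defines only the low byte for a bool
  //   ret
  //   ; compiled Java caller
  //   call is_instance_of
  //   test %eax, %eax        ; reads all 32 bits; upper 24 may be stale
  //   jne  is_instance       ; can be taken even though the result is false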

  assert(mirror != NULL, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
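  // Note: as_Klass returns NULL for a primitive mirror (e.g. int.class),
  // so the k != NULL check below answers false for primitive types.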
  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END

JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
  ResourceMark rm;

  RegisterMap reg_map(current, false);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

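  // A loop predicate in the caller has failed: invalidate the nmethod,
  // then record the trap in the MDO so the recompilation can account
  // for the failed predicate.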
  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(current, nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    // Build an MDO.  Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // Only metaspace OOM is expected. No Java code executed.
      assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != NULL) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(current);
    Method* inlinee = vfst.method();
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());

JRT_END

#ifndef PRODUCT
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt:             %d", _byte_arraycopy_stub_cnt);
  tty->print_cr(" _short_arraycopy_cnt:            %d", _short_arraycopy_stub_cnt);
  tty->print_cr(" _int_arraycopy_cnt:              %d", _int_arraycopy_stub_cnt);
  tty->print_cr(" _long_arraycopy_cnt:             %d", _long_arraycopy_stub_cnt);
  tty->print_cr(" _oop_arraycopy_cnt:              %d", _oop_arraycopy_stub_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt: %d", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count:            %d", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_count:                                  %d", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT