src/hotspot/share/c1/c1_Runtime1.cpp

  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/codeBuffer.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "classfile/javaClasses.inline.hpp"
  33 #include "classfile/vmClasses.hpp"
  34 #include "classfile/vmSymbols.hpp"
  35 #include "code/codeBlob.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "code/scopeDesc.hpp"
  38 #include "code/vtableStubs.hpp"
  39 #include "compiler/compilationPolicy.hpp"
  40 #include "compiler/disassembler.hpp"
  41 #include "compiler/oopMap.hpp"
  42 #include "gc/shared/barrierSet.hpp"
  43 #include "gc/shared/c1/barrierSetC1.hpp"
  44 #include "gc/shared/collectedHeap.hpp"
  45 #include "interpreter/bytecode.hpp"
  46 #include "interpreter/interpreter.hpp"
  47 #include "jfr/support/jfrIntrinsics.hpp"
  48 #include "logging/log.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/access.inline.hpp"
  53 #include "oops/objArrayOop.inline.hpp"
  54 #include "oops/objArrayKlass.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "prims/jvmtiExport.hpp"
  57 #include "runtime/atomic.hpp"
  58 #include "runtime/fieldDescriptor.inline.hpp"
  59 #include "runtime/frame.inline.hpp"
  60 #include "runtime/handles.inline.hpp"
  61 #include "runtime/interfaceSupport.inline.hpp"
  62 #include "runtime/javaCalls.hpp"
  63 #include "runtime/sharedRuntime.hpp"
  64 #include "runtime/stackWatermarkSet.hpp"
  65 #include "runtime/stubRoutines.hpp"
  66 #include "runtime/vframe.inline.hpp"
  67 #include "runtime/vframeArray.hpp"
  68 #include "runtime/vm_version.hpp"
  69 #include "utilities/copy.hpp"
  70 #include "utilities/events.hpp"
  71 
  72 
  73 // Implementation of StubAssembler
  74 
  75 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  76   _name = name;
  77   _must_gc_arguments = false;
  78   _frame_size = no_frame_size;
  79   _num_rt_args = 0;
  80   _stub_id = stub_id;
  81 }
  82 
  83 
  84 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  85   _name = name;
  86   _must_gc_arguments = must_gc_arguments;
  87 }
  88 

 243   switch (id) {
 244     // These stubs don't need to have an oopmap
 245   case C1StubId::dtrace_object_alloc_id:
 246   case C1StubId::slow_subtype_check_id:
 247   case C1StubId::fpu2long_stub_id:
 248   case C1StubId::unwind_exception_id:
 249   case C1StubId::counter_overflow_id:
 250     expect_oop_map = false;
 251     break;
 252   default:
 253     break;
 254   }
 255 #endif
 256   C1StubIdStubAssemblerCodeGenClosure cl(id);
 257   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
 258   // install blob
 259   _blobs[(int)id] = blob;
 260 }
 261 
 262 void Runtime1::initialize(BufferBlob* blob) {
 263   // platform-dependent initialization
 264   initialize_pd();
 265   // generate stubs
 266   int limit = (int)C1StubId::NUM_STUBIDS;
 267   for (int id = 0; id < limit; id++) generate_blob_for(blob, (C1StubId)id);
 268   // printing
 269 #ifndef PRODUCT
 270   if (PrintSimpleStubs) {
 271     ResourceMark rm;
 272     for (int id = 0; id < limit; id++) {
 273       _blobs[id]->print();
 274       if (_blobs[id]->oop_maps() != nullptr) {
 275         _blobs[id]->oop_maps()->print();
 276       }
 277     }
 278   }
 279 #endif
 280   BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
 281   bs->generate_c1_runtime_stubs(blob);
 282 }

 329   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 330   FUNCTION_CASE(entry, is_instance_of);
 331   FUNCTION_CASE(entry, trace_block_entry);
 332 #ifdef JFR_HAVE_INTRINSICS
 333   FUNCTION_CASE(entry, JfrTime::time_function());
 334 #endif
 335   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 336   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 337   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 338   FUNCTION_CASE(entry, StubRoutines::dexp());
 339   FUNCTION_CASE(entry, StubRoutines::dlog());
 340   FUNCTION_CASE(entry, StubRoutines::dlog10());
 341   FUNCTION_CASE(entry, StubRoutines::dpow());
 342   FUNCTION_CASE(entry, StubRoutines::dsin());
 343   FUNCTION_CASE(entry, StubRoutines::dcos());
 344   FUNCTION_CASE(entry, StubRoutines::dtan());
 345   FUNCTION_CASE(entry, StubRoutines::dtanh());
 346 
 347 #undef FUNCTION_CASE
 348 
 349   // Soft float adds more runtime names.
 350   return pd_name_for_address(entry);
 351 }
 352 
 353 
 354 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
 355 #ifndef PRODUCT
 356   if (PrintC1Statistics) {
 357     _new_instance_slowcase_cnt++;
 358   }
 359 #endif
 360   assert(klass->is_klass(), "not a class");
 361   Handle holder(current, klass->klass_holder()); // keep the klass alive
 362   InstanceKlass* h = InstanceKlass::cast(klass);
 363   h->check_valid_for_instantiation(true, CHECK);
 364   // make sure klass is initialized
 365   h->initialize(CHECK);
 366   // allocate instance and return via TLS
 367   oop obj = h->allocate_instance(CHECK);
 368   current->set_vm_result(obj);
 369 JRT_END
 370 
 371 
 372 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
 373 #ifndef PRODUCT
 374   if (PrintC1Statistics) {
 375     _new_type_array_slowcase_cnt++;
 376   }
 377 #endif
  378   // Note: no handle for klass is needed since it is not used
  379   //       anymore after new_typeArray() and no GC can happen before.
  380   //       (This may have to change if this code changes!)
 381   assert(klass->is_klass(), "not a class");
 382   BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
 383   oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
 384   current->set_vm_result(obj);
  385   // Deoptimizing here is pretty rare but stressful for the deoptimization
  386   // machinery, so under DeoptimizeALot we force a deopt to exercise the path.
 387   if (DeoptimizeALot) {
 388     deopt_caller(current);
 389   }
 390 
 391 JRT_END
 392 
 393 
 394 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
 395 #ifndef PRODUCT
 396   if (PrintC1Statistics) {
 397     _new_object_array_slowcase_cnt++;
 398   }
 399 #endif
  400   // Note: no handle for klass is needed since it is not used
  401   //       anymore after new_objArray() and no GC can happen before.
  402   //       (This may have to change if this code changes!)
 403   assert(array_klass->is_klass(), "not a class");
 404   Handle holder(current, array_klass->klass_holder()); // keep the klass alive
 405   Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
 406   objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
 407   current->set_vm_result(obj);
  408   // Deoptimizing here is pretty rare but stressful for the deoptimization
  409   // machinery, so under DeoptimizeALot we force a deopt to exercise the path.
 410   if (DeoptimizeALot) {
 411     deopt_caller(current);
 412   }
 413 JRT_END
 414 
 415 
 416 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
 417 #ifndef PRODUCT
 418   if (PrintC1Statistics) {
 419     _new_multi_array_slowcase_cnt++;
 420   }
 421 #endif
 422   assert(klass->is_klass(), "not a class");
 423   assert(rank >= 1, "rank must be nonzero");
 424   Handle holder(current, klass->klass_holder()); // keep the klass alive
 425   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
 426   current->set_vm_result(obj);
 427 JRT_END
 428 
 429 
 430 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
 431   tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
 432 JRT_END
 433 
 434 
 435 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 436   ResourceMark rm(current);

 467       case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
 468       case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
 469       case Bytecodes::_if_icmple: case Bytecodes::_ifle:
 470       case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
 471       case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
 472       case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
 473       case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
 474         offset = (int16_t)Bytes::get_Java_u2(pc + 1);
 475         break;
 476       case Bytecodes::_goto_w:
 477         offset = Bytes::get_Java_u4(pc + 1);
 478         break;
 479       default: ;
 480     }
 481     bci = branch_bci + offset;
 482   }
 483   osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
 484   return osr_nm;
 485 }
 486 
 487 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
 488   nmethod* osr_nm;
 489   JRT_BLOCK
 490     osr_nm = counter_overflow_helper(current, bci, method);
 491     if (osr_nm != nullptr) {
 492       RegisterMap map(current,
 493                       RegisterMap::UpdateMap::skip,
 494                       RegisterMap::ProcessFrames::include,
 495                       RegisterMap::WalkContinuation::skip);
 496       frame fr =  current->last_frame().sender(&map);
 497       Deoptimization::deoptimize_frame(current, fr.id());
 498     }
 499   JRT_BLOCK_END
 500   return nullptr;
 501 JRT_END
 502 
 503 extern void vm_exit(int code);
 504 
  505 // Enter this method from the compiled code handler below. This is where we transition
 506 // to VM mode. This is done as a helper routine so that the method called directly
 507 // from compiled code does not have to transition to VM. This allows the entry
 508 // method to see if the nmethod that we have just looked up a handler for has
  509 // been deoptimized while we were in the vm. This simplifies the assembly code
  510 // in the cpu directories.
 511 //
  512 // We are entering here from the exception stub (via the entry method below).
  513 // If there is a compiled exception handler in this method, we will continue there;
  514 // otherwise we will unwind the stack and continue at the caller of the top frame method.
 515 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
 516 // control the area where we can allow a safepoint. After we exit the safepoint area we can
 517 // check to see if the handler we are going to return is now in a nmethod that has
 518 // been deoptimized. If that is the case we return the deopt blob
 519 // unpack_with_exception entry instead. This makes life for the exception blob easier
 520 // because making that same check and diverting is painful from assembly language.
 521 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
 522   // Reset method handle flag.
 523   current->set_is_method_handle_return(false);
 524 
 525   Handle exception(current, ex);
 526 
 527   // This function is called when we are about to throw an exception. Therefore,
  528   // we have to poll the stack watermark barrier to make sure that stack frames
  529   // which are not yet safe are made safe before returning into them.
 530   if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) {
 531     // The C1StubId::handle_exception_from_callee_id handler is invoked after the
 532     // frame has been unwound. It instead builds its own stub frame, to call the
 533     // runtime. But the throwing frame has already been unwound here.
 534     StackWatermarkSet::after_unwind(current);
 535   }
 536 
 537   nm = CodeCache::find_nmethod(pc);
 538   assert(nm != nullptr, "this is not an nmethod");
  539   // Adjust the pc as needed.
 540   if (nm->is_deopt_pc(pc)) {
 541     RegisterMap map(current,

 733     _throw_class_cast_exception_count++;
 734   }
 735 #endif
 736   ResourceMark rm(current);
 737   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 738   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 739 JRT_END
 740 
 741 
 742 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
 743 #ifndef PRODUCT
 744   if (PrintC1Statistics) {
 745     _throw_incompatible_class_change_error_count++;
 746   }
 747 #endif
 748   ResourceMark rm(current);
 749   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
 750 JRT_END
 751 
 752 
 753 JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
 754 #ifndef PRODUCT
 755   if (PrintC1Statistics) {
 756     _monitorenter_slowcase_cnt++;
 757   }
 758 #endif
 759   if (LockingMode == LM_MONITOR) {
 760     lock->set_obj(obj);
 761   }
 762   assert(obj == lock->obj(), "must match");
 763   SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
 764 JRT_END
 765 
 766 
 767 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 768   assert(current == JavaThread::current(), "pre-condition");
 769 #ifndef PRODUCT
 770   if (PrintC1Statistics) {
 771     _monitorexit_slowcase_cnt++;
 772   }
 773 #endif
 774   assert(current->last_Java_sp(), "last_Java_sp must be set");
 775   oop obj = lock->obj();
 776   assert(oopDesc::is_oop(obj), "must be null or an object");
 777   SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
 778 JRT_END
 779 
 780 // Cf. OptoRuntime::deoptimize_caller_frame
 781 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
 782   // Called from within the owner thread, so no need for safepoint
 783   RegisterMap reg_map(current,
 784                       RegisterMap::UpdateMap::skip,
 785                       RegisterMap::ProcessFrames::include,
 786                       RegisterMap::WalkContinuation::skip);
 787   frame stub_frame = current->last_frame();
 788   assert(stub_frame.is_runtime_frame(), "Sanity check");
 789   frame caller_frame = stub_frame.sender(&reg_map);
 790   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 791   assert(nm != nullptr, "Sanity check");
 792   methodHandle method(current, nm->method());
 793   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 794   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 795   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 796 
 797   if (action == Deoptimization::Action_make_not_entrant) {
 798     if (nm->make_not_entrant()) {
 799       if (reason == Deoptimization::Reason_tenured) {
 800         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
 801         if (trap_mdo != nullptr) {

 804       }
 805     }
 806   }
 807 
 808   // Deoptimize the caller frame.
 809   Deoptimization::deoptimize_frame(current, caller_frame.id());
 810   // Return to the now deoptimized frame.
 811 JRT_END
 812 
 813 
 814 #ifndef DEOPTIMIZE_WHEN_PATCHING
 815 
 816 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
 817   Bytecode_field field_access(caller, bci);
 818   // This can be static or non-static field access
 819   Bytecodes::Code code       = field_access.code();
 820 
 821   // We must load class, initialize class and resolve the field
 822   fieldDescriptor result; // initialize class if needed
 823   constantPoolHandle constants(THREAD, caller->constants());
 824   LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
 825   return result.field_holder();
 826 }
 827 
 828 
 829 //
 830 // This routine patches sites where a class wasn't loaded or
 831 // initialized at the time the code was generated.  It handles
 832 // references to classes, fields and forcing of initialization.  Most
  833 // of the cases are straightforward and involve simply forcing
  834 // resolution of a class, rewriting the instruction stream with the
  835 // needed constant and replacing the call to this function with the
  836 // patched code.  The case for static fields is more complicated since
  837 // the thread which is in the process of initializing a class can
  838 // access its static fields while other threads can't, so the code
  839 // either has to deoptimize when this case is detected or execute a
  840 // check that the current thread is the initializing thread.
  841 //
 842 //
 843 // Patches basically look like this:
 844 //

 907 // always end up with a correct outcome.  This is easiest if there are
 908 // few or no intermediate states.  (Some inline caches have two
 909 // related instructions that must be patched in tandem.  For those,
 910 // intermediate states seem to be unavoidable, but we will get the
 911 // right answer from all possible observation orders.)
 912 //
 913 // When patching the entry instruction at the head of a method, or a
 914 // linkable call instruction inside of a method, we try very hard to
 915 // use a patch sequence which executes as a single memory transaction.
 916 // This means, in practice, that when thread A patches an instruction,
 917 // it should patch a 32-bit or 64-bit word that somehow overlaps the
 918 // instruction or is contained in it.  We believe that memory hardware
 919 // will never break up such a word write, if it is naturally aligned
 920 // for the word being written.  We also know that some CPUs work very
 921 // hard to create atomic updates even of naturally unaligned words,
 922 // but we don't want to bet the farm on this always working.
 923 //
 924 // Therefore, if there is any chance of a race condition, we try to
 925 // patch only naturally aligned words, as single, full-word writes.
 926 
 927 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
 928 #ifndef PRODUCT
 929   if (PrintC1Statistics) {
 930     _patch_code_slowcase_cnt++;
 931   }
 932 #endif
 933 
 934   ResourceMark rm(current);
 935   RegisterMap reg_map(current,
 936                       RegisterMap::UpdateMap::skip,
 937                       RegisterMap::ProcessFrames::include,
 938                       RegisterMap::WalkContinuation::skip);
 939   frame runtime_frame = current->last_frame();
 940   frame caller_frame = runtime_frame.sender(&reg_map);
 941 
 942   // last java frame on stack
 943   vframeStream vfst(current, true);
 944   assert(!vfst.at_end(), "Java frame must exist");
 945 
 946   methodHandle caller_method(current, vfst.method());
  947   // Note that caller_method->code() may not be the same as caller_code because of OSR

 952   Bytecodes::Code code = caller_method()->java_code_at(bci);
 953 
 954   // this is used by assertions in the access_field_patching_id
 955   BasicType patch_field_type = T_ILLEGAL;
 956   bool deoptimize_for_volatile = false;
 957   bool deoptimize_for_atomic = false;
 958   int patch_field_offset = -1;
 959   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
 960   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
 961   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
 962   Handle appendix(current, nullptr); // oop needed by appendix_patching code
 963   bool load_klass_or_mirror_patch_id =
 964     (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
 965 
 966   if (stub_id == C1StubId::access_field_patching_id) {
 967 
 968     Bytecode_field field_access(caller_method, bci);
 969     fieldDescriptor result; // initialize class if needed
 970     Bytecodes::Code code = field_access.code();
 971     constantPoolHandle constants(current, caller_method->constants());
 972     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
 973     patch_field_offset = result.offset();
 974 
  975     // If we're patching a field which is volatile, then at compile time it
  976     // must not have been known to be volatile, so the generated code
 977     // isn't correct for a volatile reference.  The nmethod has to be
 978     // deoptimized so that the code can be regenerated correctly.
 979     // This check is only needed for access_field_patching since this
 980     // is the path for patching field offsets.  load_klass is only
 981     // used for patching references to oops which don't need special
 982     // handling in the volatile case.
 983 
 984     deoptimize_for_volatile = result.access_flags().is_volatile();
 985 
 986     // If we are patching a field which should be atomic, then
  987     // the generated code is not correct either, so force deoptimization.
  988     // We only need to cover T_LONG and T_DOUBLE fields, as we can
 989     // break access atomicity only for them.
 990 
 991     // Strictly speaking, the deoptimization on 64-bit platforms
 992     // is unnecessary, and T_LONG stores on 32-bit platforms need

1290     switch (code) {
1291       case Bytecodes::_new:
1292       case Bytecodes::_anewarray:
1293       case Bytecodes::_multianewarray:
1294       case Bytecodes::_instanceof:
1295       case Bytecodes::_checkcast: {
1296         Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1297         constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1298         if (tag.is_unresolved_klass_in_error()) {
1299           return false; // throws resolution error
1300         }
1301         break;
1302       }
1303 
1304       default: break;
1305     }
1306   }
1307   return true;
1308 }
1309 
1310 void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
1311 #ifndef PRODUCT
1312   if (PrintC1Statistics) {
1313     _patch_code_slowcase_cnt++;
1314   }
1315 #endif
1316 
 1317   // Enable WXWrite: the function is called by a c1 stub as a runtime function
 1318   // (see the other implementation above).
1319   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1320 
1321   if (TracePatching) {
1322     tty->print_cr("Deoptimizing because patch is needed");
1323   }
1324 
1325   RegisterMap reg_map(current,
1326                       RegisterMap::UpdateMap::skip,
1327                       RegisterMap::ProcessFrames::include,
1328                       RegisterMap::WalkContinuation::skip);
1329 
1330   frame runtime_frame = current->last_frame();
1331   frame caller_frame = runtime_frame.sender(&reg_map);
1332   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1333 
1334   if (is_patching_needed(current, stub_id)) {
1335     // Make sure the nmethod is invalidated, i.e. made not entrant.
1336     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1337     if (nm != nullptr) {
1338       nm->make_not_entrant();
1339     }
1340   }
1341 
1342   Deoptimization::deoptimize_frame(current, caller_frame.id());
1343   // Return to the now deoptimized frame.
1344   postcond(caller_is_deopted(current));
1345 }
1346 
1347 #endif // DEOPTIMIZE_WHEN_PATCHING
1348 
1349 // Entry point for compiled code. We want to patch a nmethod.
 1350 // We don't do a normal VM transition here because we want to
 1351 // know, after the patching is complete and any safepoint(s) are taken,
 1352 // whether the calling nmethod was deoptimized. We do this by calling a
1353 // helper method which does the normal VM transition and when it
1354 // completes we can check for deoptimization. This simplifies the
1355 // assembly code in the cpu directories.
1356 //
1357 int Runtime1::move_klass_patching(JavaThread* current) {
1358 //
1359 // NOTE: we are still in Java
1360 //
1361   debug_only(NoHandleMark nhm;)
1362   {
1363     // Enter VM mode
1364     ResetNoHandleMark rnhm;
1365     patch_code(current, C1StubId::load_klass_patching_id);

1416 int Runtime1::access_field_patching(JavaThread* current) {
1417   //
1418   // NOTE: we are still in Java
1419   //
1420   // Handles created in this function will be deleted by the
1421   // HandleMarkCleaner in the transition to the VM.
1422   NoHandleMark nhm;
1423   {
1424     // Enter VM mode
1425     ResetNoHandleMark rnhm;
1426     patch_code(current, C1StubId::access_field_patching_id);
1427   }
1428   // Back in JAVA, use no oops DON'T safepoint
1429 
1430   // Return true if calling code is deoptimized
1431 
1432   return caller_is_deopted(current);
1433 }
1434 
1435 
1436 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1437   // for now we just print out the block id
1438   tty->print("%d ", block_id);
1439 JRT_END
1440 
1441 
1442 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
 1443   // We have to return int instead of bool; otherwise there may be a mismatch
 1444   // between the C calling convention and the Java one.
 1445   // E.g., on x86, GCC may clear only %al when returning a bool false, but
 1446   // the JVM reads the whole %eax as the return value, so stale upper bits
 1447   // could be misinterpreted as a boolean true.
1448 
1449   assert(mirror != nullptr, "should null-check on mirror before calling");
1450   Klass* k = java_lang_Class::as_Klass(mirror);
1451   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1452 JRT_END
1453 
1454 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1455   ResourceMark rm;
1456 
1457   RegisterMap reg_map(current,
1458                       RegisterMap::UpdateMap::skip,
1459                       RegisterMap::ProcessFrames::include,
1460                       RegisterMap::WalkContinuation::skip);
1461   frame runtime_frame = current->last_frame();
1462   frame caller_frame = runtime_frame.sender(&reg_map);
1463 
1464   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1465   assert (nm != nullptr, "no more nmethod?");
1466   nm->make_not_entrant();
1467 
1468   methodHandle m(current, nm->method());
1469   MethodData* mdo = m->method_data();
1470 
1471   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1472     // Build an MDO.  Ignore errors like OutOfMemory;
1473     // that simply means we won't have an MDO to update.
1474     Method::build_profiling_method_data(m, THREAD);

1494   }
1495 
1496 
1497   Deoptimization::deoptimize_frame(current, caller_frame.id());
1498 
1499 JRT_END
1500 
1501 // Check exception if AbortVMOnException flag set
1502 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1503   ResourceMark rm;
1504   const char* message = nullptr;
1505   if (ex->is_a(vmClasses::Throwable_klass())) {
1506     oop msg = java_lang_Throwable::message(ex);
1507     if (msg != nullptr) {
1508       message = java_lang_String::as_utf8_string(msg);
1509     }
1510   }
1511   Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1512 JRT_END
1513 
1514 #ifndef PRODUCT
1515 void Runtime1::print_statistics() {
1516   tty->print_cr("C1 Runtime statistics:");
1517   tty->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1518   tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1519   tty->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1520   tty->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1521   tty->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1522   tty->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1523   tty->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1524   tty->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1525   tty->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1526   tty->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1527   tty->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1528   tty->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1529   tty->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1530   tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1531 
1532   tty->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1533   tty->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1534   tty->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1535   tty->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1536   tty->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1537   tty->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1538   tty->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1539 
1540   tty->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
1541   tty->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
1542   tty->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
1543   tty->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
1544   tty->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
1545   tty->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);
1546   tty->print_cr(" _throw_count:                                  %u:", _throw_count);
1547 
1548   SharedRuntime::print_ic_miss_histogram();
1549   tty->cr();
1550 }
1551 #endif // PRODUCT

  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/codeBuffer.hpp"
  27 #include "c1/c1_CodeStubs.hpp"
  28 #include "c1/c1_Defs.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_MacroAssembler.hpp"
  31 #include "c1/c1_Runtime1.hpp"
  32 #include "classfile/javaClasses.inline.hpp"
  33 #include "classfile/vmClasses.hpp"
  34 #include "classfile/vmSymbols.hpp"
  35 #include "code/codeBlob.hpp"
  36 #include "code/compiledIC.hpp"
  37 #include "code/scopeDesc.hpp"
  38 #include "code/vtableStubs.hpp"
  39 #include "compiler/compilationPolicy.hpp"
  40 #include "compiler/compilerDefinitions.inline.hpp"
  41 #include "compiler/disassembler.hpp"
  42 #include "compiler/oopMap.hpp"
  43 #include "gc/shared/barrierSet.hpp"
  44 #include "gc/shared/c1/barrierSetC1.hpp"
  45 #include "gc/shared/collectedHeap.hpp"
  46 #include "interpreter/bytecode.hpp"
  47 #include "interpreter/interpreter.hpp"
  48 #include "jfr/support/jfrIntrinsics.hpp"
  49 #include "logging/log.hpp"
  50 #include "memory/oopFactory.hpp"
  51 #include "memory/resourceArea.hpp"
  52 #include "memory/universe.hpp"
  53 #include "oops/access.inline.hpp"
  54 #include "oops/objArrayOop.inline.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/oop.inline.hpp"
  57 #include "prims/jvmtiExport.hpp"
  58 #include "runtime/atomic.hpp"
  59 #include "runtime/fieldDescriptor.inline.hpp"
  60 #include "runtime/frame.inline.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/interfaceSupport.inline.hpp"
  63 #include "runtime/javaCalls.hpp"
  64 #include "runtime/perfData.inline.hpp"
  65 #include "runtime/runtimeUpcalls.hpp"
  66 #include "runtime/sharedRuntime.hpp"
  67 #include "runtime/stackWatermarkSet.hpp"
  68 #include "runtime/stubRoutines.hpp"
  69 #include "runtime/vframe.inline.hpp"
  70 #include "runtime/vframeArray.hpp"
  71 #include "runtime/vm_version.hpp"
  72 #include "services/management.hpp"
  73 #include "utilities/copy.hpp"
  74 #include "utilities/events.hpp"
  75 
  76 
  77 // Implementation of StubAssembler
  78 
  79 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  80   _name = name;
  81   _must_gc_arguments = false;
  82   _frame_size = no_frame_size;
  83   _num_rt_args = 0;
  84   _stub_id = stub_id;
  85 }
  86 
  87 
  88 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  89   _name = name;
  90   _must_gc_arguments = must_gc_arguments;
  91 }
  92 

 247   switch (id) {
 248     // These stubs don't need to have an oopmap
 249   case C1StubId::dtrace_object_alloc_id:
 250   case C1StubId::slow_subtype_check_id:
 251   case C1StubId::fpu2long_stub_id:
 252   case C1StubId::unwind_exception_id:
 253   case C1StubId::counter_overflow_id:
 254     expect_oop_map = false;
 255     break;
 256   default:
 257     break;
 258   }
 259 #endif
 260   C1StubIdStubAssemblerCodeGenClosure cl(id);
 261   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
 262   // install blob
 263   _blobs[(int)id] = blob;
 264 }
 265 
 266 void Runtime1::initialize(BufferBlob* blob) {
 267   init_counters();
 268   // platform-dependent initialization
 269   initialize_pd();
 270   // generate stubs
 271   int limit = (int)C1StubId::NUM_STUBIDS;
 272   for (int id = 0; id < limit; id++) generate_blob_for(blob, (C1StubId)id);
 273   // printing
 274 #ifndef PRODUCT
 275   if (PrintSimpleStubs) {
 276     ResourceMark rm;
 277     for (int id = 0; id < limit; id++) {
 278       _blobs[id]->print();
 279       if (_blobs[id]->oop_maps() != nullptr) {
 280         _blobs[id]->oop_maps()->print();
 281       }
 282     }
 283   }
 284 #endif
 285   BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
 286   bs->generate_c1_runtime_stubs(blob);
 287 }
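
// Illustrative sketch (not part of the file): after initialize() has run,
// generated stubs are fetched by id. Based on blob_for()/entry_for() as used
// elsewhere in this file, usage looks roughly like:
//
//   CodeBlob* blob  = Runtime1::blob_for(C1StubId::unwind_exception_id);
//   address   entry = Runtime1::entry_for(C1StubId::unwind_exception_id);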

 334   FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
 335   FUNCTION_CASE(entry, is_instance_of);
 336   FUNCTION_CASE(entry, trace_block_entry);
 337 #ifdef JFR_HAVE_INTRINSICS
 338   FUNCTION_CASE(entry, JfrTime::time_function());
 339 #endif
 340   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
 341   FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
 342   FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
 343   FUNCTION_CASE(entry, StubRoutines::dexp());
 344   FUNCTION_CASE(entry, StubRoutines::dlog());
 345   FUNCTION_CASE(entry, StubRoutines::dlog10());
 346   FUNCTION_CASE(entry, StubRoutines::dpow());
 347   FUNCTION_CASE(entry, StubRoutines::dsin());
 348   FUNCTION_CASE(entry, StubRoutines::dcos());
 349   FUNCTION_CASE(entry, StubRoutines::dtan());
 350   FUNCTION_CASE(entry, StubRoutines::dtanh());
 351 
 352 #undef FUNCTION_CASE
 353 
  354   // RuntimeUpcalls also has a map of addresses to names
 355   const char* upcall_name = RuntimeUpcalls::get_name_for_upcall_address(entry);
 356   if (upcall_name != nullptr) {
 357     return upcall_name;
 358   }
 359 
 360   // Soft float adds more runtime names.
 361   return pd_name_for_address(entry);
 362 }
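
// Illustrative sketch: FUNCTION_CASE above is assumed to be a simple
// address-to-name mapping macro along these lines (its definition is in an
// elided hunk):
//
//   #define FUNCTION_CASE(a, f) \
//     if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f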
 363 
 364 
 365 JRT_ENTRY_PROF(void, Runtime1, new_instance, Runtime1::new_instance(JavaThread* current, Klass* klass))
 366 #ifndef PRODUCT
 367   if (PrintC1Statistics) {
 368     _new_instance_slowcase_cnt++;
 369   }
 370 #endif
 371   assert(klass->is_klass(), "not a class");
 372   Handle holder(current, klass->klass_holder()); // keep the klass alive
 373   InstanceKlass* h = InstanceKlass::cast(klass);
 374   h->check_valid_for_instantiation(true, CHECK);
 375   // make sure klass is initialized
 376   h->initialize(CHECK);
 377   // allocate instance and return via TLS
 378   oop obj = h->allocate_instance(CHECK);
 379   current->set_vm_result(obj);
 380 JRT_END
 381 
 382 
 383 JRT_ENTRY_PROF(void, Runtime1, new_type_array, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
 384 #ifndef PRODUCT
 385   if (PrintC1Statistics) {
 386     _new_type_array_slowcase_cnt++;
 387   }
 388 #endif
  389   // Note: no handle for klass is needed since it is not used
  390   //       anymore after new_typeArray() and no GC can happen before.
  391   //       (This may have to change if this code changes!)
 392   assert(klass->is_klass(), "not a class");
 393   BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
 394   oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
 395   current->set_vm_result(obj);
  396   // Deoptimizing here is pretty rare but stressful for the deoptimization
  397   // machinery, so under DeoptimizeALot we force a deopt to exercise the path.
 398   if (DeoptimizeALot) {
 399     deopt_caller(current);
 400   }
 401 
 402 JRT_END
 403 
 404 
 405 JRT_ENTRY_PROF(void, Runtime1, new_object_array, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
 406 #ifndef PRODUCT
 407   if (PrintC1Statistics) {
 408     _new_object_array_slowcase_cnt++;
 409   }
 410 #endif
  411   // Note: no handle for klass is needed since it is not used
  412   //       anymore after new_objArray() and no GC can happen before.
  413   //       (This may have to change if this code changes!)
 414   assert(array_klass->is_klass(), "not a class");
 415   Handle holder(current, array_klass->klass_holder()); // keep the klass alive
 416   Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
 417   objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
 418   current->set_vm_result(obj);
  419   // Deoptimizing here is pretty rare but stressful for the deoptimization
  420   // machinery, so under DeoptimizeALot we force a deopt to exercise the path.
 421   if (DeoptimizeALot) {
 422     deopt_caller(current);
 423   }
 424 JRT_END
 425 
 426 
 427 JRT_ENTRY_PROF(void, Runtime1, new_multi_array, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
 428 #ifndef PRODUCT
 429   if (PrintC1Statistics) {
 430     _new_multi_array_slowcase_cnt++;
 431   }
 432 #endif
 433   assert(klass->is_klass(), "not a class");
 434   assert(rank >= 1, "rank must be nonzero");
 435   Handle holder(current, klass->klass_holder()); // keep the klass alive
 436   oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
 437   current->set_vm_result(obj);
 438 JRT_END
 439 
 440 
 441 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
 442   tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
 443 JRT_END
 444 
 445 
 446 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
 447   ResourceMark rm(current);

 478       case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
 479       case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
 480       case Bytecodes::_if_icmple: case Bytecodes::_ifle:
 481       case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
 482       case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
 483       case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
 484       case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
 485         offset = (int16_t)Bytes::get_Java_u2(pc + 1);
 486         break;
 487       case Bytecodes::_goto_w:
 488         offset = Bytes::get_Java_u4(pc + 1);
 489         break;
 490       default: ;
 491     }
 492     bci = branch_bci + offset;
 493   }
 494   osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
 495   return osr_nm;
 496 }
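
// Worked example for the branch arithmetic above (illustrative only): a
// Bytecodes::_goto at bci 10 whose operand bytes are 0xFF 0xF6 gives
// Bytes::get_Java_u2(pc + 1) == 0xFFF6, i.e. (int16_t)-10, so the computed
// target is bci = 10 + (-10) = 0, a backward branch to the method start.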
 497 
 498 JRT_BLOCK_ENTRY_PROF(address, Runtime1, counter_overflow, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
 499   nmethod* osr_nm;
 500   JRT_BLOCK
 501     osr_nm = counter_overflow_helper(current, bci, method);
 502     if (osr_nm != nullptr) {
 503       RegisterMap map(current,
 504                       RegisterMap::UpdateMap::skip,
 505                       RegisterMap::ProcessFrames::include,
 506                       RegisterMap::WalkContinuation::skip);
 507       frame fr =  current->last_frame().sender(&map);
 508       Deoptimization::deoptimize_frame(current, fr.id());
 509     }
 510   JRT_BLOCK_END
 511   return nullptr;
 512 JRT_END
 513 
 514 extern void vm_exit(int code);
 515 
  516 // Enter this method from the compiled code handler below. This is where we transition
 517 // to VM mode. This is done as a helper routine so that the method called directly
 518 // from compiled code does not have to transition to VM. This allows the entry
 519 // method to see if the nmethod that we have just looked up a handler for has
  520 // been deoptimized while we were in the vm. This simplifies the assembly code
  521 // in the cpu directories.
 522 //
  523 // We are entering here from the exception stub (via the entry method below).
  524 // If there is a compiled exception handler in this method, we will continue there;
  525 // otherwise we will unwind the stack and continue at the caller of the top frame method.
 526 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
 527 // control the area where we can allow a safepoint. After we exit the safepoint area we can
 528 // check to see if the handler we are going to return is now in a nmethod that has
 529 // been deoptimized. If that is the case we return the deopt blob
 530 // unpack_with_exception entry instead. This makes life for the exception blob easier
 531 // because making that same check and diverting is painful from assembly language.
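//
// Illustrative sketch of that check, as the (elided) entry method presumably
// performs it after this helper returns; unpack_with_exception is the entry
// named in the comment above, and the exact call shape is an assumption:
//
//   address handler = exception_handler_for_pc_helper(current, ex, pc, nm);
//   if (nm != nullptr && caller_is_deopted(current)) {
//     handler = SharedRuntime::deopt_blob()->unpack_with_exception();
//   }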
 532 JRT_ENTRY_NO_ASYNC_PROF(static address, Runtime1, exception_handler_for_pc_helper, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
 533   // Reset method handle flag.
 534   current->set_is_method_handle_return(false);
 535 
 536   Handle exception(current, ex);
 537 
 538   // This function is called when we are about to throw an exception. Therefore,
  539   // we have to poll the stack watermark barrier to make sure that stack frames
  540   // which are not yet safe are made safe before returning into them.
 541   if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) {
 542     // The C1StubId::handle_exception_from_callee_id handler is invoked after the
 543     // frame has been unwound. It instead builds its own stub frame, to call the
 544     // runtime. But the throwing frame has already been unwound here.
 545     StackWatermarkSet::after_unwind(current);
 546   }
 547 
 548   nm = CodeCache::find_nmethod(pc);
 549   assert(nm != nullptr, "this is not an nmethod");
  550   // Adjust the pc as needed.
 551   if (nm->is_deopt_pc(pc)) {
 552     RegisterMap map(current,

 744     _throw_class_cast_exception_count++;
 745   }
 746 #endif
 747   ResourceMark rm(current);
 748   char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
 749   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
 750 JRT_END
 751 
 752 
 753 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
 754 #ifndef PRODUCT
 755   if (PrintC1Statistics) {
 756     _throw_incompatible_class_change_error_count++;
 757   }
 758 #endif
 759   ResourceMark rm(current);
 760   SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
 761 JRT_END
 762 
 763 
 764 JRT_BLOCK_ENTRY_PROF(void, Runtime1, monitorenter, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
 765 #ifndef PRODUCT
 766   if (PrintC1Statistics) {
 767     _monitorenter_slowcase_cnt++;
 768   }
 769 #endif
 770   if (LockingMode == LM_MONITOR) {
 771     lock->set_obj(obj);
 772   }
 773   assert(obj == lock->obj(), "must match");
 774   SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
 775 JRT_END
 776 
 777 
 778 JRT_LEAF_PROF(void, Runtime1, monitorexit, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
 779   assert(current == JavaThread::current(), "pre-condition");
 780 #ifndef PRODUCT
 781   if (PrintC1Statistics) {
 782     _monitorexit_slowcase_cnt++;
 783   }
 784 #endif
 785   assert(current->last_Java_sp(), "last_Java_sp must be set");
 786   oop obj = lock->obj();
 787   assert(oopDesc::is_oop(obj), "must be null or an object");
 788   SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
 789 JRT_END
 790 
 791 // Cf. OptoRuntime::deoptimize_caller_frame
 792 JRT_ENTRY_PROF(void, Runtime1, deoptimize, Runtime1::deoptimize(JavaThread* current, jint trap_request))
 793   // Called from within the owner thread, so no need for safepoint
 794   RegisterMap reg_map(current,
 795                       RegisterMap::UpdateMap::skip,
 796                       RegisterMap::ProcessFrames::include,
 797                       RegisterMap::WalkContinuation::skip);
 798   frame stub_frame = current->last_frame();
 799   assert(stub_frame.is_runtime_frame(), "Sanity check");
 800   frame caller_frame = stub_frame.sender(&reg_map);
 801   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 802   assert(nm != nullptr, "Sanity check");
 803   methodHandle method(current, nm->method());
 804   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 805   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 806   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 807 
 808   if (action == Deoptimization::Action_make_not_entrant) {
 809     if (nm->make_not_entrant()) {
 810       if (reason == Deoptimization::Reason_tenured) {
 811         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
 812         if (trap_mdo != nullptr) {

 815       }
 816     }
 817   }
 818 
 819   // Deoptimize the caller frame.
 820   Deoptimization::deoptimize_frame(current, caller_frame.id());
 821   // Return to the now deoptimized frame.
 822 JRT_END
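
// Illustrative note: trap_request packs the deopt reason and action into a
// single int, which trap_request_action()/trap_request_reason() above unpack.
// A sketch of how a trap request would be built (the exact encoding lives in
// Deoptimization):
//
//   int req = Deoptimization::make_trap_request(
//                 Deoptimization::Reason_tenured,
//                 Deoptimization::Action_make_not_entrant);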
 823 
 824 
 825 #ifndef DEOPTIMIZE_WHEN_PATCHING
 826 
 827 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
 828   Bytecode_field field_access(caller, bci);
 829   // This can be static or non-static field access
 830   Bytecodes::Code code       = field_access.code();
 831 
 832   // We must load class, initialize class and resolve the field
 833   fieldDescriptor result; // initialize class if needed
 834   constantPoolHandle constants(THREAD, caller->constants());
 835   LinkResolver::resolve_field_access(result, constants, field_access.index(), caller,
 836                                      Bytecodes::java_code(code), true /*initialize_class*/, CHECK_NULL);
 837   return result.field_holder();
 838 }
 839 
 840 
 841 //
 842 // This routine patches sites where a class wasn't loaded or
 843 // initialized at the time the code was generated.  It handles
 844 // references to classes, fields and forcing of initialization.  Most
  845 // of the cases are straightforward and involve simply forcing
  846 // resolution of a class, rewriting the instruction stream with the
  847 // needed constant and replacing the call to this function with the
  848 // patched code.  The case for static fields is more complicated since
  849 // the thread which is in the process of initializing a class can
  850 // access its static fields while other threads can't, so the code
  851 // either has to deoptimize when this case is detected or execute a
  852 // check that the current thread is the initializing thread.
  853 //
 854 //
 855 // Patches basically look like this:
 856 //

 919 // always end up with a correct outcome.  This is easiest if there are
 920 // few or no intermediate states.  (Some inline caches have two
 921 // related instructions that must be patched in tandem.  For those,
 922 // intermediate states seem to be unavoidable, but we will get the
 923 // right answer from all possible observation orders.)
 924 //
 925 // When patching the entry instruction at the head of a method, or a
 926 // linkable call instruction inside of a method, we try very hard to
 927 // use a patch sequence which executes as a single memory transaction.
 928 // This means, in practice, that when thread A patches an instruction,
 929 // it should patch a 32-bit or 64-bit word that somehow overlaps the
 930 // instruction or is contained in it.  We believe that memory hardware
 931 // will never break up such a word write, if it is naturally aligned
 932 // for the word being written.  We also know that some CPUs work very
 933 // hard to create atomic updates even of naturally unaligned words,
 934 // but we don't want to bet the farm on this always working.
 935 //
 936 // Therefore, if there is any chance of a race condition, we try to
 937 // patch only naturally aligned words, as single, full-word writes.
 938 
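// Illustrative sketch of the "single, full-word write" described above; the
// real per-platform patching lives in the cpu directories, and is_aligned/
// Atomic::store are assumed from utilities/align.hpp and runtime/atomic.hpp:
//
//   static void patch_aligned_word(volatile int32_t* site, int32_t bits) {
//     assert(is_aligned(site, sizeof(int32_t)), "must be naturally aligned");
//     Atomic::store(site, bits);  // one full-word write; never torn
//   }
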
 939 JRT_ENTRY_PROF(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, C1StubId stub_id))
 940 #ifndef PRODUCT
 941   if (PrintC1Statistics) {
 942     _patch_code_slowcase_cnt++;
 943   }
 944 #endif
 945 
 946   ResourceMark rm(current);
 947   RegisterMap reg_map(current,
 948                       RegisterMap::UpdateMap::skip,
 949                       RegisterMap::ProcessFrames::include,
 950                       RegisterMap::WalkContinuation::skip);
 951   frame runtime_frame = current->last_frame();
 952   frame caller_frame = runtime_frame.sender(&reg_map);
 953 
 954   // last java frame on stack
 955   vframeStream vfst(current, true);
 956   assert(!vfst.at_end(), "Java frame must exist");
 957 
 958   methodHandle caller_method(current, vfst.method());
  959   // Note that caller_method->code() may not be the same as caller_code because of OSR

 964   Bytecodes::Code code = caller_method()->java_code_at(bci);
 965 
 966   // this is used by assertions in the access_field_patching_id
 967   BasicType patch_field_type = T_ILLEGAL;
 968   bool deoptimize_for_volatile = false;
 969   bool deoptimize_for_atomic = false;
 970   int patch_field_offset = -1;
 971   Klass* init_klass = nullptr; // klass needed by load_klass_patching code
 972   Klass* load_klass = nullptr; // klass needed by load_klass_patching code
 973   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
 974   Handle appendix(current, nullptr); // oop needed by appendix_patching code
 975   bool load_klass_or_mirror_patch_id =
 976     (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
 977 
 978   if (stub_id == C1StubId::access_field_patching_id) {
 979 
 980     Bytecode_field field_access(caller_method, bci);
 981     fieldDescriptor result; // initialize class if needed
 982     Bytecodes::Code code = field_access.code();
 983     constantPoolHandle constants(current, caller_method->constants());
 984     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method,
 985                                        Bytecodes::java_code(code), true /*initialize_class*/, CHECK);
 986     patch_field_offset = result.offset();
 987 
  988     // If we're patching a field which is volatile, then at compile time it
  989     // must not have been known to be volatile, so the generated code
 990     // isn't correct for a volatile reference.  The nmethod has to be
 991     // deoptimized so that the code can be regenerated correctly.
 992     // This check is only needed for access_field_patching since this
 993     // is the path for patching field offsets.  load_klass is only
 994     // used for patching references to oops which don't need special
 995     // handling in the volatile case.
 996 
 997     deoptimize_for_volatile = result.access_flags().is_volatile();
 998 
 999     // If we are patching a field which should be atomic, then
 1000     // the generated code is not correct either, so force deoptimization.
 1001     // We only need to cover T_LONG and T_DOUBLE fields, as we can
1002     // break access atomicity only for them.
1003 
1004     // Strictly speaking, the deoptimization on 64-bit platforms
1005     // is unnecessary, and T_LONG stores on 32-bit platforms need

1303     switch (code) {
1304       case Bytecodes::_new:
1305       case Bytecodes::_anewarray:
1306       case Bytecodes::_multianewarray:
1307       case Bytecodes::_instanceof:
1308       case Bytecodes::_checkcast: {
1309         Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1310         constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1311         if (tag.is_unresolved_klass_in_error()) {
1312           return false; // throws resolution error
1313         }
1314         break;
1315       }
1316 
1317       default: break;
1318     }
1319   }
1320   return true;
1321 }
1322 
1323 PROF_ENTRY(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, C1StubId stub_id))
1324 #ifndef PRODUCT
1325   if (PrintC1Statistics) {
1326     _patch_code_slowcase_cnt++;
1327   }
1328 #endif
1329 
 1330   // Enable WXWrite: the function is called by a c1 stub as a runtime function
 1331   // (see the other implementation above).
1332   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1333 
1334   if (TracePatching) {
1335     tty->print_cr("Deoptimizing because patch is needed");
1336   }
1337 
1338   RegisterMap reg_map(current,
1339                       RegisterMap::UpdateMap::skip,
1340                       RegisterMap::ProcessFrames::include,
1341                       RegisterMap::WalkContinuation::skip);
1342 
1343   frame runtime_frame = current->last_frame();
1344   frame caller_frame = runtime_frame.sender(&reg_map);
1345   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1346 
1347   if (is_patching_needed(current, stub_id)) {
1348     // Make sure the nmethod is invalidated, i.e. made not entrant.
1349     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1350     if (nm != nullptr) {
1351       nm->make_not_entrant();
1352     }
1353   }
1354 
1355   Deoptimization::deoptimize_frame(current, caller_frame.id());
1356   // Return to the now deoptimized frame.
1357   postcond(caller_is_deopted(current));
1358 PROF_END
1359 
1360 #endif // DEOPTIMIZE_WHEN_PATCHING
1361 
1362 // Entry point for compiled code. We want to patch a nmethod.
 1363 // We don't do a normal VM transition here because we want to
 1364 // know, after the patching is complete and any safepoint(s) are taken,
 1365 // whether the calling nmethod was deoptimized. We do this by calling a
1366 // helper method which does the normal VM transition and when it
1367 // completes we can check for deoptimization. This simplifies the
1368 // assembly code in the cpu directories.
1369 //
1370 int Runtime1::move_klass_patching(JavaThread* current) {
1371 //
1372 // NOTE: we are still in Java
1373 //
1374   debug_only(NoHandleMark nhm;)
1375   {
1376     // Enter VM mode
1377     ResetNoHandleMark rnhm;
1378     patch_code(current, C1StubId::load_klass_patching_id);

1429 int Runtime1::access_field_patching(JavaThread* current) {
1430   //
1431   // NOTE: we are still in Java
1432   //
1433   // Handles created in this function will be deleted by the
1434   // HandleMarkCleaner in the transition to the VM.
1435   NoHandleMark nhm;
1436   {
1437     // Enter VM mode
1438     ResetNoHandleMark rnhm;
1439     patch_code(current, C1StubId::access_field_patching_id);
1440   }
1441   // Back in JAVA, use no oops DON'T safepoint
1442 
1443   // Return true if calling code is deoptimized
1444 
1445   return caller_is_deopted(current);
1446 }
1447 
1448 
1449 JRT_LEAF_PROF_NO_THREAD(void, Runtime1, trace_block_entry, Runtime1::trace_block_entry(jint block_id))
1450   // for now we just print out the block id
1451   tty->print("%d ", block_id);
1452 JRT_END
1453 
1454 
1455 JRT_LEAF_PROF_NO_THREAD(int, Runtime1, is_instance_of, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
 1456   // We have to return int instead of bool; otherwise there may be a mismatch
 1457   // between the C calling convention and the Java one.
 1458   // E.g., on x86, GCC may clear only %al when returning a bool false, but
 1459   // the JVM reads the whole %eax as the return value, so stale upper bits
 1460   // could be misinterpreted as a boolean true.
1461 
1462   assert(mirror != nullptr, "should null-check on mirror before calling");
1463   Klass* k = java_lang_Class::as_Klass(mirror);
1464   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1465 JRT_END
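
// Illustrative sketch of the calling-convention hazard described above
// (assumed x86-64 behavior): a C++ callee returning bool may write only the
// low byte of the return register, e.g.
//
//   bool f() { return false; }   // may compile to: mov $0x0, %al
//
// while JIT-compiled callers test the full 32-bit register, so stale bits in
// %eax above %al would read as "true".  Returning int defines the whole register.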
1466 
1467 JRT_ENTRY_PROF(void, Runtime1, predicate_failed_trap, Runtime1::predicate_failed_trap(JavaThread* current))
1468   ResourceMark rm;
1469 
1470   RegisterMap reg_map(current,
1471                       RegisterMap::UpdateMap::skip,
1472                       RegisterMap::ProcessFrames::include,
1473                       RegisterMap::WalkContinuation::skip);
1474   frame runtime_frame = current->last_frame();
1475   frame caller_frame = runtime_frame.sender(&reg_map);
1476 
1477   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1478   assert (nm != nullptr, "no more nmethod?");
1479   nm->make_not_entrant();
1480 
1481   methodHandle m(current, nm->method());
1482   MethodData* mdo = m->method_data();
1483 
1484   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1485     // Build an MDO.  Ignore errors like OutOfMemory;
1486     // that simply means we won't have an MDO to update.
1487     Method::build_profiling_method_data(m, THREAD);

1507   }
1508 
1509 
1510   Deoptimization::deoptimize_frame(current, caller_frame.id());
1511 
1512 JRT_END
1513 
1514 // Check exception if AbortVMOnException flag set
1515 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1516   ResourceMark rm;
1517   const char* message = nullptr;
1518   if (ex->is_a(vmClasses::Throwable_klass())) {
1519     oop msg = java_lang_Throwable::message(ex);
1520     if (msg != nullptr) {
1521       message = java_lang_String::as_utf8_string(msg);
1522     }
1523   }
1524   Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1525 JRT_END
1526 
1527 #define DO_COUNTERS(macro) \
1528   macro(Runtime1, new_instance) \
1529   macro(Runtime1, new_type_array) \
1530   macro(Runtime1, new_object_array) \
1531   macro(Runtime1, new_multi_array) \
1532   macro(Runtime1, counter_overflow) \
1533   macro(Runtime1, exception_handler_for_pc_helper) \
1534   macro(Runtime1, monitorenter) \
1535   macro(Runtime1, monitorexit) \
1536   macro(Runtime1, deoptimize) \
1537   macro(Runtime1, is_instance_of) \
1538   macro(Runtime1, predicate_failed_trap) \
1539   macro(Runtime1, patch_code)
1540 
1541 #define INIT_COUNTER(sub, name) \
1542   NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
1543   NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
1544 
1545 void Runtime1::init_counters() {
1546   assert(CompilerConfig::is_c1_enabled(), "");
1547 
1548   if (UsePerfData) {
1549     EXCEPTION_MARK;
1550 
1551     DO_COUNTERS(INIT_COUNTER)
1552 
1553     if (HAS_PENDING_EXCEPTION) {
1554       vm_exit_during_initialization("Runtime1::init_counters() failed unexpectedly");
1555     }
1556   }
1557 }
1558 #undef INIT_COUNTER
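
// Illustrative sketch: INIT_COUNTER(Runtime1, new_instance) above creates a
// tick-timer pair and an event counter; assuming SUN_CI names the "sun.ci"
// PerfData namespace, they surface to jstat/PerfData roughly as:
//
//   sun.ci.Runtime1::new_instance         (elapsed/thread tick timers)
//   sun.ci.Runtime1::new_instance_count   (invocation count)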
1559 
1560 #define PRINT_COUNTER(sub, name) { \
1561   if (_perf_##sub##_##name##_count != nullptr) {  \
1562     jlong count = _perf_##sub##_##name##_count->get_value(); \
1563     if (count > 0) { \
1564       st->print_cr("  %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
1565                    _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
1566                    _perf_##sub##_##name##_timer->thread_counter_value_us(), \
1567                    count); \
1568     }}}
1569 
1570 
1571 void Runtime1::print_counters_on(outputStream* st) {
1572   if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c1_enabled()) {
1573     DO_COUNTERS(PRINT_COUNTER)
1574   } else {
1575     st->print_cr("  Runtime1: no info (%s is disabled)",
1576                  (!CompilerConfig::is_c1_enabled() ? "C1" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
1577   }
1578 }
1579 
1580 #undef PRINT_COUNTER
1581 #undef DO_COUNTERS
1582 
1583 #ifndef PRODUCT
1584 void Runtime1::print_statistics_on(outputStream* st) {
1585   st->print_cr("C1 Runtime statistics:");
1586   st->print_cr(" _resolve_invoke_virtual_cnt:     %u", SharedRuntime::_resolve_virtual_ctr);
1587   st->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1588   st->print_cr(" _resolve_invoke_static_cnt:      %u", SharedRuntime::_resolve_static_ctr);
1589   st->print_cr(" _handle_wrong_method_cnt:        %u", SharedRuntime::_wrong_method_ctr);
1590   st->print_cr(" _ic_miss_cnt:                    %u", SharedRuntime::_ic_miss_ctr);
1591   st->print_cr(" _generic_arraycopystub_cnt:      %u", _generic_arraycopystub_cnt);
1592   st->print_cr(" _byte_arraycopy_cnt:             %u", _byte_arraycopy_stub_cnt);
1593   st->print_cr(" _short_arraycopy_cnt:            %u", _short_arraycopy_stub_cnt);
1594   st->print_cr(" _int_arraycopy_cnt:              %u", _int_arraycopy_stub_cnt);
1595   st->print_cr(" _long_arraycopy_cnt:             %u", _long_arraycopy_stub_cnt);
1596   st->print_cr(" _oop_arraycopy_cnt:              %u", _oop_arraycopy_stub_cnt);
1597   st->print_cr(" _arraycopy_slowcase_cnt:         %u", _arraycopy_slowcase_cnt);
1598   st->print_cr(" _arraycopy_checkcast_cnt:        %u", _arraycopy_checkcast_cnt);
1599   st->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1600 
1601   st->print_cr(" _new_type_array_slowcase_cnt:    %u", _new_type_array_slowcase_cnt);
1602   st->print_cr(" _new_object_array_slowcase_cnt:  %u", _new_object_array_slowcase_cnt);
1603   st->print_cr(" _new_instance_slowcase_cnt:      %u", _new_instance_slowcase_cnt);
1604   st->print_cr(" _new_multi_array_slowcase_cnt:   %u", _new_multi_array_slowcase_cnt);
1605   st->print_cr(" _monitorenter_slowcase_cnt:      %u", _monitorenter_slowcase_cnt);
1606   st->print_cr(" _monitorexit_slowcase_cnt:       %u", _monitorexit_slowcase_cnt);
1607   st->print_cr(" _patch_code_slowcase_cnt:        %u", _patch_code_slowcase_cnt);
1608 
1609   st->print_cr(" _throw_range_check_exception_count:            %u:", _throw_range_check_exception_count);
1610   st->print_cr(" _throw_index_exception_count:                  %u:", _throw_index_exception_count);
1611   st->print_cr(" _throw_div0_exception_count:                   %u:", _throw_div0_exception_count);
1612   st->print_cr(" _throw_null_pointer_exception_count:           %u:", _throw_null_pointer_exception_count);
1613   st->print_cr(" _throw_class_cast_exception_count:             %u:", _throw_class_cast_exception_count);
1614   st->print_cr(" _throw_incompatible_class_change_error_count:  %u:", _throw_incompatible_class_change_error_count);
1615   st->print_cr(" _throw_count:                                  %u:", _throw_count);
1616 
1617   SharedRuntime::print_ic_miss_histogram_on(st);
1618   st->cr();
1619 }
1620 #endif // PRODUCT