20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/codeBuffer.hpp"
26 #include "c1/c1_CodeStubs.hpp"
27 #include "c1/c1_Defs.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_MacroAssembler.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "classfile/javaClasses.inline.hpp"
32 #include "classfile/vmClasses.hpp"
33 #include "classfile/vmSymbols.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeBlob.hpp"
36 #include "code/compiledIC.hpp"
37 #include "code/scopeDesc.hpp"
38 #include "code/vtableStubs.hpp"
39 #include "compiler/compilationPolicy.hpp"
40 #include "compiler/disassembler.hpp"
41 #include "compiler/oopMap.hpp"
42 #include "gc/shared/barrierSet.hpp"
43 #include "gc/shared/c1/barrierSetC1.hpp"
44 #include "gc/shared/collectedHeap.hpp"
45 #include "interpreter/bytecode.hpp"
46 #include "interpreter/interpreter.hpp"
47 #include "jfr/support/jfrIntrinsics.hpp"
48 #include "logging/log.hpp"
49 #include "memory/oopFactory.hpp"
50 #include "memory/resourceArea.hpp"
51 #include "memory/universe.hpp"
52 #include "oops/access.inline.hpp"
53 #include "oops/objArrayKlass.hpp"
54 #include "oops/objArrayOop.inline.hpp"
55 #include "oops/oop.inline.hpp"
56 #include "prims/jvmtiExport.hpp"
57 #include "runtime/atomicAccess.hpp"
58 #include "runtime/fieldDescriptor.inline.hpp"
59 #include "runtime/frame.inline.hpp"
60 #include "runtime/handles.inline.hpp"
61 #include "runtime/interfaceSupport.inline.hpp"
62 #include "runtime/javaCalls.hpp"
63 #include "runtime/sharedRuntime.hpp"
64 #include "runtime/stackWatermarkSet.hpp"
65 #include "runtime/stubInfo.hpp"
66 #include "runtime/stubRoutines.hpp"
67 #include "runtime/vframe.inline.hpp"
68 #include "runtime/vframeArray.hpp"
69 #include "runtime/vm_version.hpp"
70 #include "utilities/copy.hpp"
71 #include "utilities/events.hpp"
72
73
74 // Implementation of StubAssembler
75
76 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
77 _name = name;
78 _must_gc_arguments = false;
79 _frame_size = no_frame_size;
80 _num_rt_args = 0;
81 _stub_id = stub_id;
82 }
83
84
85 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
86 _name = name;
87 _must_gc_arguments = must_gc_arguments;
88 }
89
252 case StubId::c1_slow_subtype_check_id:
253 case StubId::c1_fpu2long_stub_id:
254 case StubId::c1_unwind_exception_id:
255 case StubId::c1_counter_overflow_id:
256 case StubId::c1_is_instance_of_id:
257 expect_oop_map = false;
258 break;
259 default:
260 break;
261 }
262 #endif
263 C1StubAssemblerCodeGenClosure cl(id);
264 CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
265 // install blob
266 int idx = StubInfo::c1_offset(id); // will assert on non-c1 id
267 _blobs[idx] = blob;
268 return blob != nullptr;
269 }
270
271 bool Runtime1::initialize(BufferBlob* blob) {
272 // platform-dependent initialization
273 initialize_pd();
274 // iterate blobs in C1 group and generate a single stub per blob
275 StubId id = StubInfo::stub_base(StubGroup::C1);
276 StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
277 for (; id != limit; id = StubInfo::next(id)) {
278 if (!generate_blob_for(blob, id)) {
279 return false;
280 }
281 if (id == StubId::c1_forward_exception_id) {
282 // publish early c1 stubs at this point so later stubs can refer to them
283 AOTCodeCache::init_early_c1_table();
284 }
285 }
286 // printing
287 #ifndef PRODUCT
288 if (PrintSimpleStubs) {
289 ResourceMark rm;
290 id = StubInfo::stub_base(StubGroup::C1);
291 for (; id != limit; id = StubInfo::next(id)) {
351 FUNCTION_CASE(entry, trace_block_entry);
352 #ifdef JFR_HAVE_INTRINSICS
353 FUNCTION_CASE(entry, JfrTime::time_function());
354 #endif
355 FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
356 FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
357 FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
358 FUNCTION_CASE(entry, StubRoutines::dexp());
359 FUNCTION_CASE(entry, StubRoutines::dlog());
360 FUNCTION_CASE(entry, StubRoutines::dlog10());
361 FUNCTION_CASE(entry, StubRoutines::dpow());
362 FUNCTION_CASE(entry, StubRoutines::dsin());
363 FUNCTION_CASE(entry, StubRoutines::dcos());
364 FUNCTION_CASE(entry, StubRoutines::dtan());
365 FUNCTION_CASE(entry, StubRoutines::dsinh());
366 FUNCTION_CASE(entry, StubRoutines::dtanh());
367 FUNCTION_CASE(entry, StubRoutines::dcbrt());
368
369 #undef FUNCTION_CASE
370
371 // Soft float adds more runtime names.
372 return pd_name_for_address(entry);
373 }
374
375
376 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* current, Klass* klass))
377 #ifndef PRODUCT
378 if (PrintC1Statistics) {
379 _new_instance_slowcase_cnt++;
380 }
381 #endif
382 assert(klass->is_klass(), "not a class");
383 Handle holder(current, klass->klass_holder()); // keep the klass alive
384 InstanceKlass* h = InstanceKlass::cast(klass);
385 h->check_valid_for_instantiation(true, CHECK);
386 // make sure klass is initialized
387 h->initialize(CHECK);
388 // allocate instance and return via TLS
389 oop obj = h->allocate_instance(CHECK);
390 current->set_vm_result_oop(obj);
391 JRT_END
392
393
394 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
395 #ifndef PRODUCT
396 if (PrintC1Statistics) {
397 _new_type_array_slowcase_cnt++;
398 }
399 #endif
400 // Note: no handle for klass is needed since it is not used
401 // anymore after new_typeArray() and no GC can happen before.
402 // (This may have to change if this code changes!)
403 assert(klass->is_klass(), "not a class");
404 BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
405 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
406 current->set_vm_result_oop(obj);
407 // Reaching this slow path is pretty rare, but deoptimizing here is stressful
408 // to the deoptimization machinery, so force a deopt to stress the path.
409 if (DeoptimizeALot) {
410 deopt_caller(current);
411 }
412
413 JRT_END
414
415
416 JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
417 #ifndef PRODUCT
418 if (PrintC1Statistics) {
419 _new_object_array_slowcase_cnt++;
420 }
421 #endif
422 // Note: no handle for klass is needed since it is not used
423 // anymore after new_objArray() and no GC can happen before.
424 // (This may have to change if this code changes!)
425 assert(array_klass->is_klass(), "not a class");
426 Handle holder(current, array_klass->klass_holder()); // keep the klass alive
427 Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
428 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
429 current->set_vm_result_oop(obj);
430 // Reaching this slow path is pretty rare, but deoptimizing here is stressful
431 // to the deoptimization machinery, so force a deopt to stress the path.
432 if (DeoptimizeALot) {
433 deopt_caller(current);
434 }
435 JRT_END
436
437
438 JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
439 #ifndef PRODUCT
440 if (PrintC1Statistics) {
441 _new_multi_array_slowcase_cnt++;
442 }
443 #endif
444 assert(klass->is_klass(), "not a class");
445 assert(rank >= 1, "rank must be nonzero");
446 Handle holder(current, klass->klass_holder()); // keep the klass alive
447 oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
448 current->set_vm_result_oop(obj);
449 JRT_END
450
451
452 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubId id))
453 tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
454 JRT_END
455
456
457 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
458 ResourceMark rm(current);
489 case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
490 case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
491 case Bytecodes::_if_icmple: case Bytecodes::_ifle:
492 case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
493 case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
494 case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
495 case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
496 offset = (int16_t)Bytes::get_Java_u2(pc + 1);
497 break;
498 case Bytecodes::_goto_w:
499 offset = Bytes::get_Java_u4(pc + 1);
500 break;
501 default: ;
502 }
503 bci = branch_bci + offset;
504 }
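// Worked example (illustrative only, not from the source): a backward ifle
// whose two operand bytes are 0xFF 0xFE yields offset = (int16_t)0xFFFE = -2,
// so bci = branch_bci - 2, i.e. the event below reports the loop head that
// sits behind the overflowing backward branch.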
505 osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
506 return osr_nm;
507 }
508
509 JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
510 nmethod* osr_nm;
511 JRT_BLOCK_NO_ASYNC
512 osr_nm = counter_overflow_helper(current, bci, method);
513 if (osr_nm != nullptr) {
514 RegisterMap map(current,
515 RegisterMap::UpdateMap::skip,
516 RegisterMap::ProcessFrames::include,
517 RegisterMap::WalkContinuation::skip);
518 frame fr = current->last_frame().sender(&map);
519 Deoptimization::deoptimize_frame(current, fr.id());
520 }
521 JRT_BLOCK_END
522 return nullptr;
523 JRT_END
524
525 extern void vm_exit(int code);
526
527 // Enter this method from the compiled code handler below. This is where we
528 // transition to VM mode. This is done as a helper routine so that the method
529 // called directly from compiled code does not have to transition to VM. This
530 // allows the entry method to see if the nmethod that we have just looked up a
531 // handler for has been deoptimized while we were in the VM. This simplifies the
532 // assembly code in the cpu directories.
533 //
534 // We are entering here from the exception stub (via the entry method below).
535 // If there is a compiled exception handler in this method, we will continue there;
536 // otherwise we will unwind the stack and continue at the caller of the top frame method.
537 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
538 // control the area where we can allow a safepoint. After we exit the safepoint area we can
539 // check to see if the handler we are going to return is now in an nmethod that has
540 // been deoptimized. If that is the case we return the deopt blob
541 // unpack_with_exception entry instead. This makes life for the exception blob easier
542 // because making that same check and diverting is painful from assembly language.
543 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
544 // Reset method handle flag.
545 current->set_is_method_handle_return(false);
546
547 Handle exception(current, ex);
548
549 // This function is called when we are about to throw an exception. Therefore,
550 // we have to poll the stack watermark barrier to make sure that not yet safe
551 // stack frames are made safe before returning into them.
552 if (current->last_frame().cb() == Runtime1::blob_for(StubId::c1_handle_exception_from_callee_id)) {
553 // The StubId::c1_handle_exception_from_callee_id handler is invoked after the
554 // frame has been unwound. It instead builds its own stub frame, to call the
555 // runtime. But the throwing frame has already been unwound here.
556 StackWatermarkSet::after_unwind(current);
557 }
558
559 nm = CodeCache::find_nmethod(pc);
560 assert(nm != nullptr, "this is not an nmethod");
561 // Adjust the pc as needed.
562 if (nm->is_deopt_pc(pc)) {
563 RegisterMap map(current,
755 _throw_class_cast_exception_count++;
756 }
757 #endif
758 ResourceMark rm(current);
759 char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
760 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
761 JRT_END
762
763
764 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
765 #ifndef PRODUCT
766 if (PrintC1Statistics) {
767 _throw_incompatible_class_change_error_count++;
768 }
769 #endif
770 ResourceMark rm(current);
771 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
772 JRT_END
773
774
775 JRT_BLOCK_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
776 #ifndef PRODUCT
777 if (PrintC1Statistics) {
778 _monitorenter_slowcase_cnt++;
779 }
780 #endif
781 assert(obj == lock->obj(), "must match");
782 SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
783 JRT_END
784
785
786 JRT_LEAF(void, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
787 assert(current == JavaThread::current(), "pre-condition");
788 #ifndef PRODUCT
789 if (PrintC1Statistics) {
790 _monitorexit_slowcase_cnt++;
791 }
792 #endif
793 assert(current->last_Java_sp(), "last_Java_sp must be set");
794 oop obj = lock->obj();
795 assert(oopDesc::is_oop(obj), "must be an object");
796 SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
797 JRT_END
798
799 // Cf. OptoRuntime::deoptimize_caller_frame
800 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
801 // Called from within the owner thread, so no need for safepoint
802 RegisterMap reg_map(current,
803 RegisterMap::UpdateMap::skip,
804 RegisterMap::ProcessFrames::include,
805 RegisterMap::WalkContinuation::skip);
806 frame stub_frame = current->last_frame();
807 assert(stub_frame.is_runtime_frame(), "Sanity check");
808 frame caller_frame = stub_frame.sender(&reg_map);
809 nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
810 assert(nm != nullptr, "Sanity check");
811 methodHandle method(current, nm->method());
812 assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
813 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
814 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
815
816 if (action == Deoptimization::Action_make_not_entrant) {
817 if (nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE)) {
818 if (reason == Deoptimization::Reason_tenured) {
819 MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
820 if (trap_mdo != nullptr) {
823 }
824 }
825 }
826
827 // Deoptimize the caller frame.
828 Deoptimization::deoptimize_frame(current, caller_frame.id());
829 // Return to the now deoptimized frame.
830 JRT_END
831
832
833 #ifndef DEOPTIMIZE_WHEN_PATCHING
834
835 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
836 Bytecode_field field_access(caller, bci);
837 // This can be static or non-static field access
838 Bytecodes::Code code = field_access.code();
839
840 // We must load class, initialize class and resolve the field
841 fieldDescriptor result; // initialize class if needed
842 constantPoolHandle constants(THREAD, caller->constants());
843 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
844 return result.field_holder();
845 }
846
847
848 //
849 // This routine patches sites where a class wasn't loaded or
850 // initialized at the time the code was generated. It handles
851 // references to classes, fields and forcing of initialization. Most
852 // of the cases are straightforward and involve simply forcing
853 // resolution of a class, rewriting the instruction stream with the
854 // needed constant and replacing the call in this function with the
855 // patched code. The case for a static field is more complicated since
856 // the thread which is in the process of initializing a class can
857 // access its static fields while other threads can't, so the code
858 // either has to deoptimize when this case is detected or execute a
859 // check that the current thread is the initializing thread. The
860 // current
861 //
862 // Patches basically look like this:
863 //
926 // always end up with a correct outcome. This is easiest if there are
927 // few or no intermediate states. (Some inline caches have two
928 // related instructions that must be patched in tandem. For those,
929 // intermediate states seem to be unavoidable, but we will get the
930 // right answer from all possible observation orders.)
931 //
932 // When patching the entry instruction at the head of a method, or a
933 // linkable call instruction inside of a method, we try very hard to
934 // use a patch sequence which executes as a single memory transaction.
935 // This means, in practice, that when thread A patches an instruction,
936 // it should patch a 32-bit or 64-bit word that somehow overlaps the
937 // instruction or is contained in it. We believe that memory hardware
938 // will never break up such a word write, if it is naturally aligned
939 // for the word being written. We also know that some CPUs work very
940 // hard to create atomic updates even of naturally unaligned words,
941 // but we don't want to bet the farm on this always working.
942 //
943 // Therefore, if there is any chance of a race condition, we try to
944 // patch only naturally aligned words, as single, full-word writes.
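//
// Illustration (assumed addresses): if a patch site's 32-bit constant lives at
// a 4-byte-aligned address such as 0x7f0000401000, a single aligned 32-bit
// store of the new value is all-or-nothing to other threads: they observe
// either the entirely old or the entirely new word, never a torn mix.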
945
946 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, StubId stub_id ))
947 #ifndef PRODUCT
948 if (PrintC1Statistics) {
949 _patch_code_slowcase_cnt++;
950 }
951 #endif
952
953 ResourceMark rm(current);
954 RegisterMap reg_map(current,
955 RegisterMap::UpdateMap::skip,
956 RegisterMap::ProcessFrames::include,
957 RegisterMap::WalkContinuation::skip);
958 frame runtime_frame = current->last_frame();
959 frame caller_frame = runtime_frame.sender(&reg_map);
960
961 // last java frame on stack
962 vframeStream vfst(current, true);
963 assert(!vfst.at_end(), "Java frame must exist");
964
965 methodHandle caller_method(current, vfst.method());
966 // Note that caller_method->code() may not be the same as caller_code because of OSRs.
971 Bytecodes::Code code = caller_method()->java_code_at(bci);
972
973 // this is used by assertions in the access_field_patching_id case
974 BasicType patch_field_type = T_ILLEGAL;
975 bool deoptimize_for_volatile = false;
976 bool deoptimize_for_atomic = false;
977 int patch_field_offset = -1;
978 Klass* init_klass = nullptr; // klass needed by load_klass_patching code
979 Klass* load_klass = nullptr; // klass needed by load_klass_patching code
980 Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
981 Handle appendix(current, nullptr); // oop needed by appendix_patching code
982 bool load_klass_or_mirror_patch_id =
983 (stub_id == StubId::c1_load_klass_patching_id || stub_id == StubId::c1_load_mirror_patching_id);
984
985 if (stub_id == StubId::c1_access_field_patching_id) {
986
987 Bytecode_field field_access(caller_method, bci);
988 fieldDescriptor result; // initialize class if needed
989 Bytecodes::Code code = field_access.code();
990 constantPoolHandle constants(current, caller_method->constants());
991 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
992 patch_field_offset = result.offset();
993
994 // If we're patching a field which is volatile then at compile time it
995 // must not have been known to be volatile, so the generated code
996 // isn't correct for a volatile reference. The nmethod has to be
997 // deoptimized so that the code can be regenerated correctly.
998 // This check is only needed for access_field_patching since this
999 // is the path for patching field offsets. load_klass is only
1000 // used for patching references to oops which don't need special
1001 // handling in the volatile case.
1002
1003 deoptimize_for_volatile = result.access_flags().is_volatile();
1004
1005 // If we are patching a field which should be atomic, then
1006 // the generated code is not correct either, so force deoptimization.
1007 // We need to only cover T_LONG and T_DOUBLE fields, as we can
1008 // break access atomicity only for them.
1009
1010 // Strictly speaking, the deoptimization on 64-bit platforms
1011 // is unnecessary, and T_LONG stores on 32-bit platforms need
1309 switch (code) {
1310 case Bytecodes::_new:
1311 case Bytecodes::_anewarray:
1312 case Bytecodes::_multianewarray:
1313 case Bytecodes::_instanceof:
1314 case Bytecodes::_checkcast: {
1315 Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1316 constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1317 if (tag.is_unresolved_klass_in_error()) {
1318 return false; // throws resolution error
1319 }
1320 break;
1321 }
1322
1323 default: break;
1324 }
1325 }
1326 return true;
1327 }
1328
1329 void Runtime1::patch_code(JavaThread* current, StubId stub_id) {
1330 #ifndef PRODUCT
1331 if (PrintC1Statistics) {
1332 _patch_code_slowcase_cnt++;
1333 }
1334 #endif
1335
1336 // Enable WXWrite: the function is called by c1 stub as a runtime function
1337 // (see another implementation above).
1338 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1339
1340 if (TracePatching) {
1341 tty->print_cr("Deoptimizing because patch is needed");
1342 }
1343
1344 RegisterMap reg_map(current,
1345 RegisterMap::UpdateMap::skip,
1346 RegisterMap::ProcessFrames::include,
1347 RegisterMap::WalkContinuation::skip);
1348
1349 frame runtime_frame = current->last_frame();
1350 frame caller_frame = runtime_frame.sender(&reg_map);
1351 assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1352
1353 if (is_patching_needed(current, stub_id)) {
1354 // Make sure the nmethod is invalidated, i.e. made not entrant.
1355 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1356 if (nm != nullptr) {
1357 nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING);
1358 }
1359 }
1360
1361 Deoptimization::deoptimize_frame(current, caller_frame.id());
1362 // Return to the now deoptimized frame.
1363 postcond(caller_is_deopted(current));
1364 }
1365
1366 #endif // DEOPTIMIZE_WHEN_PATCHING
1367
1368 // Entry point for compiled code. We want to patch an nmethod.
1369 // We don't do a normal VM transition here because we want to
1370 // know, after the patching is complete and any safepoint(s) are taken,
1371 // whether the calling nmethod was deoptimized. We do this by calling a
1372 // helper method which does the normal VM transition and when it
1373 // completes we can check for deoptimization. This simplifies the
1374 // assembly code in the cpu directories.
1375 //
1376 int Runtime1::move_klass_patching(JavaThread* current) {
1377 //
1378 // NOTE: we are still in Java
1379 //
1380 DEBUG_ONLY(NoHandleMark nhm;)
1381 {
1382 // Enter VM mode
1383 ResetNoHandleMark rnhm;
1384 patch_code(current, StubId::c1_load_klass_patching_id);
1435 int Runtime1::access_field_patching(JavaThread* current) {
1436 //
1437 // NOTE: we are still in Java
1438 //
1439 // Handles created in this function will be deleted by the
1440 // HandleMarkCleaner in the transition to the VM.
1441 NoHandleMark nhm;
1442 {
1443 // Enter VM mode
1444 ResetNoHandleMark rnhm;
1445 patch_code(current, StubId::c1_access_field_patching_id);
1446 }
1447 // Back in Java: use no oops, DON'T safepoint.
1448
1449 // Return true if calling code is deoptimized
1450
1451 return caller_is_deopted(current);
1452 }
1453
1454
1455 JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
1456 // for now we just print out the block id
1457 tty->print("%d ", block_id);
1458 JRT_END
1459
1460
1461 JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1462 // We have to return int instead of bool; otherwise there may be a mismatch
1463 // between the C calling convention and the Java one.
1464 // e.g., on x86, GCC may clear only %al when returning a bool false, but
1465 // the JVM reads the whole %eax as the return value, so stale upper bits
1466 // could make a false result be misinterpreted as true.
1467
1468 assert(mirror != nullptr, "should null-check on mirror before calling");
1469 Klass* k = java_lang_Class::as_Klass(mirror);
1470 return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1471 JRT_END
1472
1473 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1474 ResourceMark rm;
1475
1476 RegisterMap reg_map(current,
1477 RegisterMap::UpdateMap::skip,
1478 RegisterMap::ProcessFrames::include,
1479 RegisterMap::WalkContinuation::skip);
1480 frame runtime_frame = current->last_frame();
1481 frame caller_frame = runtime_frame.sender(&reg_map);
1482
1483 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1484 assert(nm != nullptr, "no more nmethod?");
1485 nm->make_not_entrant(nmethod::InvalidationReason::C1_PREDICATE_FAILED_TRAP);
1486
1487 methodHandle m(current, nm->method());
1488 MethodData* mdo = m->method_data();
1489
1490 if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1491 // Build an MDO. Ignore errors like OutOfMemory;
1492 // that simply means we won't have an MDO to update.
1493 Method::build_profiling_method_data(m, THREAD);
1513 }
1514
1515
1516 Deoptimization::deoptimize_frame(current, caller_frame.id());
1517
1518 JRT_END
1519
1520 // Check exception if AbortVMOnException flag set
1521 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1522 ResourceMark rm;
1523 const char* message = nullptr;
1524 if (ex->is_a(vmClasses::Throwable_klass())) {
1525 oop msg = java_lang_Throwable::message(ex);
1526 if (msg != nullptr) {
1527 message = java_lang_String::as_utf8_string(msg);
1528 }
1529 }
1530 Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1531 JRT_END
1532
1533 #ifndef PRODUCT
1534 void Runtime1::print_statistics() {
1535 tty->print_cr("C1 Runtime statistics:");
1536 tty->print_cr(" _resolve_invoke_virtual_cnt: %u", SharedRuntime::_resolve_virtual_ctr);
1537 tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1538 tty->print_cr(" _resolve_invoke_static_cnt: %u", SharedRuntime::_resolve_static_ctr);
1539 tty->print_cr(" _handle_wrong_method_cnt: %u", SharedRuntime::_wrong_method_ctr);
1540 tty->print_cr(" _ic_miss_cnt: %u", SharedRuntime::_ic_miss_ctr);
1541 tty->print_cr(" _generic_arraycopystub_cnt: %u", _generic_arraycopystub_cnt);
1542 tty->print_cr(" _byte_arraycopy_cnt: %u", _byte_arraycopy_stub_cnt);
1543 tty->print_cr(" _short_arraycopy_cnt: %u", _short_arraycopy_stub_cnt);
1544 tty->print_cr(" _int_arraycopy_cnt: %u", _int_arraycopy_stub_cnt);
1545 tty->print_cr(" _long_arraycopy_cnt: %u", _long_arraycopy_stub_cnt);
1546 tty->print_cr(" _oop_arraycopy_cnt: %u", _oop_arraycopy_stub_cnt);
1547 tty->print_cr(" _arraycopy_slowcase_cnt: %u", _arraycopy_slowcase_cnt);
1548 tty->print_cr(" _arraycopy_checkcast_cnt: %u", _arraycopy_checkcast_cnt);
1549 tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1550
1551 tty->print_cr(" _new_type_array_slowcase_cnt: %u", _new_type_array_slowcase_cnt);
1552 tty->print_cr(" _new_object_array_slowcase_cnt: %u", _new_object_array_slowcase_cnt);
1553 tty->print_cr(" _new_instance_slowcase_cnt: %u", _new_instance_slowcase_cnt);
1554 tty->print_cr(" _new_multi_array_slowcase_cnt: %u", _new_multi_array_slowcase_cnt);
1555 tty->print_cr(" _monitorenter_slowcase_cnt: %u", _monitorenter_slowcase_cnt);
1556 tty->print_cr(" _monitorexit_slowcase_cnt: %u", _monitorexit_slowcase_cnt);
1557 tty->print_cr(" _patch_code_slowcase_cnt: %u", _patch_code_slowcase_cnt);
1558
1559 tty->print_cr(" _throw_range_check_exception_count: %u:", _throw_range_check_exception_count);
1560 tty->print_cr(" _throw_index_exception_count: %u:", _throw_index_exception_count);
1561 tty->print_cr(" _throw_div0_exception_count: %u:", _throw_div0_exception_count);
1562 tty->print_cr(" _throw_null_pointer_exception_count: %u:", _throw_null_pointer_exception_count);
1563 tty->print_cr(" _throw_class_cast_exception_count: %u:", _throw_class_cast_exception_count);
1564 tty->print_cr(" _throw_incompatible_class_change_error_count: %u:", _throw_incompatible_class_change_error_count);
1565 tty->print_cr(" _throw_count: %u:", _throw_count);
1566
1567 SharedRuntime::print_ic_miss_histogram();
1568 tty->cr();
1569 }
1570 #endif // PRODUCT
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/codeBuffer.hpp"
26 #include "c1/c1_CodeStubs.hpp"
27 #include "c1/c1_Defs.hpp"
28 #include "c1/c1_LIRAssembler.hpp"
29 #include "c1/c1_MacroAssembler.hpp"
30 #include "c1/c1_Runtime1.hpp"
31 #include "classfile/javaClasses.inline.hpp"
32 #include "classfile/vmClasses.hpp"
33 #include "classfile/vmSymbols.hpp"
34 #include "code/aotCodeCache.hpp"
35 #include "code/codeBlob.hpp"
36 #include "code/compiledIC.hpp"
37 #include "code/scopeDesc.hpp"
38 #include "code/vtableStubs.hpp"
39 #include "compiler/compilationPolicy.hpp"
40 #include "compiler/compilerDefinitions.inline.hpp"
41 #include "compiler/disassembler.hpp"
42 #include "compiler/oopMap.hpp"
43 #include "gc/shared/barrierSet.hpp"
44 #include "gc/shared/c1/barrierSetC1.hpp"
45 #include "gc/shared/collectedHeap.hpp"
46 #include "interpreter/bytecode.hpp"
47 #include "interpreter/interpreter.hpp"
48 #include "jfr/support/jfrIntrinsics.hpp"
49 #include "logging/log.hpp"
50 #include "memory/oopFactory.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "memory/universe.hpp"
53 #include "oops/access.inline.hpp"
54 #include "oops/objArrayKlass.hpp"
55 #include "oops/objArrayOop.inline.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "prims/jvmtiExport.hpp"
58 #include "runtime/atomicAccess.hpp"
59 #include "runtime/fieldDescriptor.inline.hpp"
60 #include "runtime/frame.inline.hpp"
61 #include "runtime/handles.inline.hpp"
62 #include "runtime/interfaceSupport.inline.hpp"
63 #include "runtime/javaCalls.hpp"
64 #include "runtime/perfData.inline.hpp"
65 #include "runtime/runtimeUpcalls.hpp"
66 #include "runtime/sharedRuntime.hpp"
67 #include "runtime/stackWatermarkSet.hpp"
68 #include "runtime/stubInfo.hpp"
69 #include "runtime/stubRoutines.hpp"
70 #include "runtime/vframe.inline.hpp"
71 #include "runtime/vframeArray.hpp"
72 #include "runtime/vm_version.hpp"
73 #include "services/management.hpp"
74 #include "utilities/copy.hpp"
75 #include "utilities/events.hpp"
76
77
78 // Implementation of StubAssembler
79
80 StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
81 _name = name;
82 _must_gc_arguments = false;
83 _frame_size = no_frame_size;
84 _num_rt_args = 0;
85 _stub_id = stub_id;
86 }
87
88
89 void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
90 _name = name;
91 _must_gc_arguments = must_gc_arguments;
92 }
93
256 case StubId::c1_slow_subtype_check_id:
257 case StubId::c1_fpu2long_stub_id:
258 case StubId::c1_unwind_exception_id:
259 case StubId::c1_counter_overflow_id:
260 case StubId::c1_is_instance_of_id:
261 expect_oop_map = false;
262 break;
263 default:
264 break;
265 }
266 #endif
267 C1StubAssemblerCodeGenClosure cl(id);
268 CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
269 // install blob
270 int idx = StubInfo::c1_offset(id); // will assert on non-c1 id
271 _blobs[idx] = blob;
272 return blob != nullptr;
273 }
274
275 bool Runtime1::initialize(BufferBlob* blob) {
276 init_counters();
277 // platform-dependent initialization
278 initialize_pd();
279 // iterate blobs in C1 group and generate a single stub per blob
280 StubId id = StubInfo::stub_base(StubGroup::C1);
281 StubId limit = StubInfo::next(StubInfo::stub_max(StubGroup::C1));
282 for (; id != limit; id = StubInfo::next(id)) {
283 if (!generate_blob_for(blob, id)) {
284 return false;
285 }
286 if (id == StubId::c1_forward_exception_id) {
287 // publish early c1 stubs at this point so later stubs can refer to them
288 AOTCodeCache::init_early_c1_table();
289 }
290 }
291 // printing
292 #ifndef PRODUCT
293 if (PrintSimpleStubs) {
294 ResourceMark rm;
295 id = StubInfo::stub_base(StubGroup::C1);
296 for (; id != limit; id = StubInfo::next(id)) {
356 FUNCTION_CASE(entry, trace_block_entry);
357 #ifdef JFR_HAVE_INTRINSICS
358 FUNCTION_CASE(entry, JfrTime::time_function());
359 #endif
360 FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
361 FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
362 FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
363 FUNCTION_CASE(entry, StubRoutines::dexp());
364 FUNCTION_CASE(entry, StubRoutines::dlog());
365 FUNCTION_CASE(entry, StubRoutines::dlog10());
366 FUNCTION_CASE(entry, StubRoutines::dpow());
367 FUNCTION_CASE(entry, StubRoutines::dsin());
368 FUNCTION_CASE(entry, StubRoutines::dcos());
369 FUNCTION_CASE(entry, StubRoutines::dtan());
370 FUNCTION_CASE(entry, StubRoutines::dsinh());
371 FUNCTION_CASE(entry, StubRoutines::dtanh());
372 FUNCTION_CASE(entry, StubRoutines::dcbrt());
373
374 #undef FUNCTION_CASE
375
376 // RuntimeUpcalls also has a map of addresses to names.
377 const char* upcall_name = RuntimeUpcalls::get_name_for_upcall_address(entry);
378 if (upcall_name != nullptr) {
379 return upcall_name;
380 }
381
382 // Soft float adds more runtime names.
383 return pd_name_for_address(entry);
384 }
385
386
387 JRT_ENTRY_PROF(void, Runtime1, new_instance, Runtime1::new_instance(JavaThread* current, Klass* klass))
388 #ifndef PRODUCT
389 if (PrintC1Statistics) {
390 _new_instance_slowcase_cnt++;
391 }
392 #endif
393 assert(klass->is_klass(), "not a class");
394 Handle holder(current, klass->klass_holder()); // keep the klass alive
395 InstanceKlass* h = InstanceKlass::cast(klass);
396 h->check_valid_for_instantiation(true, CHECK);
397 // make sure klass is initialized
398 h->initialize(CHECK);
399 // allocate instance and return via TLS
400 oop obj = h->allocate_instance(CHECK);
401 current->set_vm_result_oop(obj);
402 JRT_END
403
404
405 JRT_ENTRY_PROF(void, Runtime1, new_type_array, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
406 #ifndef PRODUCT
407 if (PrintC1Statistics) {
408 _new_type_array_slowcase_cnt++;
409 }
410 #endif
411 // Note: no handle for klass is needed since it is not used
412 // anymore after new_typeArray() and no GC can happen before.
413 // (This may have to change if this code changes!)
414 assert(klass->is_klass(), "not a class");
415 BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
416 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
417 current->set_vm_result_oop(obj);
418 // Reaching this slow path is pretty rare, but deoptimizing here is stressful
419 // to the deoptimization machinery, so force a deopt to stress the path.
420 if (DeoptimizeALot) {
421 deopt_caller(current);
422 }
423
424 JRT_END
425
426
427 JRT_ENTRY_PROF(void, Runtime1, new_object_array, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
428 #ifndef PRODUCT
429 if (PrintC1Statistics) {
430 _new_object_array_slowcase_cnt++;
431 }
432 #endif
433 // Note: no handle for klass is needed since it is not used
434 // anymore after new_objArray() and no GC can happen before.
435 // (This may have to change if this code changes!)
436 assert(array_klass->is_klass(), "not a class");
437 Handle holder(current, array_klass->klass_holder()); // keep the klass alive
438 Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
439 objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
440 current->set_vm_result_oop(obj);
441 // Reaching this slow path is pretty rare, but deoptimizing here is stressful
442 // to the deoptimization machinery, so force a deopt to stress the path.
443 if (DeoptimizeALot) {
444 deopt_caller(current);
445 }
446 JRT_END
447
448
449 JRT_ENTRY_PROF(void, Runtime1, new_multi_array, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
450 #ifndef PRODUCT
451 if (PrintC1Statistics) {
452 _new_multi_array_slowcase_cnt++;
453 }
454 #endif
455 assert(klass->is_klass(), "not a class");
456 assert(rank >= 1, "rank must be nonzero");
457 Handle holder(current, klass->klass_holder()); // keep the klass alive
458 oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
459 current->set_vm_result_oop(obj);
460 JRT_END
461
462
463 JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubId id))
464 tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
465 JRT_END
466
467
468 JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
469 ResourceMark rm(current);
500 case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
501 case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
502 case Bytecodes::_if_icmple: case Bytecodes::_ifle:
503 case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
504 case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
505 case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
506 case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
507 offset = (int16_t)Bytes::get_Java_u2(pc + 1);
508 break;
509 case Bytecodes::_goto_w:
510 offset = Bytes::get_Java_u4(pc + 1);
511 break;
512 default: ;
513 }
514 bci = branch_bci + offset;
515 }
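// Worked example (illustrative only, not from the source): a backward ifle
// whose two operand bytes are 0xFF 0xFE yields offset = (int16_t)0xFFFE = -2,
// so bci = branch_bci - 2, i.e. the event below reports the loop head that
// sits behind the overflowing backward branch.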
516 osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
517 return osr_nm;
518 }
519
520 JRT_BLOCK_ENTRY_PROF(address, Runtime1, counter_overflow, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
521 nmethod* osr_nm;
522 JRT_BLOCK_NO_ASYNC
523 osr_nm = counter_overflow_helper(current, bci, method);
524 if (osr_nm != nullptr) {
525 RegisterMap map(current,
526 RegisterMap::UpdateMap::skip,
527 RegisterMap::ProcessFrames::include,
528 RegisterMap::WalkContinuation::skip);
529 frame fr = current->last_frame().sender(&map);
530 Deoptimization::deoptimize_frame(current, fr.id());
531 }
532 JRT_BLOCK_END
533 return nullptr;
534 JRT_END
535
536 extern void vm_exit(int code);
537
538 // Enter this method from the compiled code handler below. This is where we
539 // transition to VM mode. This is done as a helper routine so that the method
540 // called directly from compiled code does not have to transition to VM. This
541 // allows the entry method to see if the nmethod that we have just looked up a
542 // handler for has been deoptimized while we were in the VM. This simplifies the
543 // assembly code in the cpu directories.
544 //
545 // We are entering here from the exception stub (via the entry method below).
546 // If there is a compiled exception handler in this method, we will continue there;
547 // otherwise we will unwind the stack and continue at the caller of the top frame method.
548 // Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
549 // control the area where we can allow a safepoint. After we exit the safepoint area we can
550 // check to see if the handler we are going to return is now in an nmethod that has
551 // been deoptimized. If that is the case we return the deopt blob
552 // unpack_with_exception entry instead. This makes life for the exception blob easier
553 // because making that same check and diverting is painful from assembly language.
554 JRT_ENTRY_NO_ASYNC_PROF(static address, Runtime1, exception_handler_for_pc_helper, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
555 // Reset method handle flag.
556 current->set_is_method_handle_return(false);
557
558 Handle exception(current, ex);
559
560 // This function is called when we are about to throw an exception. Therefore,
561 // we have to poll the stack watermark barrier to make sure that not yet safe
562 // stack frames are made safe before returning into them.
563 if (current->last_frame().cb() == Runtime1::blob_for(StubId::c1_handle_exception_from_callee_id)) {
564 // The StubId::c1_handle_exception_from_callee_id handler is invoked after the
565 // frame has been unwound. It instead builds its own stub frame, to call the
566 // runtime. But the throwing frame has already been unwound here.
567 StackWatermarkSet::after_unwind(current);
568 }
569
570 nm = CodeCache::find_nmethod(pc);
571 assert(nm != nullptr, "this is not an nmethod");
572 // Adjust the pc as needed.
573 if (nm->is_deopt_pc(pc)) {
574 RegisterMap map(current,
766 _throw_class_cast_exception_count++;
767 }
768 #endif
769 ResourceMark rm(current);
770 char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
771 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
772 JRT_END
773
774
775 JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
776 #ifndef PRODUCT
777 if (PrintC1Statistics) {
778 _throw_incompatible_class_change_error_count++;
779 }
780 #endif
781 ResourceMark rm(current);
782 SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
783 JRT_END
784
785
786 JRT_BLOCK_ENTRY_PROF(void, Runtime1, monitorenter, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
787 #ifndef PRODUCT
788 if (PrintC1Statistics) {
789 _monitorenter_slowcase_cnt++;
790 }
791 #endif
792 assert(obj == lock->obj(), "must match");
793 SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
794 JRT_END
795
796
797 JRT_LEAF_PROF(void, Runtime1, monitorexit, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
798 assert(current == JavaThread::current(), "pre-condition");
799 #ifndef PRODUCT
800 if (PrintC1Statistics) {
801 _monitorexit_slowcase_cnt++;
802 }
803 #endif
804 assert(current->last_Java_sp(), "last_Java_sp must be set");
805 oop obj = lock->obj();
806 assert(oopDesc::is_oop(obj), "must be an object");
807 SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
808 JRT_END
809
810 // Cf. OptoRuntime::deoptimize_caller_frame
811 JRT_ENTRY_PROF(void, Runtime1, deoptimize, Runtime1::deoptimize(JavaThread* current, jint trap_request))
812 // Called from within the owner thread, so no need for safepoint
813 RegisterMap reg_map(current,
814 RegisterMap::UpdateMap::skip,
815 RegisterMap::ProcessFrames::include,
816 RegisterMap::WalkContinuation::skip);
817 frame stub_frame = current->last_frame();
818 assert(stub_frame.is_runtime_frame(), "Sanity check");
819 frame caller_frame = stub_frame.sender(&reg_map);
820 nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
821 assert(nm != nullptr, "Sanity check");
822 methodHandle method(current, nm->method());
823 assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
824 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
825 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
826
827 if (action == Deoptimization::Action_make_not_entrant) {
828 if (nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE)) {
829 if (reason == Deoptimization::Reason_tenured) {
830 MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
831 if (trap_mdo != nullptr) {
834 }
835 }
836 }
837
838 // Deoptimize the caller frame.
839 Deoptimization::deoptimize_frame(current, caller_frame.id());
840 // Return to the now deoptimized frame.
841 JRT_END
842
843
844 #ifndef DEOPTIMIZE_WHEN_PATCHING
845
846 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
847 Bytecode_field field_access(caller, bci);
848 // This can be static or non-static field access
849 Bytecodes::Code code = field_access.code();
850
851 // We must load class, initialize class and resolve the field
852 fieldDescriptor result; // initialize class if needed
853 constantPoolHandle constants(THREAD, caller->constants());
854 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller,
855 Bytecodes::java_code(code), true /*initialize_class*/, CHECK_NULL);
856 return result.field_holder();
857 }
858
859
860 //
861 // This routine patches sites where a class wasn't loaded or
862 // initialized at the time the code was generated. It handles
863 // references to classes, fields and forcing of initialization. Most
864 // of the cases are straightforward and involve simply forcing
865 // resolution of a class, rewriting the instruction stream with the
866 // needed constant and replacing the call in this function with the
867 // patched code. The case for a static field is more complicated since
868 // the thread which is in the process of initializing a class can
869 // access its static fields while other threads can't, so the code
870 // either has to deoptimize when this case is detected or execute a
871 // check that the current thread is the initializing thread. The
872 // current
873 //
874 // Patches basically look like this:
875 //
938 // always end up with a correct outcome. This is easiest if there are
939 // few or no intermediate states. (Some inline caches have two
940 // related instructions that must be patched in tandem. For those,
941 // intermediate states seem to be unavoidable, but we will get the
942 // right answer from all possible observation orders.)
943 //
944 // When patching the entry instruction at the head of a method, or a
945 // linkable call instruction inside of a method, we try very hard to
946 // use a patch sequence which executes as a single memory transaction.
947 // This means, in practice, that when thread A patches an instruction,
948 // it should patch a 32-bit or 64-bit word that somehow overlaps the
949 // instruction or is contained in it. We believe that memory hardware
950 // will never break up such a word write, if it is naturally aligned
951 // for the word being written. We also know that some CPUs work very
952 // hard to create atomic updates even of naturally unaligned words,
953 // but we don't want to bet the farm on this always working.
954 //
955 // Therefore, if there is any chance of a race condition, we try to
956 // patch only naturally aligned words, as single, full-word writes.
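//
// Illustration (assumed addresses): if a patch site's 32-bit constant lives at
// a 4-byte-aligned address such as 0x7f0000401000, a single aligned 32-bit
// store of the new value is all-or-nothing to other threads: they observe
// either the entirely old or the entirely new word, never a torn mix.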
957
958 JRT_ENTRY_PROF(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, StubId stub_id))
959 #ifndef PRODUCT
960 if (PrintC1Statistics) {
961 _patch_code_slowcase_cnt++;
962 }
963 #endif
964
965 ResourceMark rm(current);
966 RegisterMap reg_map(current,
967 RegisterMap::UpdateMap::skip,
968 RegisterMap::ProcessFrames::include,
969 RegisterMap::WalkContinuation::skip);
970 frame runtime_frame = current->last_frame();
971 frame caller_frame = runtime_frame.sender(&reg_map);
972
973 // last java frame on stack
974 vframeStream vfst(current, true);
975 assert(!vfst.at_end(), "Java frame must exist");
976
977 methodHandle caller_method(current, vfst.method());
978 // Note that caller_method->code() may not be the same as caller_code because of OSRs.
983 Bytecodes::Code code = caller_method()->java_code_at(bci);
984
985 // this is used by assertions in the access_field_patching_id case
986 BasicType patch_field_type = T_ILLEGAL;
987 bool deoptimize_for_volatile = false;
988 bool deoptimize_for_atomic = false;
989 int patch_field_offset = -1;
990 Klass* init_klass = nullptr; // klass needed by load_klass_patching code
991 Klass* load_klass = nullptr; // klass needed by load_klass_patching code
992 Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
993 Handle appendix(current, nullptr); // oop needed by appendix_patching code
994 bool load_klass_or_mirror_patch_id =
995 (stub_id == StubId::c1_load_klass_patching_id || stub_id == StubId::c1_load_mirror_patching_id);
996
997 if (stub_id == StubId::c1_access_field_patching_id) {
998
999 Bytecode_field field_access(caller_method, bci);
1000 fieldDescriptor result; // initialize class if needed
1001 Bytecodes::Code code = field_access.code();
1002 constantPoolHandle constants(current, caller_method->constants());
1003 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method,
1004 Bytecodes::java_code(code), true /*initialize_class*/, CHECK);
1005 patch_field_offset = result.offset();
1006
1007 // If we're patching a field which is volatile then at compile time it
1008 // must not have been known to be volatile, so the generated code
1009 // isn't correct for a volatile reference. The nmethod has to be
1010 // deoptimized so that the code can be regenerated correctly.
1011 // This check is only needed for access_field_patching since this
1012 // is the path for patching field offsets. load_klass is only
1013 // used for patching references to oops which don't need special
1014 // handling in the volatile case.
1015
1016 deoptimize_for_volatile = result.access_flags().is_volatile();
1017
1018 // If we are patching a field which should be atomic, then
1019 // the generated code is not correct either, so force deoptimization.
1020 // We need to only cover T_LONG and T_DOUBLE fields, as we can
1021 // break access atomicity only for them.
1022
1023 // Strictly speaking, the deoptimization on 64-bit platforms
1024 // is unnecessary, and T_LONG stores on 32-bit platforms need
1322 switch (code) {
1323 case Bytecodes::_new:
1324 case Bytecodes::_anewarray:
1325 case Bytecodes::_multianewarray:
1326 case Bytecodes::_instanceof:
1327 case Bytecodes::_checkcast: {
1328 Bytecode bc(caller_method(), caller_method->bcp_from(bci));
1329 constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
1330 if (tag.is_unresolved_klass_in_error()) {
1331 return false; // throws resolution error
1332 }
1333 break;
1334 }
1335
1336 default: break;
1337 }
1338 }
1339 return true;
1340 }
1341
1342 PROF_ENTRY(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, StubId stub_id))
1343 #ifndef PRODUCT
1344 if (PrintC1Statistics) {
1345 _patch_code_slowcase_cnt++;
1346 }
1347 #endif
1348
1349 // Enable WXWrite: the function is called by c1 stub as a runtime function
1350 // (see another implementation above).
1351 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1352
1353 if (TracePatching) {
1354 tty->print_cr("Deoptimizing because patch is needed");
1355 }
1356
1357 RegisterMap reg_map(current,
1358 RegisterMap::UpdateMap::skip,
1359 RegisterMap::ProcessFrames::include,
1360 RegisterMap::WalkContinuation::skip);
1361
1362 frame runtime_frame = current->last_frame();
1363 frame caller_frame = runtime_frame.sender(&reg_map);
1364 assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1365
1366 if (is_patching_needed(current, stub_id)) {
1367 // Make sure the nmethod is invalidated, i.e. made not entrant.
1368 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1369 if (nm != nullptr) {
1370 nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING);
1371 }
1372 }
1373
1374 Deoptimization::deoptimize_frame(current, caller_frame.id());
1375 // Return to the now deoptimized frame.
1376 postcond(caller_is_deopted(current));
1377 PROF_END
1378
1379 #endif // DEOPTIMIZE_WHEN_PATCHING
1380
1381 // Entry point for compiled code. We want to patch an nmethod.
1382 // We don't do a normal VM transition here because we want to
1383 // know, after the patching is complete and any safepoint(s) are taken,
1384 // whether the calling nmethod was deoptimized. We do this by calling a
1385 // helper method which does the normal VM transition and when it
1386 // completes we can check for deoptimization. This simplifies the
1387 // assembly code in the cpu directories.
1388 //
1389 int Runtime1::move_klass_patching(JavaThread* current) {
1390 //
1391 // NOTE: we are still in Java
1392 //
1393 DEBUG_ONLY(NoHandleMark nhm;)
1394 {
1395 // Enter VM mode
1396 ResetNoHandleMark rnhm;
1397 patch_code(current, StubId::c1_load_klass_patching_id);
1448 int Runtime1::access_field_patching(JavaThread* current) {
1449 //
1450 // NOTE: we are still in Java
1451 //
1452 // Handles created in this function will be deleted by the
1453 // HandleMarkCleaner in the transition to the VM.
1454 NoHandleMark nhm;
1455 {
1456 // Enter VM mode
1457 ResetNoHandleMark rnhm;
1458 patch_code(current, StubId::c1_access_field_patching_id);
1459 }
1460 // Back in Java: use no oops, DON'T safepoint.
1461
1462 // Return true if calling code is deoptimized
1463
1464 return caller_is_deopted(current);
1465 }
1466
1467
1468 JRT_LEAF_PROF_NO_THREAD(void, Runtime1, trace_block_entry, Runtime1::trace_block_entry(jint block_id))
1469 // for now we just print out the block id
1470 tty->print("%d ", block_id);
1471 JRT_END
1472
1473
1474 JRT_LEAF_PROF_NO_THREAD(int, Runtime1, is_instance_of, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
1475 // We have to return int instead of bool; otherwise there may be a mismatch
1476 // between the C calling convention and the Java one.
1477 // e.g., on x86, GCC may clear only %al when returning a bool false, but
1478 // the JVM reads the whole %eax as the return value, so stale upper bits
1479 // could make a false result be misinterpreted as true.
1480
1481 assert(mirror != nullptr, "should null-check on mirror before calling");
1482 Klass* k = java_lang_Class::as_Klass(mirror);
1483 return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1484 JRT_END
1485
1486 JRT_ENTRY_PROF(void, Runtime1, predicate_failed_trap, Runtime1::predicate_failed_trap(JavaThread* current))
1487 ResourceMark rm;
1488
1489 RegisterMap reg_map(current,
1490 RegisterMap::UpdateMap::skip,
1491 RegisterMap::ProcessFrames::include,
1492 RegisterMap::WalkContinuation::skip);
1493 frame runtime_frame = current->last_frame();
1494 frame caller_frame = runtime_frame.sender(&reg_map);
1495
1496 nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1497 assert(nm != nullptr, "no more nmethod?");
1498 nm->make_not_entrant(nmethod::InvalidationReason::C1_PREDICATE_FAILED_TRAP);
1499
1500 methodHandle m(current, nm->method());
1501 MethodData* mdo = m->method_data();
1502
1503 if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1504 // Build an MDO. Ignore errors like OutOfMemory;
1505 // that simply means we won't have an MDO to update.
1506 Method::build_profiling_method_data(m, THREAD);
1526 }
1527
1528
1529 Deoptimization::deoptimize_frame(current, caller_frame.id());
1530
1531 JRT_END
1532
1533 // Check exception if AbortVMOnException flag set
1534 JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
1535 ResourceMark rm;
1536 const char* message = nullptr;
1537 if (ex->is_a(vmClasses::Throwable_klass())) {
1538 oop msg = java_lang_Throwable::message(ex);
1539 if (msg != nullptr) {
1540 message = java_lang_String::as_utf8_string(msg);
1541 }
1542 }
1543 Exceptions::debug_check_abort(ex->klass()->external_name(), message);
1544 JRT_END
1545
1546 #define DO_COUNTERS(macro) \
1547 macro(Runtime1, new_instance) \
1548 macro(Runtime1, new_type_array) \
1549 macro(Runtime1, new_object_array) \
1550 macro(Runtime1, new_multi_array) \
1551 macro(Runtime1, counter_overflow) \
1552 macro(Runtime1, exception_handler_for_pc_helper) \
1553 macro(Runtime1, monitorenter) \
1554 macro(Runtime1, monitorexit) \
1555 macro(Runtime1, deoptimize) \
1556 macro(Runtime1, is_instance_of) \
1557 macro(Runtime1, predicate_failed_trap) \
1558 macro(Runtime1, patch_code)
1559
1560 #define INIT_COUNTER(sub, name) \
1561 NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
1562 NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");
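// Illustration only (nothing new is compiled): with the X-macro list above,
// DO_COUNTERS(INIT_COUNTER) expands each (sub, name) pair into a tick counter
// and an event counter. For (Runtime1, new_instance) the expansion is:
//   NEWPERFTICKCOUNTERS(_perf_Runtime1_new_instance_timer, SUN_CI, "Runtime1::new_instance");
//   NEWPERFEVENTCOUNTER(_perf_Runtime1_new_instance_count, SUN_CI, "Runtime1::new_instance_count");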
1563
1564 void Runtime1::init_counters() {
1565 assert(CompilerConfig::is_c1_enabled(), "");
1566
1567 if (UsePerfData) {
1568 EXCEPTION_MARK;
1569
1570 DO_COUNTERS(INIT_COUNTER)
1571
1572 if (HAS_PENDING_EXCEPTION) {
1573 vm_exit_during_initialization("Runtime1::init_counters() failed unexpectedly");
1574 }
1575 }
1576 }
1577 #undef INIT_COUNTER
1578
1579 #define PRINT_COUNTER(sub, name) { \
1580 if (_perf_##sub##_##name##_count != nullptr) { \
1581 jlong count = _perf_##sub##_##name##_count->get_value(); \
1582 if (count > 0) { \
1583 st->print_cr(" %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
1584 _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
1585 _perf_##sub##_##name##_timer->thread_counter_value_us(), \
1586 count); \
1587 }}}
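// With ProfileRuntimeCalls enabled, each populated counter prints one line of
// the form below (values are hypothetical, layout follows the format string):
//   Runtime1::new_instance                             =   1234us (elapsed)    987us (thread) (   42 events)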
1588
1589
1590 void Runtime1::print_counters_on(outputStream* st) {
1591 if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c1_enabled()) {
1592 DO_COUNTERS(PRINT_COUNTER)
1593 } else {
1594 st->print_cr(" Runtime1: no info (%s is disabled)",
1595 (!CompilerConfig::is_c1_enabled() ? "C1" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
1596 }
1597 }
1598
1599 #undef PRINT_COUNTER
1600 #undef DO_COUNTERS
1601
1602 #ifndef PRODUCT
1603 void Runtime1::print_statistics_on(outputStream* st) {
1604 st->print_cr("C1 Runtime statistics:");
1605 st->print_cr(" _resolve_invoke_virtual_cnt: %u", SharedRuntime::_resolve_virtual_ctr);
1606 st->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
1607 st->print_cr(" _resolve_invoke_static_cnt: %u", SharedRuntime::_resolve_static_ctr);
1608 st->print_cr(" _handle_wrong_method_cnt: %u", SharedRuntime::_wrong_method_ctr);
1609 st->print_cr(" _ic_miss_cnt: %u", SharedRuntime::_ic_miss_ctr);
1610 st->print_cr(" _generic_arraycopystub_cnt: %u", _generic_arraycopystub_cnt);
1611 st->print_cr(" _byte_arraycopy_cnt: %u", _byte_arraycopy_stub_cnt);
1612 st->print_cr(" _short_arraycopy_cnt: %u", _short_arraycopy_stub_cnt);
1613 st->print_cr(" _int_arraycopy_cnt: %u", _int_arraycopy_stub_cnt);
1614 st->print_cr(" _long_arraycopy_cnt: %u", _long_arraycopy_stub_cnt);
1615 st->print_cr(" _oop_arraycopy_cnt: %u", _oop_arraycopy_stub_cnt);
1616 st->print_cr(" _arraycopy_slowcase_cnt: %u", _arraycopy_slowcase_cnt);
1617 st->print_cr(" _arraycopy_checkcast_cnt: %u", _arraycopy_checkcast_cnt);
1618 st->print_cr(" _arraycopy_checkcast_attempt_cnt:%u", _arraycopy_checkcast_attempt_cnt);
1619
1620 st->print_cr(" _new_type_array_slowcase_cnt: %u", _new_type_array_slowcase_cnt);
1621 st->print_cr(" _new_object_array_slowcase_cnt: %u", _new_object_array_slowcase_cnt);
1622 st->print_cr(" _new_instance_slowcase_cnt: %u", _new_instance_slowcase_cnt);
1623 st->print_cr(" _new_multi_array_slowcase_cnt: %u", _new_multi_array_slowcase_cnt);
1624 st->print_cr(" _monitorenter_slowcase_cnt: %u", _monitorenter_slowcase_cnt);
1625 st->print_cr(" _monitorexit_slowcase_cnt: %u", _monitorexit_slowcase_cnt);
1626 st->print_cr(" _patch_code_slowcase_cnt: %u", _patch_code_slowcase_cnt);
1627
1628 st->print_cr(" _throw_range_check_exception_count: %u:", _throw_range_check_exception_count);
1629 st->print_cr(" _throw_index_exception_count: %u:", _throw_index_exception_count);
1630 st->print_cr(" _throw_div0_exception_count: %u:", _throw_div0_exception_count);
1631 st->print_cr(" _throw_null_pointer_exception_count: %u:", _throw_null_pointer_exception_count);
1632 st->print_cr(" _throw_class_cast_exception_count: %u:", _throw_class_cast_exception_count);
1633 st->print_cr(" _throw_incompatible_class_change_error_count: %u:", _throw_incompatible_class_change_error_count);
1634 st->print_cr(" _throw_count: %u:", _throw_count);
1635
1636 SharedRuntime::print_ic_miss_histogram_on(st);
1637 st->cr();
1638 }
1639 #endif // PRODUCT