/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jfr/support/jfrIntrinsics.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"


// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char* name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}

  switch (id) {
    // These stubs don't need to have an oopmap
  case dtrace_object_alloc_id:
  case slow_subtype_check_id:
  case fpu2long_stub_id:
  case unwind_exception_id:
  case counter_overflow_id:
    expect_oop_map = false;
    break;
  default:
    break;
  }
#endif
  StubIDStubAssemblerCodeGenClosure cl(id);
  CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
  // install blob
  _blobs[id] = blob;
}

void Runtime1::initialize(BufferBlob* blob) {
  init_counters();
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != nullptr) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
  BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
  bs->generate_c1_runtime_stubs(blob);
}
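
// Note: all C1 runtime stubs are generated eagerly at VM startup into the
// supplied buffer; the GC-specific stubs are added last through the
// BarrierSetC1 hook above.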

  FUNCTION_CASE(entry, JfrTime::time_function());
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32C());
  FUNCTION_CASE(entry, StubRoutines::vectorizedMismatch());
  FUNCTION_CASE(entry, StubRoutines::dexp());
  FUNCTION_CASE(entry, StubRoutines::dlog());
  FUNCTION_CASE(entry, StubRoutines::dlog10());
  FUNCTION_CASE(entry, StubRoutines::dpow());
  FUNCTION_CASE(entry, StubRoutines::dsin());
  FUNCTION_CASE(entry, StubRoutines::dcos());
  FUNCTION_CASE(entry, StubRoutines::dtan());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}
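
// Note: the function ending above maps a runtime entry address back to a
// printable name via the FUNCTION_CASE table; it is used for diagnostic
// output such as disassembly, so an unmatched address is not an error.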


JRT_ENTRY_PROF(void, Runtime1, new_instance, Runtime1::new_instance(JavaThread* current, Klass* klass))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_instance_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  InstanceKlass* h = InstanceKlass::cast(klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  current->set_vm_result(obj);
JRT_END
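
// Note: the allocation entries here are slow paths. C1-compiled code normally
// allocates inline (typically from the TLAB) and only calls into the runtime
// when the fast path fails or the klass may still need initialization; the
// resulting oop is handed back through the thread's vm_result slot rather
// than a return register.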


JRT_ENTRY_PROF(void, Runtime1, new_type_array, Runtime1::new_type_array(JavaThread* current, Klass* klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_type_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since it is not used
  // anymore after new_typeArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare, but deoptimizing here is stressful for the
  // deoptimization machinery, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }

JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_object_array, Runtime1::new_object_array(JavaThread* current, Klass* array_klass, jint length))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_object_array_slowcase_cnt++;
  }
#endif
  // Note: no handle for klass needed since it is not used
  // anymore after new_objArray() and no GC can happen before.
  // (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(current, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  current->set_vm_result(obj);
  // This is pretty rare, but deoptimizing here is stressful for the
  // deoptimization machinery, so force a deopt to stress the path.
  if (DeoptimizeALot) {
    deopt_caller(current);
  }
JRT_END


JRT_ENTRY_PROF(void, Runtime1, new_multi_array, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int rank, jint* dims))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _new_multi_array_slowcase_cnt++;
  }
#endif
  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(current, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  current->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* current, oopDesc* obj))
  ResourceMark rm(current);
    case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
    case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
    case Bytecodes::_if_icmple: case Bytecodes::_ifle:
    case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
    case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
    case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
    case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
      offset = (int16_t)Bytes::get_Java_u2(pc + 1);
      break;
    case Bytecodes::_goto_w:
      offset = Bytes::get_Java_u4(pc + 1);
      break;
    default: ;
    }
    bci = branch_bci + offset;
  }
  osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, current);
  return osr_nm;
}

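// Called when a method-entry or backedge counter in C1-compiled code
// overflows. If the compilation policy hands back an OSR nmethod for this
// bci, the compiled caller frame is deoptimized so that execution re-enters
// through the interpreter and transfers into the OSR code.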
JRT_BLOCK_ENTRY_PROF(address, Runtime1, counter_overflow, Runtime1::counter_overflow(JavaThread* current, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(current, bci, method);
    if (osr_nm != nullptr) {
      RegisterMap map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = current->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(current, fr.id());
    }
  JRT_BLOCK_END
  return nullptr;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from the exception stub (via the entry method below).
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method.
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return to is now in an nmethod that has
// been deoptimized. If that is the case we return the deopt blob's
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC_PROF(static address, Runtime1, exception_handler_for_pc_helper, exception_handler_for_pc_helper(JavaThread* current, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  current->set_is_method_handle_return(false);

  Handle exception(current, ex);

  // This function is called when we are about to throw an exception. Therefore,
  // we have to poll the stack watermark barrier to make sure that not-yet-safe
  // stack frames are made safe before returning into them.
  if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
    // frame has been unwound. It instead builds its own stub frame, to call the
    // runtime. But the throwing frame has already been unwound here.
    StackWatermarkSet::after_unwind(current);
  }

  nm = CodeCache::find_nmethod(pc);
  assert(nm != nullptr, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(current,
    _throw_class_cast_exception_count++;
  }
#endif
  ResourceMark rm(current);
  char* message = SharedRuntime::generate_class_cast_message(current, object->klass());
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* current))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _throw_incompatible_class_change_error_count++;
  }
#endif
  ResourceMark rm(current);
  SharedRuntime::throw_and_post_jvmti_exception(current, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END


JRT_BLOCK_ENTRY_PROF(void, Runtime1, monitorenter, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorenter_slowcase_cnt++;
  }
#endif
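  // Under LM_MONITOR the compiled code does not fill in the BasicObjectLock,
  // so store the object here; under LM_LIGHTWEIGHT the BasicLock is unused
  // and a nullptr lock is passed to the shared helper below.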
  if (LockingMode == LM_MONITOR) {
    lock->set_obj(obj);
  }
  assert(LockingMode == LM_LIGHTWEIGHT || obj == lock->obj(), "must match");
  SharedRuntime::monitor_enter_helper(obj, LockingMode == LM_LIGHTWEIGHT ? nullptr : lock->lock(), current);
JRT_END

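// Note: monitorexit runs as a leaf (no VM transition), so it is expected not
// to safepoint or throw; it can be reached while unwinding a frame that
// already has a pending exception.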
JRT_LEAF_PROF(void, Runtime1, monitorexit, Runtime1::monitorexit(JavaThread* current, BasicObjectLock* lock))
  assert(current == JavaThread::current(), "pre-condition");
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _monitorexit_slowcase_cnt++;
  }
#endif
  assert(current->last_Java_sp(), "last_Java_sp must be set");
  oop obj = lock->obj();
  assert(oopDesc::is_oop(obj), "must be an object");
  SharedRuntime::monitor_exit_helper(obj, lock->lock(), current);
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY_PROF(void, Runtime1, deoptimize, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame stub_frame = current->last_frame();
  assert(stub_frame.is_runtime_frame(), "Sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
  assert(nm != nullptr, "Sanity check");
  methodHandle method(current, nm->method());
  assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
  Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant()) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != nullptr) {
          trap_mdo->inc_tenure_traps();
        }
      }
    }
  }

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
JRT_END


#ifndef DEOPTIMIZE_WHEN_PATCHING

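// Helper for the patching code below: resolves the field referenced at the
// given bci (loading and initializing the holder class if necessary) and
// returns the klass that actually holds the field.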
static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code = field_access.code();

  // We must load class, initialize class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), caller,
                                     Bytecodes::java_code(code), true /*initialize_class*/, CHECK_NULL);
  return result.field_holder();
}


//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated. It handles
// references to classes, fields and forcing of initialization. Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call in this function with the
// patched code. The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.
//
// Patches basically look like this:
//
// always end up with a correct outcome. This is easiest if there are
// few or no intermediate states. (Some inline caches have two
// related instructions that must be patched in tandem. For those,
// intermediate states seem to be unavoidable, but we will get the
// right answer from all possible observation orders.)
//
// When patching the entry instruction at the head of a method, or a
// linkable call instruction inside of a method, we try very hard to
// use a patch sequence which executes as a single memory transaction.
// This means, in practice, that when thread A patches an instruction,
// it should patch a 32-bit or 64-bit word that somehow overlaps the
// instruction or is contained in it. We believe that memory hardware
// will never break up such a word write, if it is naturally aligned
// for the word being written. We also know that some CPUs work very
// hard to create atomic updates even of naturally unaligned words,
// but we don't want to bet the farm on this always working.
//
// Therefore, if there is any chance of a race condition, we try to
// patch only naturally aligned words, as single, full-word writes.
JRT_ENTRY_PROF(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  ResourceMark rm(current);
  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(current, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(current, vfst.method());
  // Note that caller_method->code() may not be the same as caller_code because of OSRs
  Bytecodes::Code code = caller_method()->java_code_at(bci);

  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
  bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  Klass* init_klass = nullptr; // klass needed by load_klass_patching code
  Klass* load_klass = nullptr; // klass needed by load_klass_patching code
  Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
  Handle appendix(current, nullptr); // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(current, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method,
                                       Bytecodes::java_code(code), true /*initialize_class*/, CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time it
    // must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference. The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets. load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.

    deoptimize_for_volatile = result.access_flags().is_volatile();

    // If we are patching a field which should be atomic, then
    // the generated code is not correct either; force deoptimizing.
    // We need to only cover T_LONG and T_DOUBLE fields, as we can
    // break access atomicity only for them.

    // Strictly speaking, the deoptimization on 64-bit platforms
    // is unnecessary, and T_LONG stores on 32-bit platforms need
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast: {
        Bytecode bc(caller_method(), caller_method->bcp_from(bci));
        constantTag tag = caller_method->constants()->tag_at(bc.get_index_u2(code));
        if (tag.is_unresolved_klass_in_error()) {
          return false; // throws resolution error
        }
        break;
      }

      default: break;
    }
  }
  return true;
}

PROF_ENTRY(void, Runtime1, patch_code, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id))
#ifndef PRODUCT
  if (PrintC1Statistics) {
    _patch_code_slowcase_cnt++;
  }
#endif

  // Enable WXWrite: the function is called by c1 stub as a runtime function
  // (see another implementation above).
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));

  if (TracePatching) {
    tty->print_cr("Deoptimizing because patch is needed");
  }

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);

  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "Wrong frame type");

  if (is_patching_needed(current, stub_id)) {
    // Make sure the nmethod is invalidated, i.e. made not entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant();
    }
  }

  Deoptimization::deoptimize_frame(current, caller_frame.id());
  // Return to the now deoptimized frame.
  postcond(caller_is_deopted(current));
PROF_END

#endif // DEOPTIMIZE_WHEN_PATCHING

// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know after the patching is complete and any safepoint(s) are taken
// if the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition and when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, load_klass_patching_id);
int Runtime1::access_field_patching(JavaThread* current) {
//
// NOTE: we are still in Java
//
  // Handles created in this function will be deleted by the
  // HandleMarkCleaner in the transition to the VM.
  NoHandleMark nhm;
  {
    // Enter VM mode
    ResetNoHandleMark rnhm;
    patch_code(current, access_field_patching_id);
  }
  // Back in JAVA, use no oops, DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted(current);
}


JRT_LEAF_PROF_NO_THREAD(void, Runtime1, trace_block_entry, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END


JRT_LEAF_PROF_NO_THREAD(int, Runtime1, is_instance_of, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We have to return an int instead of a bool; otherwise there may be a mismatch
  // between the C calling convention and the Java one.
  // e.g., on x86, GCC may clear only %al when returning a bool false, while
  // the JVM reads the whole %eax as the return value, which may misinterpret
  // the return value as a boolean true.

  assert(mirror != nullptr, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
  return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
JRT_END

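// Called when a C1 loop or range-check predicate fails at run time:
// invalidate the nmethod, build an MDO (if possible) so the failure can be
// profiled, and deoptimize the caller frame so the method is recompiled
// without relying on the failed predicate.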
JRT_ENTRY_PROF(void, Runtime1, predicate_failed_trap, Runtime1::predicate_failed_trap(JavaThread* current))
  ResourceMark rm;

  RegisterMap reg_map(current,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame runtime_frame = current->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != nullptr, "no more nmethod?");
  nm->make_not_entrant();

  methodHandle m(current, nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
    // Build an MDO. Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_profiling_method_data(m, THREAD);
  }


  Deoptimization::deoptimize_frame(current, caller_frame.id());

JRT_END

// Check exception if AbortVMOnException flag set
JRT_LEAF(void, Runtime1::check_abort_on_vm_exception(oopDesc* ex))
  ResourceMark rm;
  const char* message = nullptr;
  if (ex->is_a(vmClasses::Throwable_klass())) {
    oop msg = java_lang_Throwable::message(ex);
    if (msg != nullptr) {
      message = java_lang_String::as_utf8_string(msg);
    }
  }
  Exceptions::debug_check_abort(ex->klass()->external_name(), message);
JRT_END

#define DO_COUNTERS(macro) \
  macro(Runtime1, new_instance) \
  macro(Runtime1, new_type_array) \
  macro(Runtime1, new_object_array) \
  macro(Runtime1, new_multi_array) \
  macro(Runtime1, counter_overflow) \
  macro(Runtime1, exception_handler_for_pc_helper) \
  macro(Runtime1, monitorenter) \
  macro(Runtime1, monitorexit) \
  macro(Runtime1, deoptimize) \
  macro(Runtime1, is_instance_of) \
  macro(Runtime1, predicate_failed_trap) \
  macro(Runtime1, patch_code)

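// DO_COUNTERS is an X-macro list: DO_COUNTERS(INIT_COUNTER) below expands to
// one INIT_COUNTER(Runtime1, name) invocation per entry above, giving every
// profiled entry point a matching _perf_Runtime1_<name>_timer/_count pair;
// PRINT_COUNTER later walks the same list when reporting.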
#define INIT_COUNTER(sub, name) \
  NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_CI, #sub "::" #name); \
  NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_CI, #sub "::" #name "_count");

void Runtime1::init_counters() {
  assert(CompilerConfig::is_c1_enabled(), "");

  if (UsePerfData) {
    EXCEPTION_MARK;

    DO_COUNTERS(INIT_COUNTER)

    if (HAS_PENDING_EXCEPTION) {
      vm_exit_during_initialization("Runtime1::init_counters() failed unexpectedly");
    }
  }
}
#undef INIT_COUNTER

#define PRINT_COUNTER(sub, name) { \
  if (_perf_##sub##_##name##_count != nullptr) { \
    jlong count = _perf_##sub##_##name##_count->get_value(); \
    if (count > 0) { \
      st->print_cr("  %-30s = %4ldms (elapsed) %4ldms (thread) (%5ld events)", #sub "::" #name, \
                   _perf_##sub##_##name##_timer->elapsed_counter_value_ms(), \
                   _perf_##sub##_##name##_timer->thread_counter_value_ms(), \
                   count); \
  }}}


void Runtime1::print_counters_on(outputStream* st) {
  if (UsePerfData && ProfileRuntimeCalls && CompilerConfig::is_c1_enabled()) {
    DO_COUNTERS(PRINT_COUNTER)
  } else {
    st->print_cr("  Runtime1: no info (%s is disabled)",
                 (!CompilerConfig::is_c1_enabled() ? "C1" : (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData")));
  }
}

#undef PRINT_COUNTER
#undef DO_COUNTERS

#ifndef PRODUCT
void Runtime1::print_statistics_on(outputStream* st) {
  st->print_cr("C1 Runtime statistics:");
  st->print_cr(" _resolve_invoke_virtual_cnt: %u", SharedRuntime::_resolve_virtual_ctr);
  st->print_cr(" _resolve_invoke_opt_virtual_cnt: %u", SharedRuntime::_resolve_opt_virtual_ctr);
  st->print_cr(" _resolve_invoke_static_cnt: %u", SharedRuntime::_resolve_static_ctr);
  st->print_cr(" _handle_wrong_method_cnt: %u", SharedRuntime::_wrong_method_ctr);
  st->print_cr(" _ic_miss_cnt: %u", SharedRuntime::_ic_miss_ctr);
  st->print_cr(" _generic_arraycopystub_cnt: %u", _generic_arraycopystub_cnt);
  st->print_cr(" _byte_arraycopy_cnt: %u", _byte_arraycopy_stub_cnt);
  st->print_cr(" _short_arraycopy_cnt: %u", _short_arraycopy_stub_cnt);
  st->print_cr(" _int_arraycopy_cnt: %u", _int_arraycopy_stub_cnt);
  st->print_cr(" _long_arraycopy_cnt: %u", _long_arraycopy_stub_cnt);
  st->print_cr(" _oop_arraycopy_cnt: %u", _oop_arraycopy_stub_cnt);
  st->print_cr(" _arraycopy_slowcase_cnt: %u", _arraycopy_slowcase_cnt);
  st->print_cr(" _arraycopy_checkcast_cnt: %u", _arraycopy_checkcast_cnt);
  st->print_cr(" _arraycopy_checkcast_attempt_cnt: %u", _arraycopy_checkcast_attempt_cnt);

  st->print_cr(" _new_type_array_slowcase_cnt: %u", _new_type_array_slowcase_cnt);
  st->print_cr(" _new_object_array_slowcase_cnt: %u", _new_object_array_slowcase_cnt);
  st->print_cr(" _new_instance_slowcase_cnt: %u", _new_instance_slowcase_cnt);
  st->print_cr(" _new_multi_array_slowcase_cnt: %u", _new_multi_array_slowcase_cnt);
  st->print_cr(" _monitorenter_slowcase_cnt: %u", _monitorenter_slowcase_cnt);
  st->print_cr(" _monitorexit_slowcase_cnt: %u", _monitorexit_slowcase_cnt);
  st->print_cr(" _patch_code_slowcase_cnt: %u", _patch_code_slowcase_cnt);

  st->print_cr(" _throw_range_check_exception_count: %u", _throw_range_check_exception_count);
  st->print_cr(" _throw_index_exception_count: %u", _throw_index_exception_count);
  st->print_cr(" _throw_div0_exception_count: %u", _throw_div0_exception_count);
  st->print_cr(" _throw_null_pointer_exception_count: %u", _throw_null_pointer_exception_count);
  st->print_cr(" _throw_class_cast_exception_count: %u", _throw_class_cast_exception_count);
  st->print_cr(" _throw_incompatible_class_change_error_count: %u", _throw_incompatible_class_change_error_count);
  st->print_cr(" _throw_count: %u", _throw_count);

  SharedRuntime::print_ic_miss_histogram_on(st);
  st->cr();
}
#endif // PRODUCT