
src/hotspot/share/c1/c1_Runtime1.cpp

--- old version:

 801 JRT_END
 802 
 803 // Cf. OptoRuntime::deoptimize_caller_frame
 804 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
 805   // Called from within the owner thread, so no need for safepoint
 806   RegisterMap reg_map(current,
 807                       RegisterMap::UpdateMap::skip,
 808                       RegisterMap::ProcessFrames::include,
 809                       RegisterMap::WalkContinuation::skip);
 810   frame stub_frame = current->last_frame();
 811   assert(stub_frame.is_runtime_frame(), "Sanity check");
 812   frame caller_frame = stub_frame.sender(&reg_map);
 813   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 814   assert(nm != nullptr, "Sanity check");
 815   methodHandle method(current, nm->method());
 816   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 817   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 818   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 819 
 820   if (action == Deoptimization::Action_make_not_entrant) {
 821     if (nm->make_not_entrant("C1 deoptimize")) {
 822       if (reason == Deoptimization::Reason_tenured) {
 823         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
 824         if (trap_mdo != nullptr) {
 825           trap_mdo->inc_tenure_traps();
 826         }
 827       }
 828     }
 829   }
 830 
 831   // Deoptimize the caller frame.
 832   Deoptimization::deoptimize_frame(current, caller_frame.id());
 833   // Return to the now deoptimized frame.
 834 JRT_END
 835 
 836 
 837 #ifndef DEOPTIMIZE_WHEN_PATCHING
 838 
 839 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
 840   Bytecode_field field_access(caller, bci);
 841   // This can be static or non-static field access

1093     ShouldNotReachHere();
1094   }
1095 
1096   if (deoptimize_for_volatile || deoptimize_for_atomic) {
1097     // At compile time we assumed the field wasn't volatile/atomic, but after
1098     // loading it turns out it was, so we have to throw the compiled
1099     // code out and let it be regenerated.
1100     if (TracePatching) {
1101       if (deoptimize_for_volatile) {
1102         tty->print_cr("Deoptimizing for patching volatile field reference");
1103       }
1104       if (deoptimize_for_atomic) {
1105         tty->print_cr("Deoptimizing for patching atomic field reference");
1106       }
1107     }
1108 
1109     // It's possible the nmethod was invalidated in the last
1110     // safepoint, but if it's still alive then make it not_entrant.
1111     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1112     if (nm != nullptr) {
1113       nm->make_not_entrant("C1 code patch");
1114     }
1115 
1116     Deoptimization::deoptimize_frame(current, caller_frame.id());
1117 
1118     // Return to the now deoptimized frame.
1119   }
1120 
1121   // Now copy code back
1122 
1123   {
1124     MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1125     //
1126     // Deoptimization may have happened while we waited for the lock.
1127     // In that case we don't bother to do any patching; we just return
1128     // and let the deopt happen.
1129     if (!caller_is_deopted(current)) {
1130       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
1131       address instr_pc = jump->jump_destination();
1132       NativeInstruction* ni = nativeInstruction_at(instr_pc);
1133       if (ni->is_jump()) {

1341   // (see another implementation above).
1342   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1343 
1344   if (TracePatching) {
1345     tty->print_cr("Deoptimizing because patch is needed");
1346   }
1347 
1348   RegisterMap reg_map(current,
1349                       RegisterMap::UpdateMap::skip,
1350                       RegisterMap::ProcessFrames::include,
1351                       RegisterMap::WalkContinuation::skip);
1352 
1353   frame runtime_frame = current->last_frame();
1354   frame caller_frame = runtime_frame.sender(&reg_map);
1355   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1356 
1357   if (is_patching_needed(current, stub_id)) {
1358     // Make sure the nmethod is invalidated, i.e. made not entrant.
1359     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1360     if (nm != nullptr) {
1361       nm->make_not_entrant("C1 deoptimize for patching");
1362     }
1363   }
1364 
1365   Deoptimization::deoptimize_frame(current, caller_frame.id());
1366   // Return to the now deoptimized frame.
1367   postcond(caller_is_deopted(current));
1368 }
1369 
1370 #endif // DEOPTIMIZE_WHEN_PATCHING
1371 
1372 // Entry point for compiled code. We want to patch an nmethod.
1373 // We don't do a normal VM transition here because we want to
1374 // know, after the patching is complete and any safepoint(s) are
1375 // taken, whether the calling nmethod was deoptimized. We do this
1376 // by calling a helper method which does the normal VM transition;
1377 // when it completes, we can check for deoptimization. This
1378 // simplifies the assembly code in the cpu directories.
1379 //
1380 int Runtime1::move_klass_patching(JavaThread* current) {
1381 //

1469   // JVM takes the whole %eax as the return value, which may misinterpret
1470   // the return value as a boolean true.
1471 
1472   assert(mirror != nullptr, "should null-check on mirror before calling");
1473   Klass* k = java_lang_Class::as_Klass(mirror);
1474   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1475 JRT_END
1476 
1477 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1478   ResourceMark rm;
1479 
1480   RegisterMap reg_map(current,
1481                       RegisterMap::UpdateMap::skip,
1482                       RegisterMap::ProcessFrames::include,
1483                       RegisterMap::WalkContinuation::skip);
1484   frame runtime_frame = current->last_frame();
1485   frame caller_frame = runtime_frame.sender(&reg_map);
1486 
1487   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1488   assert(nm != nullptr, "no more nmethod?");
1489   nm->make_not_entrant("C1 predicate failed trap");
1490 
1491   methodHandle m(current, nm->method());
1492   MethodData* mdo = m->method_data();
1493 
1494   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1495     // Build an MDO.  Ignore errors like OutOfMemory;
1496     // that simply means we won't have an MDO to update.
1497     Method::build_profiling_method_data(m, THREAD);
1498     if (HAS_PENDING_EXCEPTION) {
1499       // Only metaspace OOM is expected. No Java code executed.
1500       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1501       CLEAR_PENDING_EXCEPTION;
1502     }
1503     mdo = m->method_data();
1504   }
1505 
1506   if (mdo != nullptr) {
1507     mdo->inc_trap_count(Deoptimization::Reason_none);
1508   }
1509 

+++ new version:

 801 JRT_END
 802 
 803 // Cf. OptoRuntime::deoptimize_caller_frame
 804 JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
 805   // Called from within the owner thread, so no need for safepoint
 806   RegisterMap reg_map(current,
 807                       RegisterMap::UpdateMap::skip,
 808                       RegisterMap::ProcessFrames::include,
 809                       RegisterMap::WalkContinuation::skip);
 810   frame stub_frame = current->last_frame();
 811   assert(stub_frame.is_runtime_frame(), "Sanity check");
 812   frame caller_frame = stub_frame.sender(&reg_map);
 813   nmethod* nm = caller_frame.cb()->as_nmethod_or_null();
 814   assert(nm != nullptr, "Sanity check");
 815   methodHandle method(current, nm->method());
 816   assert(nm == CodeCache::find_nmethod(caller_frame.pc()), "Should be the same");
 817   Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request);
 818   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 819 
 820   if (action == Deoptimization::Action_make_not_entrant) {
 821     if (nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE)) {
 822       if (reason == Deoptimization::Reason_tenured) {
 823         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
 824         if (trap_mdo != nullptr) {
 825           trap_mdo->inc_tenure_traps();
 826         }
 827       }
 828     }
 829   }
 830 
 831   // Deoptimize the caller frame.
 832   Deoptimization::deoptimize_frame(current, caller_frame.id());
 833   // Return to the now deoptimized frame.
 834 JRT_END
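
A note on the trap_request decoding above: the action and the reason travel in a single jint, packed and unpacked by the deoptimization machinery. The sketch below illustrates the general bit-packing idea with made-up field widths and simplified helper names; HotSpot's actual encoding in Deoptimization differs and carries more information, so treat this purely as an illustration.

#include <cassert>
#include <cstdio>

// Made-up reason/action values, loosely mirroring the names used above.
enum DeoptReason { Reason_none = 0, Reason_tenured = 1 };
enum DeoptAction { Action_none = 0, Action_make_not_entrant = 1 };

// Illustrative layout only: low 5 bits hold the reason, the next 3 bits
// hold the action. HotSpot's real layout is different.
const int reason_bits = 5;
const int reason_mask = (1 << reason_bits) - 1;
const int action_mask = (1 << 3) - 1;

int make_trap_request(DeoptReason reason, DeoptAction action) {
  return (action << reason_bits) | reason;
}

DeoptReason trap_request_reason(int request) {
  return static_cast<DeoptReason>(request & reason_mask);
}

DeoptAction trap_request_action(int request) {
  return static_cast<DeoptAction>((request >> reason_bits) & action_mask);
}

int main() {
  int req = make_trap_request(Reason_tenured, Action_make_not_entrant);
  assert(trap_request_reason(req) == Reason_tenured);
  assert(trap_request_action(req) == Action_make_not_entrant);
  printf("packed request = 0x%x\n", req);
  return 0;
}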
 835 
 836 
 837 #ifndef DEOPTIMIZE_WHEN_PATCHING
 838 
 839 static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TRAPS) {
 840   Bytecode_field field_access(caller, bci);
 841   // This can be static or non-static field access

1093     ShouldNotReachHere();
1094   }
1095 
1096   if (deoptimize_for_volatile || deoptimize_for_atomic) {
1097     // At compile time we assumed the field wasn't volatile/atomic, but after
1098     // loading it turns out it was, so we have to throw the compiled
1099     // code out and let it be regenerated.
1100     if (TracePatching) {
1101       if (deoptimize_for_volatile) {
1102         tty->print_cr("Deoptimizing for patching volatile field reference");
1103       }
1104       if (deoptimize_for_atomic) {
1105         tty->print_cr("Deoptimizing for patching atomic field reference");
1106       }
1107     }
1108 
1109     // It's possible the nmethod was invalidated in the last
1110     // safepoint, but if it's still alive then make it not_entrant.
1111     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1112     if (nm != nullptr) {
1113       nm->make_not_entrant(nmethod::InvalidationReason::C1_CODEPATCH);
1114     }
1115 
1116     Deoptimization::deoptimize_frame(current, caller_frame.id());
1117 
1118     // Return to the now deoptimized frame.
1119   }
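
As a rough analogy for why the code is thrown out rather than kept: machine code emitted under a "not volatile" assumption simply lacks the memory ordering the field actually needs. The plain-C++ sketch below (an analogy, not HotSpot IR) contrasts an unordered load with the acquire-style load a volatile access requires.

#include <atomic>
#include <cstdio>

struct PlainHolder  { int value = 0; };              // read as a plain load
struct SharedHolder { std::atomic<int> value{0}; };  // needs ordered access

int read_plain(const PlainHolder& h) {
  return h.value;  // may be hoisted, cached, or reordered by the compiler
}

int read_acquire(const SharedHolder& h) {
  return h.value.load(std::memory_order_acquire);  // ordering guaranteed
}

int main() {
  PlainHolder p;
  SharedHolder s;
  printf("%d %d\n", read_plain(p), read_acquire(s));
  return 0;
}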
1120 
1121   // Now copy code back
1122 
1123   {
1124     MutexLocker ml_code(current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1125     //
1126     // Deoptimization may have happened while we waited for the lock.
1127     // In that case we don't bother to do any patching; we just return
1128     // and let the deopt happen.
1129     if (!caller_is_deopted(current)) {
1130       NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
1131       address instr_pc = jump->jump_destination();
1132       NativeInstruction* ni = nativeInstruction_at(instr_pc);
1133       if (ni->is_jump()) {
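
The comment block above is a classic re-check-under-lock pattern: whether the caller has already been deoptimized must be tested again once CodeCache_lock is held, because the answer may have changed while the thread was blocked. A minimal standalone sketch of the same pattern, with std::mutex and illustrative names standing in for the HotSpot primitives:

#include <atomic>
#include <mutex>

std::mutex code_lock;                    // stands in for CodeCache_lock
std::atomic<bool> caller_deopted{false}; // stands in for caller_is_deopted()

// The deopt may have happened while this thread was blocked acquiring the
// lock, so the condition is tested again before any patching is done.
void patch_if_still_valid() {
  std::lock_guard<std::mutex> guard(code_lock);
  if (caller_deopted.load()) {
    return;  // the deopt won the race; skip patching and let it proceed
  }
  // ... patching would go here, validated under the lock ...
}

int main() {
  caller_deopted.store(true);  // simulate a deopt that happened meanwhile
  patch_if_still_valid();      // returns without touching any code
  return 0;
}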

1341   // (see another implementation above).
1342   MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
1343 
1344   if (TracePatching) {
1345     tty->print_cr("Deoptimizing because patch is needed");
1346   }
1347 
1348   RegisterMap reg_map(current,
1349                       RegisterMap::UpdateMap::skip,
1350                       RegisterMap::ProcessFrames::include,
1351                       RegisterMap::WalkContinuation::skip);
1352 
1353   frame runtime_frame = current->last_frame();
1354   frame caller_frame = runtime_frame.sender(&reg_map);
1355   assert(caller_frame.is_compiled_frame(), "Wrong frame type");
1356 
1357   if (is_patching_needed(current, stub_id)) {
1358     // Make sure the nmethod is invalidated, i.e. made not entrant.
1359     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1360     if (nm != nullptr) {
1361       nm->make_not_entrant(nmethod::InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING);
1362     }
1363   }
1364 
1365   Deoptimization::deoptimize_frame(current, caller_frame.id());
1366   // Return to the now deoptimized frame.
1367   postcond(caller_is_deopted(current));
1368 }
1369 
1370 #endif // DEOPTIMIZE_WHEN_PATCHING
1371 
1372 // Entry point for compiled code. We want to patch an nmethod.
1373 // We don't do a normal VM transition here because we want to
1374 // know, after the patching is complete and any safepoint(s) are
1375 // taken, whether the calling nmethod was deoptimized. We do this
1376 // by calling a helper method which does the normal VM transition;
1377 // when it completes, we can check for deoptimization. This
1378 // simplifies the assembly code in the cpu directories.
1379 //
1380 int Runtime1::move_klass_patching(JavaThread* current) {
1381 //
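
The comment above describes a wrapper pattern: the raw entry point performs no VM transition itself, delegates to a helper that does the transition and the patching, and only after the helper returns checks whether the caller was deoptimized, reporting that back to the assembly stub. A schematic, self-contained sketch of that shape (every name here is an illustrative stand-in, not a real HotSpot function):

#include <cstdio>

struct JavaThread { bool deopted = false; };  // opaque stand-in

// Stands in for the helper that performs the normal VM transition, does
// the patching, and may cross safepoints that deoptimize the caller.
static void patch_code_in_vm(JavaThread* t) {
  t->deopted = true;  // pretend a safepoint deoptimized the caller
}

static bool caller_was_deoptimized(JavaThread* t) {
  return t->deopted;
}

// The raw entry point: no transition of its own, just delegate and then
// check for deoptimization once the helper has completed.
static int move_klass_patching_sketch(JavaThread* current) {
  patch_code_in_vm(current);
  return caller_was_deoptimized(current) ? 1 : 0;
}

int main() {
  JavaThread t;
  printf("caller deopted = %d\n", move_klass_patching_sketch(&t));
  return 0;
}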

1469   // JVM takes the whole %eax as the return value, which may misinterpret
1470   // the return value as a boolean true.
1471 
1472   assert(mirror != nullptr, "should null-check on mirror before calling");
1473   Klass* k = java_lang_Class::as_Klass(mirror);
1474   return (k != nullptr && obj != nullptr && obj->is_a(k)) ? 1 : 0;
1475 JRT_END
1476 
1477 JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
1478   ResourceMark rm;
1479 
1480   RegisterMap reg_map(current,
1481                       RegisterMap::UpdateMap::skip,
1482                       RegisterMap::ProcessFrames::include,
1483                       RegisterMap::WalkContinuation::skip);
1484   frame runtime_frame = current->last_frame();
1485   frame caller_frame = runtime_frame.sender(&reg_map);
1486 
1487   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
1488   assert(nm != nullptr, "no more nmethod?");
1489   nm->make_not_entrant(nmethod::InvalidationReason::C1_PREDICATE_FAILED_TRAP);
1490 
1491   methodHandle m(current, nm->method());
1492   MethodData* mdo = m->method_data();
1493 
1494   if (mdo == nullptr && !HAS_PENDING_EXCEPTION) {
1495     // Build an MDO.  Ignore errors like OutOfMemory;
1496     // that simply means we won't have an MDO to update.
1497     Method::build_profiling_method_data(m, THREAD);
1498     if (HAS_PENDING_EXCEPTION) {
1499       // Only metaspace OOM is expected. No Java code executed.
1500       assert((PENDING_EXCEPTION->is_a(vmClasses::OutOfMemoryError_klass())), "we expect only an OOM error here");
1501       CLEAR_PENDING_EXCEPTION;
1502     }
1503     mdo = m->method_data();
1504   }
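
The block above builds the MDO lazily and deliberately swallows a metaspace OutOfMemoryError: a missing profile is acceptable, a crash is not. A minimal sketch of that tolerate-allocation-failure idea in plain C++, with std::nothrow standing in for the pending-exception handling (the names are illustrative):

#include <cstdio>
#include <new>

struct MethodData { int trap_count = 0; };  // illustrative stand-in

// Try to build the profiling structure; returning nullptr on allocation
// failure is the analogue of clearing the pending metaspace OOM above.
MethodData* build_method_data_if_possible() {
  return new (std::nothrow) MethodData();
}

int main() {
  MethodData* mdo = build_method_data_if_possible();
  if (mdo != nullptr) {
    mdo->trap_count++;  // analogue of mdo->inc_trap_count(Reason_none)
  }
  printf("mdo %s\n", mdo != nullptr ? "updated" : "unavailable");
  delete mdo;
  return 0;
}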
1505 
1506   if (mdo != nullptr) {
1507     mdo->inc_trap_count(Deoptimization::Reason_none);
1508   }
1509 
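
Taken together, the four changed lines are the substance of this patch: each free-form string passed to make_not_entrant is replaced by a typed nmethod::InvalidationReason constant. The sketch below illustrates what a typed reason buys over a string; only the four C1_* constant names are confirmed by this diff, while the enum shape and the helpers around it are assumptions for illustration.

#include <cstdio>

// Illustrative sketch of a typed invalidation-reason API. Only the four
// C1_* names below appear in this diff; everything else is assumed.
class nmethod_sketch {
 public:
  enum class InvalidationReason {
    C1_DEOPTIMIZE,
    C1_CODEPATCH,
    C1_DEOPTIMIZE_FOR_PATCHING,
    C1_PREDICATE_FAILED_TRAP
  };

  // Unlike a free-form string, a typed enum can be switched over
  // exhaustively, so a forgotten reason shows up at compile time instead
  // of as a silent logging inconsistency.
  static const char* reason_name(InvalidationReason r) {
    switch (r) {
      case InvalidationReason::C1_DEOPTIMIZE:              return "C1 deoptimize";
      case InvalidationReason::C1_CODEPATCH:               return "C1 code patch";
      case InvalidationReason::C1_DEOPTIMIZE_FOR_PATCHING: return "C1 deoptimize for patching";
      case InvalidationReason::C1_PREDICATE_FAILED_TRAP:   return "C1 predicate failed trap";
    }
    return "unknown";
  }

  bool make_not_entrant(InvalidationReason reason) {
    printf("made not entrant: %s\n", reason_name(reason));
    return true;
  }
};

int main() {
  nmethod_sketch nm;
  nm.make_not_entrant(nmethod_sketch::InvalidationReason::C1_DEOPTIMIZE);
  return 0;
}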