63 #include "prims/methodHandles.hpp"
64 #include "prims/vectorSupport.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/basicLock.inline.hpp"
67 #include "runtime/continuation.hpp"
68 #include "runtime/continuationEntry.inline.hpp"
69 #include "runtime/deoptimization.hpp"
70 #include "runtime/escapeBarrier.hpp"
71 #include "runtime/fieldDescriptor.hpp"
72 #include "runtime/fieldDescriptor.inline.hpp"
73 #include "runtime/frame.inline.hpp"
74 #include "runtime/handles.inline.hpp"
75 #include "runtime/interfaceSupport.inline.hpp"
76 #include "runtime/javaThread.hpp"
77 #include "runtime/jniHandles.inline.hpp"
78 #include "runtime/keepStackGCProcessed.hpp"
79 #include "runtime/lightweightSynchronizer.hpp"
80 #include "runtime/lockStack.inline.hpp"
81 #include "runtime/objectMonitor.inline.hpp"
82 #include "runtime/osThread.hpp"
83 #include "runtime/safepointVerifiers.hpp"
84 #include "runtime/sharedRuntime.hpp"
85 #include "runtime/signature.hpp"
86 #include "runtime/stackFrameStream.inline.hpp"
87 #include "runtime/stackValue.hpp"
88 #include "runtime/stackWatermarkSet.hpp"
89 #include "runtime/stubRoutines.hpp"
90 #include "runtime/synchronizer.inline.hpp"
91 #include "runtime/threadSMR.hpp"
92 #include "runtime/threadWXSetters.inline.hpp"
93 #include "runtime/vframe.hpp"
94 #include "runtime/vframeArray.hpp"
95 #include "runtime/vframe_hp.hpp"
96 #include "runtime/vmOperations.hpp"
97 #include "utilities/checkedCast.hpp"
98 #include "utilities/events.hpp"
99 #include "utilities/growableArray.hpp"
100 #include "utilities/macros.hpp"
101 #include "utilities/preserveException.hpp"
102 #include "utilities/xmlstream.hpp"
103 #if INCLUDE_JFR
104 #include "jfr/jfrEvents.hpp"
105 #include "jfr/metadata/jfrSerializer.hpp"
106 #endif
107
108 uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
109 uint64_t DeoptimizationScope::_active_deopt_gen = 1;
110 bool DeoptimizationScope::_committing_in_progress = false;
111
112 DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
113 DEBUG_ONLY(_deopted = false;)
114
115 MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
116 // If there is nothing to deopt, _required_gen is the same as committed.
264 return checked_cast<int>(result);
265 }
266
267 void Deoptimization::UnrollBlock::print() {
268 ResourceMark rm;
269 stringStream st;
270 st.print_cr("UnrollBlock");
271 st.print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
272 st.print( " frame_sizes: ");
273 for (int index = 0; index < number_of_frames(); index++) {
274 st.print(INTX_FORMAT " ", frame_sizes()[index]);
275 }
276 st.cr();
277 tty->print_raw(st.freeze());
278 }
279
280 // In order to make fetch_unroll_info work properly with escape
281 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
282 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
283 // which is called from the method fetch_unroll_info_helper below.
284 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
285 // fetch_unroll_info() is called at the beginning of the deoptimization
286 // handler. Note this fact before we start generating temporary frames
287 // that can confuse an asynchronous stack walker. This counter is
288 // decremented at the end of unpack_frames().
289 current->inc_in_deopt_handler();
290
291 if (exec_mode == Unpack_exception) {
292 // When we get here, a callee has thrown an exception into a deoptimized
293 // frame. That throw might have deferred stack watermark checking until
294 // after unwinding. So we deal with such deferred requests here.
295 StackWatermarkSet::after_unwind(current);
296 }
297
298 return fetch_unroll_info_helper(current, exec_mode);
299 JRT_END
300
301 #if COMPILER2_OR_JVMCI
302 // print information about reallocated objects
303 static void print_objects(JavaThread* deoptee_thread,
304 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
836
837 assert(f->is_interpreted_frame(), "must be interpreted");
838 }
839
840 #ifndef PRODUCT
841 static bool falls_through(Bytecodes::Code bc) {
842 switch (bc) {
843 // List may be incomplete. Here we really only care about bytecodes where compiled code
844 // can deoptimize.
845 case Bytecodes::_goto:
846 case Bytecodes::_goto_w:
847 case Bytecodes::_athrow:
848 return false;
849 default:
850 return true;
851 }
852 }
853 #endif
854
855 // Return BasicType of value being returned
856 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
857 assert(thread == JavaThread::current(), "pre-condition");
858
859 // We are already active in the special DeoptResourceMark; any ResourceObj's we
860 // allocate will be freed at the end of the routine.
861
862 // JRT_LEAF methods don't normally allocate handles and there is a
863 // NoHandleMark to enforce that. It is actually safe to use Handles
864 // in a JRT_LEAF method, and sometimes desirable, but to do so we
865 // must use ResetNoHandleMark to bypass the NoHandleMark, and
866 // then use a HandleMark to ensure any Handles we do create are
867 // cleaned up in this scope.
868 ResetNoHandleMark rnhm;
869 HandleMark hm(thread);
870
871 frame stub_frame = thread->last_frame();
872
873 Continuation::notify_deopt(thread, stub_frame.sp());
874
875 // Since the frame to unpack is the top frame of this thread, the vframe_array_head
876 // must point to the vframeArray for the unpack frame.
1747 if (monitors != nullptr) {
1748 // Unlock in reverse order starting from most nested monitor.
1749 for (int j = (monitors->number_of_monitors() - 1); j >= 0; j--) {
1750 BasicObjectLock* src = monitors->at(j);
1751 if (src->obj() != nullptr) {
1752 ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
1753 }
1754 }
1755 array->element(i)->free_monitors();
1756 #ifdef ASSERT
1757 array->element(i)->set_removed_monitors();
1758 #endif
1759 }
1760 }
1761 }
1762 #endif
1763
1764 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1765 assert(fr.can_be_deoptimized(), "checking frame type");
1766
1767 gather_statistics(reason, Action_none, Bytecodes::_illegal);
1768
1769 if (LogCompilation && xtty != nullptr) {
1770 nmethod* nm = fr.cb()->as_nmethod_or_null();
1771 assert(nm != nullptr, "only compiled methods can deopt");
1772
1773 ttyLocker ttyl;
1774 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1775 nm->log_identity(xtty);
1776 xtty->end_head();
1777 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1778 xtty->begin_elem("jvms bci='%d'", sd->bci());
1779 xtty->method(sd->method());
1780 xtty->end_elem();
1781 if (sd->is_top()) break;
1782 }
1783 xtty->tail("deoptimized");
1784 }
1785
1786 Continuation::notify_deopt(thread, fr.sp());
1787
1788 // Patch the compiled method so that when execution returns to it we will
1789 // deopt the execution state and return to the interpreter.
1790 fr.deoptimize(thread);
1791 }
1792
2002 static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci,
2003 const char* reason_name, const char* reason_action) {
2004 LogTarget(Debug, deoptimization) lt;
2005 if (lt.is_enabled()) {
2006 LogStream ls(lt);
2007 bool is_osr = nm->is_osr_method();
2008 ls.print("cid=%4d %s level=%d",
2009 nm->compile_id(), (is_osr ? "osr" : " "), nm->comp_level());
2010 ls.print(" %s", tm->name_and_sig_as_C_string());
2011 ls.print(" trap_bci=%d ", trap_bci);
2012 if (is_osr) {
2013 ls.print("osr_bci=%d ", nm->osr_entry_bci());
2014 }
2015 ls.print("%s ", reason_name);
2016 ls.print("%s ", reason_action);
2017 ls.print_cr("pc=" INTPTR_FORMAT " relative_pc=" INTPTR_FORMAT,
2018 pc, fr.pc() - nm->code_begin());
2019 }
2020 }
2021
2022 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint trap_request)) {
2023 HandleMark hm(current);
2024
2025 // uncommon_trap() is called at the beginning of the uncommon trap
2026 // handler. Note this fact before we start generating temporary frames
2027 // that can confuse an asynchronous stack walker. This counter is
2028 // decremented at the end of unpack_frames().
2029
2030 current->inc_in_deopt_handler();
2031
2032 #if INCLUDE_JVMCI
2033 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
2034 RegisterMap reg_map(current,
2035 RegisterMap::UpdateMap::include,
2036 RegisterMap::ProcessFrames::include,
2037 RegisterMap::WalkContinuation::skip);
2038 #else
2039 RegisterMap reg_map(current,
2040 RegisterMap::UpdateMap::skip,
2041 RegisterMap::ProcessFrames::include,
2042 RegisterMap::WalkContinuation::skip);
2078 #if INCLUDE_JVMCI
2079 jlong speculation = current->pending_failed_speculation();
2080 if (nm->is_compiled_by_jvmci()) {
2081 nm->update_speculation(current);
2082 } else {
2083 assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
2084 }
2085
2086 if (trap_bci == SynchronizationEntryBCI) {
2087 trap_bci = 0;
2088 current->set_pending_monitorenter(true);
2089 }
2090
2091 if (reason == Deoptimization::Reason_transfer_to_interpreter) {
2092 current->set_pending_transfer_to_interpreter(true);
2093 }
2094 #endif
2095
2096 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
2097 // Record this event in the histogram.
2098 gather_statistics(reason, action, trap_bc);
2099
2100 // Ensure that we can record deopt. history:
2101 bool create_if_missing = ProfileTraps;
2102
2103 methodHandle profiled_method;
2104 #if INCLUDE_JVMCI
2105 if (nm->is_compiled_by_jvmci()) {
2106 profiled_method = methodHandle(current, nm->method());
2107 } else {
2108 profiled_method = trap_method;
2109 }
2110 #else
2111 profiled_method = trap_method;
2112 #endif
2113
2114 MethodData* trap_mdo =
2115 get_method_data(current, profiled_method, create_if_missing);
2116
2117 { // Log Deoptimization event for JFR, UL and event system
2118 Method* tm = trap_method();
2590 bool ignore_maybe_prior_recompile;
2591 assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2592 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2593 bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2594
2595 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
2596 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
2597
2598 query_update_method_data(trap_mdo, trap_bci,
2599 (DeoptReason)reason,
2600 update_total_counts,
2601 #if INCLUDE_JVMCI
2602 false,
2603 #endif
2604 nullptr,
2605 ignore_this_trap_count,
2606 ignore_maybe_prior_trap,
2607 ignore_maybe_prior_recompile);
2608 }
2609
2610 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* current, jint trap_request, jint exec_mode) {
2611 // Enable WXWrite: current function is called from methods compiled by C2 directly
2612 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
2613
2614 // Still in Java, no safepoints
2615 {
2616 // This enters VM and may safepoint
2617 uncommon_trap_inner(current, trap_request);
2618 }
2619 HandleMark hm(current);
2620 return fetch_unroll_info_helper(current, exec_mode);
2621 }
2622
2623 // Local derived constants.
2624 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2625 const int DS_REASON_MASK = ((uint)DataLayout::trap_mask) >> 1;
2626 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
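// DS_REASON_MASK covers the low bits of a trap_state value (the recorded reason),
// while DS_RECOMPILE_BIT is the remaining high bit, set once a recompile has been
// requested for that BCI (it is stripped off in trap_state_reason() below).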
2627
2628 //---------------------------trap_state_reason---------------------------------
2629 Deoptimization::DeoptReason
2630 Deoptimization::trap_state_reason(int trap_state) {
2631 // This assert provides the link between the width of DataLayout::trap_bits
2632 // and the encoding of "recorded" reasons. It ensures there are enough
2633 // bits to store all needed reasons in the per-BCI MDO profile.
2634 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2635 int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2636 trap_state -= recompile_bit;
2637 if (trap_state == DS_REASON_MASK) {
2638 return Reason_many;
2639 } else {
2640 assert((int)Reason_none == 0, "state=0 => Reason_none");
2641 return (DeoptReason)trap_state;
2790 size_t len;
2791 if (unloaded_class_index < 0) {
2792 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2793 reason, action
2794 #if INCLUDE_JVMCI
2795 ,debug_id
2796 #endif
2797 );
2798 } else {
2799 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2800 reason, action, unloaded_class_index
2801 #if INCLUDE_JVMCI
2802 ,debug_id
2803 #endif
2804 );
2805 }
2806 return buf;
2807 }
2808
2809 juint Deoptimization::_deoptimization_hist
2810 [Deoptimization::Reason_LIMIT]
2811 [1 + Deoptimization::Action_LIMIT]
2812 [Deoptimization::BC_CASE_LIMIT]
2813 = {0};
2814
2815 enum {
2816 LSB_BITS = 8,
2817 LSB_MASK = right_n_bits(LSB_BITS)
2818 };
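// Each histogram cell packs two values into one juint: the bytecode that a
// (reason, action) slot is devoted to sits in the low LSB_BITS bits, and the
// event count sits above it (each hit adds 1 << LSB_BITS).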
2819
2820 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2821 Bytecodes::Code bc) {
2822 assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2823 assert(action >= 0 && action < Action_LIMIT, "oob");
2824 _deoptimization_hist[Reason_none][0][0] += 1; // total
2825 _deoptimization_hist[reason][0][0] += 1; // per-reason total
2826 juint* cases = _deoptimization_hist[reason][1+action];
2827 juint* bc_counter_addr = nullptr;
2828 juint bc_counter = 0;
2829 // Look for an unused counter, or an exact match to this BC.
2830 if (bc != Bytecodes::_illegal) {
2831 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2832 juint* counter_addr = &cases[bc_case];
2833 juint counter = *counter_addr;
2834 if ((counter == 0 && bc_counter_addr == nullptr)
2835 || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2836 // this counter is either free or is already devoted to this BC
2837 bc_counter_addr = counter_addr;
2838 bc_counter = counter | bc;
2839 }
2840 }
2841 }
2842 if (bc_counter_addr == nullptr) {
2843 // Overflow, or no given bytecode.
2844 bc_counter_addr = &cases[BC_CASE_LIMIT-1];
2845 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB
2846 }
2847 *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2848 }
2849
2850 jint Deoptimization::total_deoptimization_count() {
2851 return _deoptimization_hist[Reason_none][0][0];
2852 }
2853
2854 // Get the deopt count for a specific reason and a specific action. If either
2855 // one of 'reason' or 'action' is null, the method returns the sum of all
2856 // deoptimizations with the specific 'action' or 'reason' respectively.
2857 // If both arguments are null, the method returns the total deopt count.
2858 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
2859 if (reason_str == nullptr && action_str == nullptr) {
2860 return total_deoptimization_count();
2861 }
2862 juint counter = 0;
2863 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2864 if (reason_str == nullptr || !strcmp(reason_str, trap_reason_name(reason))) {
2865 for (int action = 0; action < Action_LIMIT; action++) {
2866 if (action_str == nullptr || !strcmp(action_str, trap_action_name(action))) {
2867 juint* cases = _deoptimization_hist[reason][1+action];
2868 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2869 counter += cases[bc_case] >> LSB_BITS;
2870 }
2871 }
2872 }
2873 }
2874 }
2875 return counter;
2876 }
2877
2878 void Deoptimization::print_statistics() {
2879 juint total = total_deoptimization_count();
2880 juint account = total;
2881 if (total != 0) {
2882 ttyLocker ttyl;
2883 if (xtty != nullptr) xtty->head("statistics type='deoptimization'");
2884 tty->print_cr("Deoptimization traps recorded:");
2885 #define PRINT_STAT_LINE(name, r) \
2886 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
2887 PRINT_STAT_LINE("total", total);
2888 // For each non-zero entry in the histogram, print the reason,
2889 // the action, and (if specifically known) the type of bytecode.
2890 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2891 for (int action = 0; action < Action_LIMIT; action++) {
2892 juint* cases = _deoptimization_hist[reason][1+action];
2893 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2894 juint counter = cases[bc_case];
2895 if (counter != 0) {
2896 char name[1*K];
2897 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2898 if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
2899 bc = Bytecodes::_illegal;
2900 os::snprintf_checked(name, sizeof(name), "%s/%s/%s",
2901 trap_reason_name(reason),
2902 trap_action_name(action),
2903 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
2904 juint r = counter >> LSB_BITS;
2905 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
2906 account -= r;
2907 }
2908 }
2909 }
2910 }
2911 if (account != 0) {
2912 PRINT_STAT_LINE("unaccounted", account);
2913 }
2914 #undef PRINT_STAT_LINE
2915 if (xtty != nullptr) xtty->tail("statistics");
2916 }
2917 }
2918
2919 #else // COMPILER2_OR_JVMCI
2920
2921
2922 // Stubs for a C1-only system.
2923 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
2924 return false;
2925 }
2926
2927 const char* Deoptimization::trap_reason_name(int reason) {
2928 return "unknown";
2929 }
2930
2931 jint Deoptimization::total_deoptimization_count() {
2932 return 0;
2933 }
2934
2935 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
2936 return 0;
2937 }
2938
2939 void Deoptimization::print_statistics() {
2940 // no output
2941 }
2942
2943 void
2944 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
2945 // no update
2946 }
2947
2948 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
2949 return 0;
2950 }
2951
2952 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
2953 Bytecodes::Code bc) {
2954 // no update
2955 }
2956
2957 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
2958 int trap_state) {
2959 jio_snprintf(buf, buflen, "#%d", trap_state);
2960 return buf;
2961 }
2962
2963 #endif // COMPILER2_OR_JVMCI
|
63 #include "prims/methodHandles.hpp"
64 #include "prims/vectorSupport.hpp"
65 #include "runtime/atomic.hpp"
66 #include "runtime/basicLock.inline.hpp"
67 #include "runtime/continuation.hpp"
68 #include "runtime/continuationEntry.inline.hpp"
69 #include "runtime/deoptimization.hpp"
70 #include "runtime/escapeBarrier.hpp"
71 #include "runtime/fieldDescriptor.hpp"
72 #include "runtime/fieldDescriptor.inline.hpp"
73 #include "runtime/frame.inline.hpp"
74 #include "runtime/handles.inline.hpp"
75 #include "runtime/interfaceSupport.inline.hpp"
76 #include "runtime/javaThread.hpp"
77 #include "runtime/jniHandles.inline.hpp"
78 #include "runtime/keepStackGCProcessed.hpp"
79 #include "runtime/lightweightSynchronizer.hpp"
80 #include "runtime/lockStack.inline.hpp"
81 #include "runtime/objectMonitor.inline.hpp"
82 #include "runtime/osThread.hpp"
83 #include "runtime/perfData.inline.hpp"
84 #include "runtime/safepointVerifiers.hpp"
85 #include "runtime/sharedRuntime.hpp"
86 #include "runtime/signature.hpp"
87 #include "runtime/stackFrameStream.inline.hpp"
88 #include "runtime/stackValue.hpp"
89 #include "runtime/stackWatermarkSet.hpp"
90 #include "runtime/stubRoutines.hpp"
91 #include "runtime/synchronizer.inline.hpp"
92 #include "runtime/threadSMR.hpp"
93 #include "runtime/threadWXSetters.inline.hpp"
94 #include "runtime/vframe.hpp"
95 #include "runtime/vframeArray.hpp"
96 #include "runtime/vframe_hp.hpp"
97 #include "runtime/vmOperations.hpp"
98 #include "services/management.hpp"
99 #include "utilities/checkedCast.hpp"
100 #include "utilities/events.hpp"
101 #include "utilities/growableArray.hpp"
102 #include "utilities/macros.hpp"
103 #include "utilities/preserveException.hpp"
104 #include "utilities/xmlstream.hpp"
105 #if INCLUDE_JFR
106 #include "jfr/jfrEvents.hpp"
107 #include "jfr/metadata/jfrSerializer.hpp"
108 #endif
109
110 uint64_t DeoptimizationScope::_committed_deopt_gen = 0;
111 uint64_t DeoptimizationScope::_active_deopt_gen = 1;
112 bool DeoptimizationScope::_committing_in_progress = false;
113
114 DeoptimizationScope::DeoptimizationScope() : _required_gen(0) {
115 DEBUG_ONLY(_deopted = false;)
116
117 MutexLocker ml(NMethodState_lock, Mutex::_no_safepoint_check_flag);
118 // If there is nothing to deopt, _required_gen is the same as committed.
266 return checked_cast<int>(result);
267 }
268
269 void Deoptimization::UnrollBlock::print() {
270 ResourceMark rm;
271 stringStream st;
272 st.print_cr("UnrollBlock");
273 st.print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
274 st.print( " frame_sizes: ");
275 for (int index = 0; index < number_of_frames(); index++) {
276 st.print(INTX_FORMAT " ", frame_sizes()[index]);
277 }
278 st.cr();
279 tty->print_raw(st.freeze());
280 }
281
282 // In order to make fetch_unroll_info work properly with escape
283 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY.
284 // The actual reallocation of previously eliminated objects occurs in realloc_objects,
285 // which is called from the method fetch_unroll_info_helper below.
286 JRT_BLOCK_ENTRY_PROF(Deoptimization::UnrollBlock*, Deoptimization, fetch_unroll_info, Deoptimization::fetch_unroll_info(JavaThread* current, int exec_mode))
287 // fetch_unroll_info() is called at the beginning of the deoptimization
288 // handler. Note this fact before we start generating temporary frames
289 // that can confuse an asynchronous stack walker. This counter is
290 // decremented at the end of unpack_frames().
291 current->inc_in_deopt_handler();
292
293 if (exec_mode == Unpack_exception) {
294 // When we get here, a callee has thrown an exception into a deoptimized
295 // frame. That throw might have deferred stack watermark checking until
296 // after unwinding. So we deal with such deferred requests here.
297 StackWatermarkSet::after_unwind(current);
298 }
299
300 return fetch_unroll_info_helper(current, exec_mode);
301 JRT_END
302
303 #if COMPILER2_OR_JVMCI
304 // print information about reallocated objects
305 static void print_objects(JavaThread* deoptee_thread,
306 GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
838
839 assert(f->is_interpreted_frame(), "must be interpreted");
840 }
841
842 #ifndef PRODUCT
843 static bool falls_through(Bytecodes::Code bc) {
844 switch (bc) {
845 // List may be incomplete. Here we really only care about bytecodes where compiled code
846 // can deoptimize.
847 case Bytecodes::_goto:
848 case Bytecodes::_goto_w:
849 case Bytecodes::_athrow:
850 return false;
851 default:
852 return true;
853 }
854 }
855 #endif
856
857 // Return BasicType of value being returned
858 JRT_LEAF_PROF_NO_THREAD(BasicType, Deoptimization, unpack_frames, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
859 assert(thread == JavaThread::current(), "pre-condition");
860
861 // We are already active in the special DeoptResourceMark; any ResourceObj's we
862 // allocate will be freed at the end of the routine.
863
864 // JRT_LEAF methods don't normally allocate handles and there is a
865 // NoHandleMark to enforce that. It is actually safe to use Handles
866 // in a JRT_LEAF method, and sometimes desirable, but to do so we
867 // must use ResetNoHandleMark to bypass the NoHandleMark, and
868 // then use a HandleMark to ensure any Handles we do create are
869 // cleaned up in this scope.
870 ResetNoHandleMark rnhm;
871 HandleMark hm(thread);
872
873 frame stub_frame = thread->last_frame();
874
875 Continuation::notify_deopt(thread, stub_frame.sp());
876
877 // Since the frame to unpack is the top frame of this thread, the vframe_array_head
878 // must point to the vframeArray for the unpack frame.
1749 if (monitors != nullptr) {
1750 // Unlock in reverse order starting from most nested monitor.
1751 for (int j = (monitors->number_of_monitors() - 1); j >= 0; j--) {
1752 BasicObjectLock* src = monitors->at(j);
1753 if (src->obj() != nullptr) {
1754 ObjectSynchronizer::exit(src->obj(), src->lock(), thread);
1755 }
1756 }
1757 array->element(i)->free_monitors();
1758 #ifdef ASSERT
1759 array->element(i)->set_removed_monitors();
1760 #endif
1761 }
1762 }
1763 }
1764 #endif
1765
1766 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) {
1767 assert(fr.can_be_deoptimized(), "checking frame type");
1768
1769 nmethod* nm = fr.cb()->as_nmethod_or_null();
1770 assert(nm != nullptr, "only compiled methods can deopt");
1771 DeoptAction action = (nm->is_not_entrant() ? Action_make_not_entrant : Action_none);
1772 ScopeDesc* cur_sd = nm->scope_desc_at(fr.pc());
1773 Bytecodes::Code bc = (cur_sd->bci() == -1 ? Bytecodes::_nop // deopt on method entry
1774 : cur_sd->method()->java_code_at(cur_sd->bci()));
1775 gather_statistics(nm, reason, action, bc);
1776
1777 if (LogCompilation && xtty != nullptr) {
1778 ttyLocker ttyl;
1779 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1780 nm->log_identity(xtty);
1781 xtty->end_head();
1782 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1783 xtty->begin_elem("jvms bci='%d'", sd->bci());
1784 xtty->method(sd->method());
1785 xtty->end_elem();
1786 if (sd->is_top()) break;
1787 }
1788 xtty->tail("deoptimized");
1789 }
1790
1791 Continuation::notify_deopt(thread, fr.sp());
1792
1793 // Patch the compiled method so that when execution returns to it we will
1794 // deopt the execution state and return to the interpreter.
1795 fr.deoptimize(thread);
1796 }
1797
2007 static void log_deopt(nmethod* nm, Method* tm, intptr_t pc, frame& fr, int trap_bci,
2008 const char* reason_name, const char* reason_action) {
2009 LogTarget(Debug, deoptimization) lt;
2010 if (lt.is_enabled()) {
2011 LogStream ls(lt);
2012 bool is_osr = nm->is_osr_method();
2013 ls.print("cid=%4d %s level=%d",
2014 nm->compile_id(), (is_osr ? "osr" : " "), nm->comp_level());
2015 ls.print(" %s", tm->name_and_sig_as_C_string());
2016 ls.print(" trap_bci=%d ", trap_bci);
2017 if (is_osr) {
2018 ls.print("osr_bci=%d ", nm->osr_entry_bci());
2019 }
2020 ls.print("%s ", reason_name);
2021 ls.print("%s ", reason_action);
2022 ls.print_cr("pc=" INTPTR_FORMAT " relative_pc=" INTPTR_FORMAT,
2023 pc, fr.pc() - nm->code_begin());
2024 }
2025 }
2026
2027 JRT_ENTRY_PROF(void, Deoptimization, uncommon_trap_inner, Deoptimization::uncommon_trap_inner(JavaThread* current, jint trap_request)) {
2028 HandleMark hm(current);
2029
2030 // uncommon_trap() is called at the beginning of the uncommon trap
2031 // handler. Note this fact before we start generating temporary frames
2032 // that can confuse an asynchronous stack walker. This counter is
2033 // decremented at the end of unpack_frames().
2034
2035 current->inc_in_deopt_handler();
2036
2037 #if INCLUDE_JVMCI
2038 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid
2039 RegisterMap reg_map(current,
2040 RegisterMap::UpdateMap::include,
2041 RegisterMap::ProcessFrames::include,
2042 RegisterMap::WalkContinuation::skip);
2043 #else
2044 RegisterMap reg_map(current,
2045 RegisterMap::UpdateMap::skip,
2046 RegisterMap::ProcessFrames::include,
2047 RegisterMap::WalkContinuation::skip);
2083 #if INCLUDE_JVMCI
2084 jlong speculation = current->pending_failed_speculation();
2085 if (nm->is_compiled_by_jvmci()) {
2086 nm->update_speculation(current);
2087 } else {
2088 assert(speculation == 0, "There should not be a speculation for methods compiled by non-JVMCI compilers");
2089 }
2090
2091 if (trap_bci == SynchronizationEntryBCI) {
2092 trap_bci = 0;
2093 current->set_pending_monitorenter(true);
2094 }
2095
2096 if (reason == Deoptimization::Reason_transfer_to_interpreter) {
2097 current->set_pending_transfer_to_interpreter(true);
2098 }
2099 #endif
2100
2101 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci);
2102 // Record this event in the histogram.
2103 gather_statistics(nm, reason, action, trap_bc);
2104
2105 // Ensure that we can record deopt. history:
2106 bool create_if_missing = ProfileTraps;
2107
2108 methodHandle profiled_method;
2109 #if INCLUDE_JVMCI
2110 if (nm->is_compiled_by_jvmci()) {
2111 profiled_method = methodHandle(current, nm->method());
2112 } else {
2113 profiled_method = trap_method;
2114 }
2115 #else
2116 profiled_method = trap_method;
2117 #endif
2118
2119 MethodData* trap_mdo =
2120 get_method_data(current, profiled_method, create_if_missing);
2121
2122 { // Log Deoptimization event for JFR, UL and event system
2123 Method* tm = trap_method();
2595 bool ignore_maybe_prior_recompile;
2596 assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
2597 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts
2598 bool update_total_counts = true JVMCI_ONLY( && !UseJVMCICompiler);
2599
2600 // Lock to read ProfileData, and ensure lock is not broken by a safepoint
2601 MutexLocker ml(trap_mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
2602
2603 query_update_method_data(trap_mdo, trap_bci,
2604 (DeoptReason)reason,
2605 update_total_counts,
2606 #if INCLUDE_JVMCI
2607 false,
2608 #endif
2609 nullptr,
2610 ignore_this_trap_count,
2611 ignore_maybe_prior_trap,
2612 ignore_maybe_prior_recompile);
2613 }
2614
2615 PROF_ENTRY(Deoptimization::UnrollBlock*, Deoptimization, uncommon_trap, Deoptimization::uncommon_trap(JavaThread* current, jint trap_request, jint exec_mode))
2616 // Enable WXWrite: current function is called from methods compiled by C2 directly
2617 MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, current));
2618
2619 // Still in Java, no safepoints
2620 {
2621 // This enters VM and may safepoint
2622 uncommon_trap_inner(current, trap_request);
2623 }
2624 HandleMark hm(current);
2625 return fetch_unroll_info_helper(current, exec_mode);
2626 PROF_END
2627
2628 // Local derived constants.
2629 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
2630 const int DS_REASON_MASK = ((uint)DataLayout::trap_mask) >> 1;
2631 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
2632
2633 //---------------------------trap_state_reason---------------------------------
2634 Deoptimization::DeoptReason
2635 Deoptimization::trap_state_reason(int trap_state) {
2636 // This assert provides the link between the width of DataLayout::trap_bits
2637 // and the encoding of "recorded" reasons. It ensures there are enough
2638 // bits to store all needed reasons in the per-BCI MDO profile.
2639 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
2640 int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
2641 trap_state -= recompile_bit;
2642 if (trap_state == DS_REASON_MASK) {
2643 return Reason_many;
2644 } else {
2645 assert((int)Reason_none == 0, "state=0 => Reason_none");
2646 return (DeoptReason)trap_state;
2795 size_t len;
2796 if (unloaded_class_index < 0) {
2797 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"),
2798 reason, action
2799 #if INCLUDE_JVMCI
2800 ,debug_id
2801 #endif
2802 );
2803 } else {
2804 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"),
2805 reason, action, unloaded_class_index
2806 #if INCLUDE_JVMCI
2807 ,debug_id
2808 #endif
2809 );
2810 }
2811 return buf;
2812 }
2813
2814 juint Deoptimization::_deoptimization_hist
2815 [1 + 4 + 5] // total + online + archived
2816 [Deoptimization::Reason_LIMIT]
2817 [1 + Deoptimization::Action_LIMIT]
2818 [Deoptimization::BC_CASE_LIMIT]
2819 = {0};
2820
2821 enum {
2822 LSB_BITS = 8,
2823 LSB_MASK = right_n_bits(LSB_BITS)
2824 };
2825
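// Helper for gather_statistics(): bump the cell in 'cases' for this bytecode,
// packing the bytecode in the low LSB_BITS bits and the count above it; the
// last cell doubles as an overflow bucket with its bytecode bits cleared.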
2826 static void update(juint* cases, Bytecodes::Code bc) {
2827 juint* bc_counter_addr = nullptr;
2828 juint bc_counter = 0;
2829 // Look for an unused counter, or an exact match to this BC.
2830 if (bc != Bytecodes::_illegal) {
2831 for (int bc_case = 0; bc_case < Deoptimization::BC_CASE_LIMIT; bc_case++) {
2832 juint* counter_addr = &cases[bc_case];
2833 juint counter = *counter_addr;
2834 if ((counter == 0 && bc_counter_addr == nullptr)
2835 || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
2836 // this counter is either free or is already devoted to this BC
2837 bc_counter_addr = counter_addr;
2838 bc_counter = counter | bc;
2839 }
2840 }
2841 }
2842 if (bc_counter_addr == nullptr) {
2843 // Overflow, or no given bytecode.
2844 bc_counter_addr = &cases[Deoptimization::BC_CASE_LIMIT-1];
2845 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB
2846 }
2847 *bc_counter_addr = bc_counter + (1 << LSB_BITS);
2848 }
2849
2850
2851 void Deoptimization::gather_statistics(nmethod* nm, DeoptReason reason, DeoptAction action,
2852 Bytecodes::Code bc) {
2853 assert(reason >= 0 && reason < Reason_LIMIT, "oob");
2854 assert(action >= 0 && action < Action_LIMIT, "oob");
2855 _deoptimization_hist[0][Reason_none][0][0] += 1; // total
2856 _deoptimization_hist[0][reason][0][0] += 1; // per-reason total
2857
2858 update(_deoptimization_hist[0][reason][1+action], bc);
2859
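// Row 0 aggregates all deoptimizations; rows 1-4 are the online compilation
// tiers, and rows 5-9 hold SCC-compiled code (comp_level + 4, plus one more for
// preloaded methods), matching the [1 + 4 + 5] first dimension declared above.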
2860 uint lvl = nm->comp_level() + (nm->is_scc() ? 4 : 0) + (nm->preloaded() ? 1 : 0);
2861 _deoptimization_hist[lvl][Reason_none][0][0] += 1; // total
2862 _deoptimization_hist[lvl][reason][0][0] += 1; // per-reason total
2863 update(_deoptimization_hist[lvl][reason][1+action], bc);
2864 }
2865
2866 jint Deoptimization::total_deoptimization_count() {
2867 return _deoptimization_hist[0][Reason_none][0][0];
2868 }
2869
2870 // Get the deopt count for a specific reason and a specific action. If either
2871 // one of 'reason' or 'action' is null, the method returns the sum of all
2872 // deoptimizations with the specific 'action' or 'reason' respectively.
2873 // If both arguments are null, the method returns the total deopt count.
2874 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
2875 if (reason_str == nullptr && action_str == nullptr) {
2876 return total_deoptimization_count();
2877 }
2878 juint counter = 0;
2879 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2880 if (reason_str == nullptr || !strcmp(reason_str, trap_reason_name(reason))) {
2881 for (int action = 0; action < Action_LIMIT; action++) {
2882 if (action_str == nullptr || !strcmp(action_str, trap_action_name(action))) {
2883 juint* cases = _deoptimization_hist[0][reason][1+action];
2884 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2885 counter += cases[bc_case] >> LSB_BITS;
2886 }
2887 }
2888 }
2889 }
2890 }
2891 return counter;
2892 }
2893
2894 void Deoptimization::print_statistics() {
2895 ttyLocker ttyl;
2896 if (xtty != nullptr) xtty->head("statistics type='deoptimization'");
2897 tty->print_cr("Deoptimization traps recorded:");
2898 print_statistics_on(tty);
2899 if (xtty != nullptr) xtty->tail("statistics");
2900 }
2901
2902 void Deoptimization::print_statistics_on(const char* title, int lvl, outputStream* st) {
2903 juint total = _deoptimization_hist[lvl][Reason_none][0][0];
2904 juint account = total;
2905 #define PRINT_STAT_LINE(name, r) \
2906 st->print_cr(" %d (%4.1f%%) %s", (int)(r), ((r) == total ? 100.0 : (((r) * 100.0) / total)), name);
2907 if (total > 0) {
2908 st->print(" %s: ", title);
2909 PRINT_STAT_LINE("total", total);
2910 // For each non-zero entry in the histogram, print the reason,
2911 // the action, and (if specifically known) the type of bytecode.
2912 for (int reason = 0; reason < Reason_LIMIT; reason++) {
2913 for (int action = 0; action < Action_LIMIT; action++) {
2914 juint* cases = Deoptimization::_deoptimization_hist[lvl][reason][1+action];
2915 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
2916 juint counter = cases[bc_case];
2917 if (counter != 0) {
2918 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
2919 const char* bc_name = "other";
2920 if (bc_case == (BC_CASE_LIMIT-1) && bc == Bytecodes::_nop) {
2921             // overflow bucket: update() cleared the bytecode bits, so keep "other"
2922 } else if (Bytecodes::is_defined(bc)) {
2923 bc_name = Bytecodes::name(bc);
2924 }
2925 juint r = counter >> LSB_BITS;
2926 st->print_cr(" %-34s %16s %16s: " UINT32_FORMAT_W(5) " (%4.1f%%)",
2927 trap_reason_name(reason), trap_action_name(action), bc_name,
2928 r, (r * 100.0) / total);
2929 account -= r;
2930 }
2931 }
2932 }
2933 }
2934 if (account != 0) {
2935 PRINT_STAT_LINE("unaccounted", account);
2936 }
2937 #undef PRINT_STAT_LINE
2938 }
2939 }
2940
2941 void Deoptimization::print_statistics_on(outputStream* st) {
2942 // print_statistics_on("Total", 0, st);
2943 print_statistics_on("Tier1", 1, st);
2944 print_statistics_on("Tier2", 2, st);
2945 print_statistics_on("Tier3", 3, st);
2946 print_statistics_on("Tier4", 4, st);
2947
2948 print_statistics_on("SC Tier1", 5, st);
2949 print_statistics_on("SC Tier2", 6, st);
2950 print_statistics_on("SC Tier4", 8, st);
2951 print_statistics_on("SC Tier5 (preloaded)", 9, st);
2952 }
2953
2954 #define DO_COUNTERS(macro) \
2955 macro(Deoptimization, fetch_unroll_info) \
2956 macro(Deoptimization, unpack_frames) \
2957 macro(Deoptimization, uncommon_trap_inner) \
2958 macro(Deoptimization, uncommon_trap)
2959
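// DO_COUNTERS is an X-macro list: each (subsystem, name) pair is expanded through
// INIT_COUNTER below to create a PerfData tick counter and event counter, and
// through PRINT_COUNTER to report them when ProfileRuntimeCalls is enabled.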
2960 #define INIT_COUNTER(sub, name) \
2961 NEWPERFTICKCOUNTERS(_perf_##sub##_##name##_timer, SUN_RT, #sub "::" #name) \
2962 NEWPERFEVENTCOUNTER(_perf_##sub##_##name##_count, SUN_RT, #sub "::" #name "_count");
2963
2964 void Deoptimization::init_counters() {
2965 if (ProfileRuntimeCalls && UsePerfData) {
2966 EXCEPTION_MARK;
2967
2968 DO_COUNTERS(INIT_COUNTER)
2969
2970 if (HAS_PENDING_EXCEPTION) {
2971 vm_exit_during_initialization("jvm_perf_init failed unexpectedly");
2972 }
2973 }
2974 }
2975 #undef INIT_COUNTER
2976
2977 #define PRINT_COUNTER(sub, name) { \
2978 jlong count = _perf_##sub##_##name##_count->get_value(); \
2979 if (count > 0) { \
2980 st->print_cr(" %-50s = " JLONG_FORMAT_W(6) "us (elapsed) " JLONG_FORMAT_W(6) "us (thread) (" JLONG_FORMAT_W(5) " events)", #sub "::" #name, \
2981 _perf_##sub##_##name##_timer->elapsed_counter_value_us(), \
2982 _perf_##sub##_##name##_timer->thread_counter_value_us(), \
2983 count); \
2984 }}
2985
2986 void Deoptimization::print_counters_on(outputStream* st) {
2987 if (ProfileRuntimeCalls && UsePerfData) {
2988 DO_COUNTERS(PRINT_COUNTER)
2989 } else {
2990 st->print_cr(" Deoptimization: no info (%s is disabled)", (UsePerfData ? "ProfileRuntimeCalls" : "UsePerfData"));
2991 }
2992 }
2993
2994 #undef PRINT_COUNTER
2995 #undef DO_COUNTERS
2996
2997 #else // COMPILER2_OR_JVMCI
2998
2999
3000 // Stubs for a C1-only system.
3001 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
3002 return false;
3003 }
3004
3005 const char* Deoptimization::trap_reason_name(int reason) {
3006 return "unknown";
3007 }
3008
3009 jint Deoptimization::total_deoptimization_count() {
3010 return 0;
3011 }
3012
3013 jint Deoptimization::deoptimization_count(const char *reason_str, const char *action_str) {
3014 return 0;
3015 }
3016
3017 void Deoptimization::print_statistics() {
3018 // no output
3019 }
3020
3021 void
3022 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
3023 // no update
3024 }
3025
3026 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
3027 return 0;
3028 }
3029
3030 void Deoptimization::gather_statistics(nmethod* nm, DeoptReason reason, DeoptAction action,
3031 Bytecodes::Code bc) {
3032 // no update
3033 }
3034
3035 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
3036 int trap_state) {
3037 jio_snprintf(buf, buflen, "#%d", trap_state);
3038 return buf;
3039 }
3040
3041 void Deoptimization::init_counters() {
3042 // nothing to do
3043 }
3044
3045 void Deoptimization::print_counters_on(outputStream* st) {
3046 // no output
3047 }
3048
3049 void Deoptimization::print_statistics_on(outputStream* st) {
3050 // no output
3051 }
3052
3053 #endif // COMPILER2_OR_JVMCI
|