/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_DEOPTIMIZATION_HPP
#define SHARE_RUNTIME_DEOPTIMIZATION_HPP

#include "interpreter/bytecodes.hpp"
#include "memory/allocation.hpp"
#include "runtime/frame.hpp"

class ProfileData;
class vframeArray;
class MonitorInfo;
class MonitorValue;
class ObjectValue;
class AutoBoxObjectValue;
class ScopeValue;
class compiledVFrame;

template<class E> class GrowableArray;

class DeoptimizationScope {
 private:
  // What gen we have done the deopt handshake for.
  static uint64_t _committed_deopt_gen;
  // What gen to mark a method with, hence larger than _committed_deopt_gen.
  static uint64_t _active_deopt_gen;
  // Indicate an in-progress deopt handshake.
  static bool     _committing_in_progress;

  // The required gen we need to execute/wait for.
  uint64_t _required_gen;
  DEBUG_ONLY(bool _deopted;)

 public:
  DeoptimizationScope();
  ~DeoptimizationScope();
  // Mark a method, if already marked as dependent.
  void mark(nmethod* nm, bool inc_recompile_counts = true);
  // Record this as a dependent method.
  void dependent(nmethod* nm);

  // Execute the deoptimization.
  // Make the marked nmethods not entrant, walk the stacks, patch return pcs and set post-call nops.
  void deoptimize_marked();
};
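
// Illustrative usage sketch (hypothetical caller, not part of this header):
//
//   DeoptimizationScope deopt_scope;
//   for (nmethod* nm : affected_nmethods) {   // affected_nmethods: a caller-provided collection
//     deopt_scope.mark(nm);                   // mark each nmethod for deoptimization
//   }
//   deopt_scope.deoptimize_marked();          // perform the deopt handshake for all marked nmethods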

class Deoptimization : AllStatic {
  friend class VMStructs;
  friend class EscapeBarrier;

 public:
  // What condition caused the deoptimization?
  // Note: Keep this enum in sync. with Deoptimization::_trap_reason_name.
  enum DeoptReason {
    Reason_many = -1,             // indicates presence of several reasons
    Reason_none = 0,              // indicates absence of a relevant deopt.
    // Next 8 reasons are recorded per bytecode in DataLayout::trap_bits.
    // This is more complicated for JVMCI as JVMCI may deoptimize to *some* bytecode before the
    // bytecode that actually caused the deopt (with inlining, JVMCI may even deoptimize to a
    // bytecode in another method):
    //  - bytecode y in method b() causes deopt
    //  - JVMCI deoptimizes to bytecode x in method a()
    //  -> the deopt reason will be recorded for method a() at bytecode x
    Reason_null_check,            // saw unexpected null or zero divisor (@bci)
    Reason_null_assert,           // saw unexpected non-null or non-zero (@bci)
    Reason_range_check,           // saw unexpected array index (@bci)
    Reason_class_check,           // saw unexpected object class (@bci)
    Reason_array_check,           // saw unexpected array class (aastore @bci)
    Reason_intrinsic,             // saw unexpected operand to intrinsic (@bci)
    Reason_bimorphic,             // saw unexpected object class in bimorphic inlining (@bci)

#if INCLUDE_JVMCI
    Reason_unreached0            = Reason_null_assert,
    Reason_type_checked_inlining = Reason_intrinsic,
    Reason_optimized_type_check  = Reason_bimorphic,
#endif

    Reason_profile_predicate,     // compiler-generated predicate, hoisted from a frequent branch in a loop, failed

    // recorded per method
    Reason_unloaded,              // unloaded class or constant pool entry
    Reason_uninitialized,         // bad class state (uninitialized)
    Reason_initialized,           // class has been fully initialized
    Reason_unreached,             // code is not reached, compiler
    Reason_unhandled,             // arbitrary compiler limitation
    Reason_constraint,            // arbitrary runtime constraint violated
    Reason_div0_check,            // a null_check due to division by zero
    Reason_age,                   // nmethod too old; tier threshold reached
    Reason_predicate,             // compiler generated predicate failed
    Reason_loop_limit_check,      // compiler generated loop limits check failed
    Reason_speculate_class_check, // saw unexpected object class from type speculation
    Reason_speculate_null_check,  // saw unexpected null from type speculation
    Reason_speculate_null_assert, // saw unexpected non-null from type speculation
    Reason_unstable_if,           // a branch predicted always false was taken
    Reason_unstable_fused_if,     // two ifs were fused, each with one untaken branch; one of those branches is now taken
    Reason_receiver_constraint,   // receiver subtype check failed
#if INCLUDE_JVMCI
    Reason_aliasing,              // optimistic assumption about aliasing failed
    Reason_transfer_to_interpreter, // explicit transferToInterpreter()
    Reason_not_compiled_exception_handler,
    Reason_unresolved,
    Reason_jsr_mismatch,
#endif

    // Used to define MethodData::_trap_hist_limit where Reason_tenured isn't included
    Reason_TRAP_HISTORY_LENGTH,

    // Reason_tenured is counted separately; add normally counted Reasons above.
    Reason_tenured = Reason_TRAP_HISTORY_LENGTH, // age of the code has reached the limit
    Reason_LIMIT,

    // Note: Reason_RECORDED_LIMIT should fit into 31 bits of
    // DataLayout::trap_bits. This dependency is enforced indirectly
    // via asserts, to avoid excessive direct header-to-header dependencies.
    // See Deoptimization::trap_state_reason and class DataLayout.
    Reason_RECORDED_LIMIT = Reason_profile_predicate, // some are not recorded per bc
  };

  // What action must be taken by the runtime?
  // Note: Keep this enum in sync. with Deoptimization::_trap_action_name.
  enum DeoptAction {
    Action_none,                  // just interpret, do not invalidate nmethod
    Action_maybe_recompile,       // recompile the nmethod; need not invalidate
    Action_reinterpret,           // invalidate the nmethod, reset IC, maybe recompile
    Action_make_not_entrant,      // invalidate the nmethod, recompile (probably)
    Action_make_not_compilable,   // invalidate the nmethod and do not compile
    Action_LIMIT
  };

  enum {
    _action_bits = 3,
    _reason_bits = 5,
    _debug_id_bits = 23,
    _action_shift = 0,
    _reason_shift = _action_shift + _action_bits,
    _debug_id_shift = _reason_shift + _reason_bits,
    BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist
  };

  enum UnpackType {
    Unpack_deopt         = 0, // normal deoptimization, use pc computed in unpack_vframe_on_stack
    Unpack_exception     = 1, // exception is pending
    Unpack_uncommon_trap = 2, // redo last byte code (C2 only)
    Unpack_reexecute     = 3, // reexecute bytecode (C1 only)
    Unpack_none          = 4, // not deoptimizing the frame, just reallocating/relocking for JVMTI
    Unpack_LIMIT         = 5
  };

#if INCLUDE_JVMCI
  // Can reconstruct virtualized unsafe large accesses to byte arrays.
  static const int _support_large_access_byte_array_virtualization = 1;
#endif

  // Make all nmethods that are marked_for_deoptimization not_entrant and deoptimize
  // any live activations using those nmethods. The code cache is scanned to find
  // all marked nmethods.
  static void deoptimize_all_marked();

 public:
  // Deoptimizes a frame lazily. The deopt happens on return to the frame.
  static void deoptimize(JavaThread* thread, frame fr, DeoptReason reason = Reason_constraint);
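
  // Illustrative sketch (hypothetical caller): request lazy deoptimization of a
  // compiled frame belonging to 'thread'; the frame is not rewritten here, the
  // actual deopt happens when control returns to it.
  //
  //   frame fr = ...;                          // some compiled Java frame of 'thread'
  //   Deoptimization::deoptimize(thread, fr);  // defaults to Reason_constraint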

#if INCLUDE_JVMCI
  static address deoptimize_for_missing_exception_handler(nmethod* nm);
  static oop get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS);
#endif

 private:
  // Does the actual work for deoptimizing a single frame
  static void deoptimize_single_frame(JavaThread* thread, frame fr, DeoptReason reason);

#if COMPILER2_OR_JVMCI
  // Deoptimize objects, that is reallocate and relock them, just before they
  // escape through JVMTI. The given vframes cover one physical frame.
  static bool deoptimize_objects_internal(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk,
                                          bool& realloc_failures);

 public:

  // Support for restoring non-escaping objects
  static bool realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS);
  static bool realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS);
  static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
  static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
  static void reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool skip_internal, TRAPS);
  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS);
  static bool relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
                             JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures);
  static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
#endif // COMPILER2_OR_JVMCI
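
  // Illustrative flow sketch (hypothetical and simplified): how the COMPILER2_OR_JVMCI
  // helpers above are typically combined when scalar-replaced objects of a deoptee
  // frame must be materialized:
  //
  //   GrowableArray<ScopeValue*>* objects = ...;  // object descriptions from the scope of a compiledVFrame
  //   bool failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
  //   Deoptimization::reassign_fields(&deoptee, &map, objects, failures,
  //                                   false /* skip_internal */, THREAD);
  //   Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,
  //                                  exec_mode, failures);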

 public:
  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap* reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);

  // Interface used for unpacking deoptimized frames

  // UnrollBlock is returned by fetch_unroll_info() to the deoptimization handler (blob).
  // This is only a CHeapObj to ease debugging after a deopt failure
  class UnrollBlock : public CHeapObj<mtCompiler> {
    friend class VMStructs;
    friend class JVMCIVMStructs;
   private:
    int       _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame
    int       _caller_adjustment;         // Adjustment, in bytes, to caller's SP by initial interpreted frame
    int       _number_of_frames;          // Number of frames to unroll
    int       _total_frame_sizes;         // Total of number*sizes frames
    intptr_t* _frame_sizes;               // Array of frame sizes, in bytes, for unrolling the stack
    address*  _frame_pcs;                 // Array of frame pcs for unrolling the stack
    intptr_t* _register_block;            // Block for storing callee-saved registers.
    BasicType _return_type;               // Tells if we have to restore double or long return value
    intptr_t  _initial_info;              // Platform dependent data for the sender frame (was FP on x86)
    int       _caller_actual_parameters;  // The number of actual arguments at the
                                          // interpreted caller of the deoptimized frame
    int       _unpack_kind;               // exec_mode that can be changed during fetch_unroll_info

    // The following fields are used as temps during the unpacking phase
    // (which is tight on registers, especially on x86). They really ought
    // to be PD variables but that involves moving this class into its own
    // file to use the pd include mechanism. Maybe in a later cleanup ...
    intptr_t  _counter_temp;              // SHOULD BE PD VARIABLE (x86 frame count temp)
    intptr_t  _sender_sp_temp;            // SHOULD BE PD VARIABLE (x86 sender_sp)
   public:
    // Constructor
    UnrollBlock(int size_of_deoptimized_frame,
                int caller_adjustment,
                int caller_actual_parameters,
                int number_of_frames,
                intptr_t* frame_sizes,
                address* frames_pcs,
                BasicType return_type,
                int unpack_kind);
    ~UnrollBlock();

    // Accessors
    intptr_t* frame_sizes() const { return _frame_sizes; }
    int number_of_frames() const  { return _number_of_frames; }

    // Returns the total size of frames
    int size_of_frames() const;

    void set_initial_info(intptr_t info) { _initial_info = info; }

    int caller_actual_parameters() const { return _caller_actual_parameters; }

    // Accessors used by the code generator for the unpack stub.
    static ByteSize size_of_deoptimized_frame_offset() { return byte_offset_of(UnrollBlock, _size_of_deoptimized_frame); }
    static ByteSize caller_adjustment_offset()         { return byte_offset_of(UnrollBlock, _caller_adjustment); }
    static ByteSize number_of_frames_offset()          { return byte_offset_of(UnrollBlock, _number_of_frames); }
    static ByteSize frame_sizes_offset()               { return byte_offset_of(UnrollBlock, _frame_sizes); }
    static ByteSize total_frame_sizes_offset()         { return byte_offset_of(UnrollBlock, _total_frame_sizes); }
    static ByteSize frame_pcs_offset()                 { return byte_offset_of(UnrollBlock, _frame_pcs); }
    static ByteSize counter_temp_offset()              { return byte_offset_of(UnrollBlock, _counter_temp); }
    static ByteSize initial_info_offset()              { return byte_offset_of(UnrollBlock, _initial_info); }
    static ByteSize unpack_kind_offset()               { return byte_offset_of(UnrollBlock, _unpack_kind); }
    static ByteSize sender_sp_temp_offset()            { return byte_offset_of(UnrollBlock, _sender_sp_temp); }

    BasicType return_type() const { return _return_type; }
    void print();
  };

  //** Returns an UnrollBlock containing information
  // on how to make room for the resulting interpreter frames.
  // Called by assembly stub after execution has returned to
  // deoptimized frame.
  // @argument thread.     Thread where stub_frame resides.
  // @see OptoRuntime::deoptimization_fetch_unroll_info_C
  static UnrollBlock* fetch_unroll_info(JavaThread* current, int exec_mode);

  //** Unpacks vframeArray onto execution stack
  // Called by assembly stub after execution has returned to
  // deoptimized frame and after the stack unrolling.
  // @argument thread.     Thread where stub_frame resides.
  // @argument exec_mode.  Determines how execution should be continued in top frame.
  //                       0 means continue after current byte code
  //                       1 means exception has happened, handle exception
  //                       2 means reexecute current bytecode (for uncommon traps).
  // @see OptoRuntime::deoptimization_unpack_frames_C
  // Return BasicType of call return type, if any
  static BasicType unpack_frames(JavaThread* thread, int exec_mode);

  // Cleans up deoptimization bits on thread after unpacking or in the
  // case of an exception.
  static void cleanup_deopt_info(JavaThread* thread,
                                 vframeArray* array);

  // Restores callee saved values from deoptimized frame into oldest interpreter frame
  // so the caller of the deoptimized frame will get back the values it expects.
  static void unwind_callee_save_values(frame* f, vframeArray* vframe_array);
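
  // Illustrative control-flow sketch (simplified; the actual sequence is emitted by
  // the platform-specific deopt blob, e.g. SharedRuntime::generate_deopt_blob):
  //
  //   UnrollBlock* info = Deoptimization::fetch_unroll_info(current, exec_mode);
  //   // ... pop the deoptimized frame, then push info->number_of_frames() skeletal
  //   //     interpreter frames sized by info->frame_sizes() ...
  //   BasicType ret_type = Deoptimization::unpack_frames(current, exec_mode);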

  //** Performs an uncommon trap for compiled code.
  // The topmost compiled frame is converted into interpreter frames.
  static UnrollBlock* uncommon_trap(JavaThread* current, jint unloaded_class_index, jint exec_mode);
  // Helper routine that enters the VM and may block
  static void uncommon_trap_inner(JavaThread* current, jint unloaded_class_index);

  //** Deoptimizes the frame identified by id.
  // Only called from VM_DeoptimizeFrame.
  // @argument thread.  Thread where stub_frame resides.
  // @argument id.      id of frame that should be deoptimized.
  static void deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason);

  // If thread is not the current thread, then execute
  // VM_DeoptimizeFrame; otherwise deoptimize directly.
  static void deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason);
  static void deoptimize_frame(JavaThread* thread, intptr_t* id);

  // Statistics
  static void gather_statistics(DeoptReason reason, DeoptAction action,
                                Bytecodes::Code bc = Bytecodes::_illegal);
  static void print_statistics();

  // How much room to adjust the last frame's SP by, to make space for
  // the callee's interpreter frame (which expects locals to be next to
  // incoming arguments)
  static int last_frame_adjust(int callee_parameters, int callee_locals);

  // trap_request codes
  static DeoptReason trap_request_reason(int trap_request) {
    if (trap_request < 0)
      return (DeoptReason)
        ((~(trap_request) >> _reason_shift) & right_n_bits(_reason_bits));
    else
      // standard reason for unloaded CP entry
      return Reason_unloaded;
  }
  static DeoptAction trap_request_action(int trap_request) {
    if (trap_request < 0)
      return (DeoptAction)
        ((~(trap_request) >> _action_shift) & right_n_bits(_action_bits));
    else
      // standard action for unloaded CP entry
      return _unloaded_action;
  }
  static int trap_request_debug_id(int trap_request) {
    if (trap_request < 0) {
      return ((~(trap_request) >> _debug_id_shift) & right_n_bits(_debug_id_bits));
    } else {
      // standard debug id for unloaded CP entry
      return 0;
    }
  }
  static int trap_request_index(int trap_request) {
    if (trap_request < 0)
      return -1;
    else
      return trap_request;
  }
  static int make_trap_request(DeoptReason reason, DeoptAction action,
                               int index = -1) {
    assert((1 << _reason_bits) >= Reason_LIMIT, "enough bits");
    assert((1 << _action_bits) >= Action_LIMIT, "enough bits");
    int trap_request;
    if (index != -1)
      trap_request = index;
    else
      trap_request = (~(((reason) << _reason_shift)
                        + ((action) << _action_shift)));
    assert(reason == trap_request_reason(trap_request), "valid reason");
    assert(action == trap_request_action(trap_request), "valid action");
    assert(index  == trap_request_index(trap_request),  "valid index");
    return trap_request;
  }
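
  // Worked example (illustrative only; the values follow from the enums and shifts
  // declared above): packing Reason_class_check (4) with Action_maybe_recompile (1)
  // and no debug id gives
  //   make_trap_request(Reason_class_check, Action_maybe_recompile)
  //     == ~((4 << _reason_shift) + (1 << _action_shift)) == ~0x21 == -34
  // and decoding the negative request recovers each field:
  //   trap_request_reason(-34)   == Reason_class_check
  //   trap_request_action(-34)   == Action_maybe_recompile
  //   trap_request_debug_id(-34) == 0
  // Non-negative trap_request values are reserved for unloaded constant pool
  // indices (see trap_request_index above).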

  // The trap_state stored in a MDO is decoded here.
  // It records two items of information.
  //  reason:     If a deoptimization happened here, what its reason was,
  //              or if there were multiple deopts with differing reasons.
  //  recompiled: If a deoptimization here triggered a recompilation.
  // Note that not all reasons are recorded per-bci.
  static DeoptReason trap_state_reason(int trap_state);
  static int  trap_state_has_reason(int trap_state, int reason);
  static int  trap_state_add_reason(int trap_state, int reason);
  static bool trap_state_is_recompiled(int trap_state);
  static int  trap_state_set_recompiled(int trap_state, bool z);
  static const char* format_trap_state(char* buf, size_t buflen,
                                       int trap_state);

  static bool reason_is_recorded_per_bytecode(DeoptReason reason) {
    return reason > Reason_none && reason <= Reason_RECORDED_LIMIT;
  }

  static DeoptReason reason_recorded_per_bytecode_if_any(DeoptReason reason) {
    if (reason_is_recorded_per_bytecode(reason))
      return reason;
    else if (reason == Reason_div0_check) // null check due to divide-by-zero?
      return Reason_null_check;           // recorded per BCI as a null check
    else if (reason == Reason_speculate_class_check)
      return Reason_class_check;
    else if (reason == Reason_speculate_null_check)
      return Reason_null_check;
    else if (reason == Reason_speculate_null_assert)
      return Reason_null_assert;
    else if (reason == Reason_unstable_if)
      return Reason_intrinsic;
    else if (reason == Reason_unstable_fused_if)
      return Reason_range_check;
    else
      return Reason_none;
  }

  static bool reason_is_speculate(int reason) {
    if (reason == Reason_speculate_class_check ||
        reason == Reason_speculate_null_check ||
        reason == Reason_speculate_null_assert) {
      return true;
    }
    return false;
  }

  static DeoptReason reason_null_check(bool speculative) {
    return speculative ? Deoptimization::Reason_speculate_null_check : Deoptimization::Reason_null_check;
  }

  static DeoptReason reason_class_check(bool speculative) {
    return speculative ? Deoptimization::Reason_speculate_class_check : Deoptimization::Reason_class_check;
  }

  static DeoptReason reason_null_assert(bool speculative) {
    return speculative ? Deoptimization::Reason_speculate_null_assert : Deoptimization::Reason_null_assert;
  }

  static uint per_method_trap_limit(int reason) {
    return reason_is_speculate(reason) ? (uint)PerMethodSpecTrapLimit : (uint)PerMethodTrapLimit;
  }
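
  // For example (following the mapping above): a Reason_div0_check trap shares the
  // per-bci history slot of Reason_null_check, Reason_speculate_class_check is
  // recorded as Reason_class_check, and a speculative reason is cut off per method
  // by PerMethodSpecTrapLimit rather than PerMethodTrapLimit.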

  static const char* trap_reason_name(int reason);
  static const char* trap_action_name(int action);
  // Format like reason='foo' action='bar' index='123'.
  // This is suitable both for XML and for tty output.
  static const char* format_trap_request(char* buf, size_t buflen,
                                         int trap_request);

  static jint total_deoptimization_count();
  static jint deoptimization_count(const char* reason_str, const char* action_str);

  // JVMTI PopFrame support

  // Preserves incoming arguments to the popped frame when it is
  // returning to a deoptimized caller
  static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);

  static MethodData* get_method_data(JavaThread* thread, const methodHandle& m, bool create_if_missing);
 private:
  // Update the mdo's count and per-BCI reason bits, returning previous state:
  static ProfileData* query_update_method_data(MethodData* trap_mdo,
                                               int trap_bci,
                                               DeoptReason reason,
                                               bool update_total_trap_count,
#if INCLUDE_JVMCI
                                               bool is_osr,
#endif
                                               Method* compiled_method,
                                               // outputs:
                                               uint& ret_this_trap_count,
                                               bool& ret_maybe_prior_trap,
                                               bool& ret_maybe_prior_recompile);
  // class loading support for uncommon trap
  static void load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS);

  static UnrollBlock* fetch_unroll_info_helper(JavaThread* current, int exec_mode);

  static DeoptAction _unloaded_action; // == Action_reinterpret;
  static const char* _trap_reason_name[];
  static const char* _trap_action_name[];

  static juint _deoptimization_hist[Reason_LIMIT][1+Action_LIMIT][BC_CASE_LIMIT];
  // Note: Histogram array size is 1-2 Kb.

 public:
  static void update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason);
};

#endif // SHARE_RUNTIME_DEOPTIMIZATION_HPP