< prev index next >

src/hotspot/share/runtime/deoptimization.hpp

Print this page

140   };
141 
142   // What action must be taken by the runtime?
143   // Note: Keep this enum in sync with Deoptimization::_trap_action_name.
144   enum DeoptAction {
145     Action_none,                  // just interpret, do not invalidate nmethod
146     Action_maybe_recompile,       // recompile the nmethod; need not invalidate
147     Action_reinterpret,           // invalidate the nmethod, reset IC, maybe recompile
148     Action_make_not_entrant,      // invalidate the nmethod, recompile (probably)
149     Action_make_not_compilable,   // invalidate the nmethod and do not compile
150     Action_LIMIT                  // sentinel: action count; values must fit in _action_bits (see trap_request_action)
151   };
152 
153   enum {
// Bit layout of an encoded (negative, bit-complemented) trap_request word:
//   [ debug_id : 23 | reason : 5 | action : 3 ]  -- 31 bits total.
// Decoded by trap_request_reason()/trap_request_action() below.
154     _action_bits = 3,
155     _reason_bits = 5,
156     _debug_id_bits = 23,
157     _action_shift = 0,
158     _reason_shift = _action_shift+_action_bits,
159     _debug_id_shift = _reason_shift+_reason_bits,
160     BC_CASE_LIMIT = PRODUCT_ONLY(1) NOT_PRODUCT(4) // for _deoptimization_hist (1 in product builds, 4 otherwise)
161   };
162 
// Mode in which the top compiled frame is unpacked into interpreter frames;
// passed to the runtime as a jint exec_mode (see uncommon_trap() and
// fetch_unroll_info_helper() below).
163   enum UnpackType {
164     Unpack_deopt                = 0, // normal deoptimization, use pc computed in unpack_vframe_on_stack
165     Unpack_exception            = 1, // exception is pending
166     Unpack_uncommon_trap        = 2, // redo last byte code (C2 only)
167     Unpack_reexecute            = 3, // reexecute bytecode (C1 only)
168     Unpack_none                 = 4, // not deoptimizing the frame, just reallocating/relocking for JVMTI
169     Unpack_LIMIT                = 5  // sentinel: number of unpack modes
170   };
171 
172 #if INCLUDE_JVMCI
173   // Can reconstruct virtualized unsafe large accesses to byte arrays.
174   static const int _support_large_access_byte_array_virtualization = 1;
175 #endif
176 
177   // Make all nmethods that are marked_for_deoptimization not_entrant and deoptimize any live
178   // activations using those nmethods. Scan of the code cache is done to
179   // find all marked nmethods and they are made not_entrant.
180   static void deoptimize_all_marked();

312   // The top most compiler frame is converted into interpreter frames
313   static UnrollBlock* uncommon_trap(JavaThread* current, jint unloaded_class_index, jint exec_mode);
314   // Helper routine that enters the VM and may block
315   static void uncommon_trap_inner(JavaThread* current, jint unloaded_class_index);
316 
317   // Deoptimizes the frame identified by id.
318   // Only called from VMDeoptimizeFrame
319   // @argument thread.     Thread where stub_frame resides.
320   // @argument id.         id of frame that should be deoptimized.
321   static void deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason);
322 
323   // if thread is not the current thread then execute
324   // VM_DeoptimizeFrame otherwise deoptimize directly.
325   static void deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason);
326   static void deoptimize_frame(JavaThread* thread, intptr_t* id);
327 
328   // Statistics
329   static void gather_statistics(DeoptReason reason, DeoptAction action,
330                                 Bytecodes::Code bc = Bytecodes::_illegal);
331   static void print_statistics();

332 
333   // How much room to adjust the last frame's SP by, to make space for
334   // the callee's interpreter frame (which expects locals to be next to
335   // incoming arguments)
336   static int last_frame_adjust(int callee_parameters, int callee_locals);
337 
338   // trap_request codes
// A trap_request is either negative -- a bit-complemented word packing
// reason, action and debug id (see the _*_bits/_*_shift enum above) --
// or non-negative, which is used for unloaded constant-pool entries and
// always decodes to Reason_unloaded.
339   static DeoptReason trap_request_reason(int trap_request) {
340     if (trap_request < 0)
341       return (DeoptReason)
342         ((~(trap_request) >> _reason_shift) & right_n_bits(_reason_bits));
343     else
344       // standard reason for unloaded CP entry
345       return Reason_unloaded;
346   }
347   static DeoptAction trap_request_action(int trap_request) {
348     if (trap_request < 0)
349       return (DeoptAction)
350         ((~(trap_request) >> _action_shift) & right_n_bits(_action_bits));
351     else

472 #endif
473                                                Method* compiled_method,
474                                                //outputs:
475                                                uint& ret_this_trap_count,
476                                                bool& ret_maybe_prior_trap,
477                                                bool& ret_maybe_prior_recompile);
478   // class loading support for uncommon trap
479   static void load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS);
480 
481   static UnrollBlock* fetch_unroll_info_helper(JavaThread* current, int exec_mode);
482 
483   static DeoptAction _unloaded_action; // == Action_reinterpret;
484   static const char* _trap_reason_name[];
485   static const char* _trap_action_name[];
486 
487   static juint _deoptimization_hist[Reason_LIMIT][1+Action_LIMIT][BC_CASE_LIMIT];
488   // Note:  Histogram array size is 1-2 Kb.
489 
490  public:
491   static void update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason);



492 };
493 
494 #endif // SHARE_RUNTIME_DEOPTIMIZATION_HPP

140   };
141 
142   // What action must be taken by the runtime?
143   // Note: Keep this enum in sync with Deoptimization::_trap_action_name.
144   enum DeoptAction {
145     Action_none,                  // just interpret, do not invalidate nmethod
146     Action_maybe_recompile,       // recompile the nmethod; need not invalidate
147     Action_reinterpret,           // invalidate the nmethod, reset IC, maybe recompile
148     Action_make_not_entrant,      // invalidate the nmethod, recompile (probably)
149     Action_make_not_compilable,   // invalidate the nmethod and do not compile
150     Action_LIMIT                  // sentinel: action count; values must fit in _action_bits (see trap_request_action)
151   };
152 
153   enum {
// Bit layout of an encoded (negative, bit-complemented) trap_request word:
//   [ debug_id : 23 | reason : 5 | action : 3 ]  -- 31 bits total.
// Decoded by trap_request_reason()/trap_request_action() below.
154     _action_bits = 3,
155     _reason_bits = 5,
156     _debug_id_bits = 23,
157     _action_shift = 0,
158     _reason_shift = _action_shift+_action_bits,
159     _debug_id_shift = _reason_shift+_reason_bits,
160     BC_CASE_LIMIT = 4 // for _deoptimization_hist (third array dimension)
161   };
162 
// Mode in which the top compiled frame is unpacked into interpreter frames;
// passed to the runtime as a jint exec_mode (see uncommon_trap() and
// fetch_unroll_info_helper() below).
163   enum UnpackType {
164     Unpack_deopt                = 0, // normal deoptimization, use pc computed in unpack_vframe_on_stack
165     Unpack_exception            = 1, // exception is pending
166     Unpack_uncommon_trap        = 2, // redo last byte code (C2 only)
167     Unpack_reexecute            = 3, // reexecute bytecode (C1 only)
168     Unpack_none                 = 4, // not deoptimizing the frame, just reallocating/relocking for JVMTI
169     Unpack_LIMIT                = 5  // sentinel: number of unpack modes
170   };
171 
172 #if INCLUDE_JVMCI
173   // Can reconstruct virtualized unsafe large accesses to byte arrays.
174   static const int _support_large_access_byte_array_virtualization = 1;
175 #endif
176 
177   // Make all nmethods that are marked_for_deoptimization not_entrant and deoptimize any live
178   // activations using those nmethods. Scan of the code cache is done to
179   // find all marked nmethods and they are made not_entrant.
180   static void deoptimize_all_marked();

312   // The top most compiler frame is converted into interpreter frames
313   static UnrollBlock* uncommon_trap(JavaThread* current, jint unloaded_class_index, jint exec_mode);
314   // Helper routine that enters the VM and may block
315   static void uncommon_trap_inner(JavaThread* current, jint unloaded_class_index);
316 
317   // Deoptimizes the frame identified by id.
318   // Only called from VMDeoptimizeFrame
319   // @argument thread.     Thread where stub_frame resides.
320   // @argument id.         id of frame that should be deoptimized.
321   static void deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason);
322 
323   // if thread is not the current thread then execute
324   // VM_DeoptimizeFrame otherwise deoptimize directly.
325   static void deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason);
326   static void deoptimize_frame(JavaThread* thread, intptr_t* id);
327 
328   // Statistics
329   static void gather_statistics(DeoptReason reason, DeoptAction action,
330                                 Bytecodes::Code bc = Bytecodes::_illegal);
331   static void print_statistics();
332   static void print_statistics_on(outputStream* st);
333 
334   // How much room to adjust the last frame's SP by, to make space for
335   // the callee's interpreter frame (which expects locals to be next to
336   // incoming arguments)
337   static int last_frame_adjust(int callee_parameters, int callee_locals);
338 
339   // trap_request codes
// A trap_request is either negative -- a bit-complemented word packing
// reason, action and debug id (see the _*_bits/_*_shift enum above) --
// or non-negative, which is used for unloaded constant-pool entries and
// always decodes to Reason_unloaded.
340   static DeoptReason trap_request_reason(int trap_request) {
341     if (trap_request < 0)
342       return (DeoptReason)
343         ((~(trap_request) >> _reason_shift) & right_n_bits(_reason_bits));
344     else
345       // standard reason for unloaded CP entry
346       return Reason_unloaded;
347   }
348   static DeoptAction trap_request_action(int trap_request) {
349     if (trap_request < 0)
350       return (DeoptAction)
351         ((~(trap_request) >> _action_shift) & right_n_bits(_action_bits));
352     else

473 #endif
474                                                Method* compiled_method,
475                                                //outputs:
476                                                uint& ret_this_trap_count,
477                                                bool& ret_maybe_prior_trap,
478                                                bool& ret_maybe_prior_recompile);
479   // class loading support for uncommon trap
480   static void load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS);
481 
482   static UnrollBlock* fetch_unroll_info_helper(JavaThread* current, int exec_mode);
483 
484   static DeoptAction _unloaded_action; // == Action_reinterpret;
485   static const char* _trap_reason_name[];
486   static const char* _trap_action_name[];
487 
488   static juint _deoptimization_hist[Reason_LIMIT][1+Action_LIMIT][BC_CASE_LIMIT];
489   // Note:  Histogram array size is 1-2 Kb.
490 
491  public:
492   static void update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason);
493 
494   static void init_counters();
495   static void print_counters_on(outputStream* st);
496 };
497 
498 #endif // SHARE_RUNTIME_DEOPTIMIZATION_HPP
< prev index next >