< prev index next >

src/hotspot/share/code/compiledMethod.hpp

Print this page




 142   friend class NMethodSweeper;
 143 
 144   void init_defaults();
 145 protected:
 146   enum MarkForDeoptimizationStatus {
 147     not_marked,
 148     deoptimize,
 149     deoptimize_noupdate
 150   };
 151 
 152   MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
 153 
 154   bool _is_far_code; // Code is far from CodeCache.
 155                      // Have to use far call instructions to call it from code in CodeCache.
 156 
 157   // set during construction
 158   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 159   unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
 160   unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 161   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints

 162 
 163   Method*   _method;
 164   address _scopes_data_begin;
 165   // All deoptees will resume execution at this location described by
 166   // this address.
 167   address _deopt_handler_begin;
 168   // All deoptees at a MethodHandle call site will resume execution
 169   // at this location described by this address.
 170   address _deopt_mh_handler_begin;
 171 
 172   PcDescContainer _pc_desc_container;
 173   ExceptionCache * volatile _exception_cache; // May be read and cleaned concurrently; see exception_cache_acquire().
 174 
 175   void* _gc_data; // Opaque GC-specific slot; accessed via gc_data()/set_gc_data().
 177   virtual void flush() = 0; // Pure virtual; implementation is subclass-specific.


 178 protected:
 179   CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
 180   CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
 181 
 182 public:
 183   // Only used by unit tests.
 184   CompiledMethod() {}
 185 
 186   virtual bool is_compiled() const                { return true; }
 187 
 188   template<typename T>
 189   T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
 190   template<typename T>
 191   void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }
 192 
 193   bool  has_unsafe_access() const                 { return _has_unsafe_access; }
 194   void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }
 195 



 196   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
 197   void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 198 
 199   bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
 200   void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
 201 
 202   bool  has_wide_vectors() const                  { return _has_wide_vectors; }
 203   void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }
 204 
 205   enum { not_installed = -1, // in construction, only the owner doing the construction is
 206                              // allowed to advance state
 207          in_use        = 0,  // executable nmethod
 208          not_used      = 1,  // not entrant, but revivable
 209          not_entrant   = 2,  // marked for deoptimization but activations may still exist,
 210                              // will be transformed to zombie when all activations are gone
 211          unloaded      = 3,  // there should be no activations, should not be called, will be
 212                              // transformed to zombie by the sweeper, when not "locked in vm".
 213          zombie        = 4   // no activations exist, nmethod is ready for purge
 214   };
 215 
 215 


 288 
 289   virtual address stub_begin() const = 0;
 290   virtual address stub_end() const = 0;
 291   bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
 292   int stub_size() const { return stub_end() - stub_begin(); }
 293 
 294   virtual address handler_table_begin() const = 0;
 295   virtual address handler_table_end() const = 0;
 296   bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
 297   int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
 298 
 299   virtual address exception_begin() const = 0;
 300 
 301   virtual address nul_chk_table_begin() const = 0;
 302   virtual address nul_chk_table_end() const = 0;
 303   bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
 304   int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
 305 
 306   virtual oop* oop_addr_at(int index) const = 0;
 307   virtual Metadata** metadata_addr_at(int index) const = 0;
 308   virtual void    set_original_pc(const frame* fr, address pc) = 0;
 309 
 310 protected:
 311   // Exception cache support
 312   // Note: _exception_cache may be read and cleaned concurrently.
 313   ExceptionCache* exception_cache() const         { return _exception_cache; }
 314   ExceptionCache* exception_cache_acquire() const; // Acquire-ordered load for concurrent readers.
 315   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
 316 
 317 public:
 318   address handler_for_exception_and_pc(Handle exception, address pc);
 319   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
 320   void clean_exception_cache();
 321 
 322   void add_exception_cache_entry(ExceptionCache* new_entry);
 323   ExceptionCache* exception_cache_entry_for_exception(Handle exception);
 324 
 325   // MethodHandle
 326   bool is_method_handle_return(address return_pc);
 327   address deopt_mh_handler_begin() const  { return _deopt_mh_handler_begin; }
 328 
 329   address deopt_handler_begin() const { return _deopt_handler_begin; }
 330   virtual address get_original_pc(const frame* fr) = 0;
 331   // Deopt
 332   // Return true if the PC is one that would be expected if the frame is being deopted.
 333   inline bool is_deopt_pc(address pc);
 334   bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
 335   inline bool is_deopt_entry(address pc);
 336 









 337   virtual bool can_convert_to_zombie() = 0;
 338   virtual const char* compile_kind() const = 0;
 339   virtual int get_state() const = 0;
 340 
 341   const char* state() const;
 342 
 343   bool is_far_code() const { return _is_far_code; }
 344 
 345   bool inlinecache_check_contains(address addr) const {
 346     return (addr >= code_begin() && addr < verified_entry_point());
 347   }
 348 
 349   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
 350 
 351   // Implicit exceptions support: compute the continuation for a trapping PC.
 352   address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
 353   address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
 354 
 355   static address get_deopt_original_pc(const frame* fr);
 356 


 389   Method* attached_method(address call_pc);
 390   Method* attached_method_before_pc(address pc);
 391 
 392   virtual void metadata_do(MetadataClosure* f) = 0;
 393 
 394   // GC support
 395  protected:
 396   address oops_reloc_begin() const;
 397 
 398  private:
 399   bool static clean_ic_if_metadata_is_dead(CompiledIC *ic);
 400 
 401  public:
 402   // GC unloading support
 403   // Cleans unloaded klasses and unloaded nmethods in inline caches
 404 
 405   virtual bool is_unloading() = 0;
 406 
 407   bool unload_nmethod_caches(bool class_unloading_occurred);
 408   virtual void do_unloading(bool unloading_occurred) = 0;






 409 
 410 private:
 411   PcDesc* find_pc_desc(address pc, bool approximate) { // Delegates to _pc_desc_container within this method's code bounds.
 412     return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
 413   }
 414 };
 415 
 416 #endif // SHARE_CODE_COMPILEDMETHOD_HPP


 142   friend class NMethodSweeper;
 143 
 144   void init_defaults();
 145 protected:
 146   enum MarkForDeoptimizationStatus {
 147     not_marked,
 148     deoptimize,
 149     deoptimize_noupdate
 150   };
 151 
 152   MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
 153 
 154   bool _is_far_code; // Code is far from CodeCache.
 155                      // Have to use far call instructions to call it from code in CodeCache.
 156 
 157   // set during construction
 158   unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
 159   unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
 160   unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
 161   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 162   unsigned int _has_monitors:1;              // Fastpath monitor detection for continuations
 163 
 164   Method*   _method;
 165   address _scopes_data_begin;
 166   // All deoptees will resume execution at this location described by
 167   // this address.
 168   address _deopt_handler_begin;
 169   // All deoptees at a MethodHandle call site will resume execution
 170   // at this location described by this address.
 171   address _deopt_mh_handler_begin;
 172 
 173   PcDescContainer _pc_desc_container;
 174   ExceptionCache * volatile _exception_cache; // May be read and cleaned concurrently; see exception_cache_acquire().
 175 
 176   void* _gc_data; // Opaque GC-specific slot; accessed via gc_data()/set_gc_data().
 177 
 178   virtual void flush() = 0; // Pure virtual; implementation is subclass-specific.
 179 
 180   oop* _keepalive; // allocated and maintained by Continuation::weak_storage().
 181 protected:
 182   CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
 183   CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
 184 
 185 public:
 186   // Only used by unit tests.
 187   CompiledMethod() {}
 188 


 189   template<typename T>
 190   T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
 191   template<typename T>
 192   void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }
 193 
 194   bool  has_unsafe_access() const                 { return _has_unsafe_access; }
 195   void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }
 196 
 197   bool  has_monitors() const                      { return _has_monitors; }
 198   void  set_has_monitors(bool z)                  { _has_monitors = z; }
 199 
 200   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
 201   void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 202 
 203   bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
 204   void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
 205 
 206   bool  has_wide_vectors() const                  { return _has_wide_vectors; }
 207   void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }
 208 
 209   enum { not_installed = -1, // in construction, only the owner doing the construction is
 210                              // allowed to advance state
 211          in_use        = 0,  // executable nmethod
 212          not_used      = 1,  // not entrant, but revivable
 213          not_entrant   = 2,  // marked for deoptimization but activations may still exist,
 214                              // will be transformed to zombie when all activations are gone
 215          unloaded      = 3,  // there should be no activations, should not be called, will be
 216                              // transformed to zombie by the sweeper, when not "locked in vm".
 217          zombie        = 4   // no activations exist, nmethod is ready for purge
 218   };
 219 
 219 


 292 
 293   virtual address stub_begin() const = 0;
 294   virtual address stub_end() const = 0;
 295   bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
 296   int stub_size() const { return stub_end() - stub_begin(); }
 297 
 298   virtual address handler_table_begin() const = 0;
 299   virtual address handler_table_end() const = 0;
 300   bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
 301   int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
 302 
 303   virtual address exception_begin() const = 0;
 304 
 305   virtual address nul_chk_table_begin() const = 0;
 306   virtual address nul_chk_table_end() const = 0;
 307   bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
 308   int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
 309 
 310   virtual oop* oop_addr_at(int index) const = 0;
 311   virtual Metadata** metadata_addr_at(int index) const = 0;

 312 
 313 protected:
 314   // Exception cache support
 315   // Note: _exception_cache may be read and cleaned concurrently.
 316   ExceptionCache* exception_cache() const         { return _exception_cache; }
 317   ExceptionCache* exception_cache_acquire() const; // Acquire-ordered load for concurrent readers.
 318   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
 319 
 320 public:
 321   address handler_for_exception_and_pc(Handle exception, address pc);
 322   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
 323   void clean_exception_cache();
 324 
 325   void add_exception_cache_entry(ExceptionCache* new_entry);
 326   ExceptionCache* exception_cache_entry_for_exception(Handle exception);
 327 
 328   // MethodHandle
 329   bool is_method_handle_return(address return_pc);
 330   address deopt_mh_handler_begin() const  { return _deopt_mh_handler_begin; }
 331 
 332   address deopt_handler_begin() const { return _deopt_handler_begin; }
 333   address* deopt_handler_begin_addr()        { return &_deopt_handler_begin; } // Pointer to the _deopt_handler_begin field itself.
 334   // Deopt
 335   // Return true if the PC is one that would be expected if the frame is being deopted.
 336   inline bool is_deopt_pc(address pc);
 337   bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
 338   inline bool is_deopt_entry(address pc);
 339 
 340   // Accessor/mutator for the original pc of a frame before a frame was deopted.
 341   address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
 342   void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
 343 
 344   virtual int orig_pc_offset() = 0;
 345 private:
 346   address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + orig_pc_offset()); };
 347 
 348 public:
 349   virtual bool can_convert_to_zombie() = 0;
 350   virtual const char* compile_kind() const = 0;
 351   virtual int get_state() const = 0;
 352 
 353   const char* state() const;
 354 
 355   bool is_far_code() const { return _is_far_code; }
 356 
 357   bool inlinecache_check_contains(address addr) const {
 358     return (addr >= code_begin() && addr < verified_entry_point());
 359   }
 360 
 361   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
 362 
 363   // Implicit exceptions support: compute the continuation for a trapping PC.
 364   address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
 365   address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }
 366 
 367   static address get_deopt_original_pc(const frame* fr);
 368 


 401   Method* attached_method(address call_pc);
 402   Method* attached_method_before_pc(address pc);
 403 
 404   virtual void metadata_do(MetadataClosure* f) = 0;
 405 
 406   // GC support
 407  protected:
 408   address oops_reloc_begin() const;
 409 
 410  private:
 411   bool static clean_ic_if_metadata_is_dead(CompiledIC *ic);
 412 
 413  public:
 414   // GC unloading support
 415   // Cleans unloaded klasses and unloaded nmethods in inline caches
 416 
 417   virtual bool is_unloading() = 0;
 418 
 419   bool unload_nmethod_caches(bool class_unloading_occurred);
 420   virtual void do_unloading(bool unloading_occurred) = 0;
 421 
 422   bool is_on_continuation_stack();
 423 
 424   oop* get_keepalive(); // Continuation keepalive handle; see the _keepalive field.
 425   oop* set_keepalive(oop* keepalive);
 426   bool clear_keepalive(oop* old);
 427 
 428 private:
 429   PcDesc* find_pc_desc(address pc, bool approximate) { // Delegates to _pc_desc_container within this method's code bounds.
 430     return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
 431   }
 432 };
 433 
 434 #endif // SHARE_CODE_COMPILEDMETHOD_HPP
< prev index next >