/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_COMPILEDMETHOD_HPP
#define SHARE_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
class MetadataClosure;

// This class is used internally by nmethods to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next();
  void      set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void      set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
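
// Illustrative sketch (not part of the API): each ExceptionCache entry
// groups cached handlers for a single exception type. match(exception, pc)
// returns the cached handler for pc when the exception's klass equals
// _exception_type, and NULL otherwise; add_address_and_handler(pc, handler)
// records a new pc/handler pair while _count < cache_size. A cache-level
// lookup therefore walks the chain of entries, roughly:
//
//   for (ExceptionCache* ec = head; ec != NULL; ec = ec->next()) {
//     address handler = ec->match(exception, pc);
//     if (handler != NULL) return handler;  // hit
//   }
//   return NULL;  // miss: the caller computes the handler and caches it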

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify
  // and read from the cache concurrently. Without volatile,
  // find_pc_desc_internal has returned wrong results, because a C++
  // compiler (namely xlC12) may duplicate field accesses if the
  // elements are not volatile.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = NULL); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
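
// Note (an assumption drawn from the accessors above, not a documented
// contract): add_pc_desc is expected to keep the most recently added
// entry in slot 0, so last_pc_desc() simply returns the newest cached
// result, e.g.
//
//   cache.add_pc_desc(desc);
//   PcDesc* newest = cache.last_pc_desc();  // == desc under this assumption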

class PcDescSearch {
private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const { return _upper; }
};

class PcDescContainer {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != NULL && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class NMethodSweeper;

  void init_defaults();
protected:
  enum MarkForDeoptimizationStatus {
    not_marked,
    deoptimize,
    deoptimize_noupdate
  };

  MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization

  bool _is_far_code; // Code is far from CodeCache.
                     // Far call instructions are needed to call it from code in the CodeCache.

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _lazy_critical_native:1;      // Lazy JNI critical native
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
  unsigned int _has_monitors:1;              // Fastpath monitor detection for continuations

  Method*   _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache* volatile _exception_cache;

  void* _gc_data;

  virtual void flush() = 0;

  oop* _keepalive; // allocated and maintained by Continuation::weak_storage().
protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);

public:
  // Only used by unit tests.
  CompiledMethod() {}

  template<typename T>
  T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_monitors() const                      { return _has_monitors; }
  void  set_has_monitors(bool z)                  { _has_monitors = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
  void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  enum { not_installed = -1, // in construction; only the owner doing the construction is
                             // allowed to advance state
         in_use        = 0,  // executable nmethod
         not_used      = 1,  // not entrant, but revivable
         not_entrant   = 2,  // marked for deoptimization but activations may still exist;
                             // will be transformed to zombie when all activations are gone
         unloaded      = 3,  // there should be no activations; should not be called, and will be
                             // transformed to zombie by the sweeper when not "locked in vm"
         zombie        = 4   // no activations exist; nmethod is ready for purge
  };
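
  // A rough sketch of the state progression implied by the comments above
  // (exact transitions are driven by the sweeper and the deoptimization
  // machinery; see make_not_entrant/make_not_used/make_zombie below):
  //
  //   not_installed -> in_use -> not_entrant -> zombie    (normal retirement)
  //                    in_use -> not_used    -> in_use    (revived via make_entrant)
  //                    in_use -> unloaded    -> zombie    (after class unloading)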

  virtual bool  is_in_use() const = 0;
  virtual int   comp_level() const = 0;
  virtual int   compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual bool make_zombie() = 0;
  virtual bool is_osr_method() const = 0;
  virtual int osr_entry_bci() const = 0;
  Method* method() const                          { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != NULL && _method->is_native(); }
  bool is_java_method() const { return _method != NULL && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool  is_marked_for_deoptimization() const      { return _mark_for_deoptimization_status != not_marked; }
  void  mark_for_deoptimization(bool inc_recompile_counts = true) {
    _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
  }
  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    return _mark_for_deoptimization_status != deoptimize_noupdate;
  }

  static bool nmethod_access_is_safe(nmethod* nm);

  // Tells whether frames described by this nmethod can be deoptimized.
  // Note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }

  int insts_size() const { return insts_end() - insts_begin(); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return consts_end() - consts_begin(); }

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return stub_end() - stub_begin(); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return handler_table_end() - handler_table_begin(); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;

protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }

public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
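
  // A plausible caller-side flow for this API (a sketch based on the
  // declarations above, not the exact runtime code): probe the cache
  // first, compute a handler only on a miss, then publish the result
  // so later throws at the same pc hit the cache:
  //
  //   address handler = cm->handler_for_exception_and_pc(exception, pc);
  //   if (handler == NULL) {
  //     handler = /* slow path: consult the exception handler table */;
  //     cm->add_handler_for_exception_and_pc(exception, pc, handler);
  //   }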

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const  { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  address* deopt_handler_begin_addr()        { return &_deopt_handler_begin; }
  // Deopt
  // Return true if the PC is one we would expect when the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
  inline bool is_deopt_entry(address pc);

  // Accessor/mutator for the original pc of a frame before the frame was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  virtual int orig_pc_offset() = 0;
private:
  // The original pc lives in a fixed slot, orig_pc_offset() bytes above the
  // frame's unextended sp.
  address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + orig_pc_offset()); }

public:
  virtual bool can_convert_to_zombie() = 0;
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool is_far_code() const { return _is_far_code; }

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // Implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support for class unloading and nmethod unloading
 private:
  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by sweeper and whitebox test
  void cleanup_inline_caches(bool clean_all);

  virtual void clear_inline_caches();
  void clear_ic_callsites();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  bool has_evol_metadata();

  // Fast breakpoint support. Tells whether this compiled method is
  // dependent on the given method. Also returns true if this nmethod
  // corresponds to the given method itself.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(MetadataClosure* f) = 0;

  // GC support
 protected:
  address oops_reloc_begin() const;

 private:
  static bool clean_ic_if_metadata_is_dead(CompiledIC *ic);

 public:
  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  virtual bool is_unloading() = 0;

  bool unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) = 0;

  bool is_on_continuation_stack();

  oop* get_keepalive();
  oop* set_keepalive(oop* keepalive);
  bool clear_keepalive(oop* old);

private:
  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }
};

#endif // SHARE_CODE_COMPILEDMETHOD_HPP