/*
 * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CODE_COMPILEDMETHOD_HPP
#define SHARE_CODE_COMPILEDMETHOD_HPP

#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"

class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
class NativeCallWrapper;
class ScopeDesc;
class CompiledIC;
class MetadataClosure;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.

class ExceptionCache : public CHeapObj<mtCode> {
  friend class VMStructs;
 private:
  enum { cache_size = 16 };
  Klass*   _exception_type;
  address  _pc[cache_size];
  address  _handler[cache_size];
  volatile int _count;
  ExceptionCache* volatile _next;
  ExceptionCache* _purge_list_next;

  inline address pc_at(int index);
  void set_pc_at(int index, address a)      { assert(index >= 0 && index < cache_size, ""); _pc[index] = a; }

  inline address handler_at(int index);
  void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size, ""); _handler[index] = a; }

  inline int count();
  // increment_count is only called under lock, but there may be concurrent readers.
  void increment_count();

 public:

  ExceptionCache(Handle exception, address pc, address handler);

  Klass*    exception_type()                { return _exception_type; }
  ExceptionCache* next();
  void      set_next(ExceptionCache *ec);
  ExceptionCache* purge_list_next()                 { return _purge_list_next; }
  void      set_purge_list_next(ExceptionCache *ec) { _purge_list_next = ec; }

  address match(Handle exception, address pc);
  bool    match_exception_with_space(Handle exception);
  address test_address(address addr);
  bool    add_address_and_handler(address addr, address handler);
};
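
// Illustrative use of the probe API above (a sketch, not code from this
// file): a caller that already holds a matching ExceptionCache node checks
// for an existing entry before publishing a new pc/handler pair. The
// variable names are hypothetical.
//
//   address cached = ec->test_address(pc);
//   if (cached == nullptr) {
//     // May fail if the node's slots are full; callers are then expected
//     // to allocate and link a fresh node.
//     ec->add_address_and_handler(pc, handler);
//   }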

class nmethod;

// cache pc descs found in earlier inquiries
class PcDescCache {
  friend class VMStructs;
 private:
  enum { cache_size = 4 };
  // The array elements MUST be volatile! Several threads may modify and
  // read from the cache concurrently. Without volatile, a C++ compiler
  // (namely xlC12) may duplicate field accesses, and find_pc_desc_internal
  // has been observed to return wrong results.
  typedef PcDesc* PcDescPtr;
  volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
 public:
  PcDescCache() { debug_only(_pc_descs[0] = nullptr); }
  void    reset_to(PcDesc* initial_pc_desc);
  PcDesc* find_pc_desc(int pc_offset, bool approximate);
  void    add_pc_desc(PcDesc* pc_desc);
  PcDesc* last_pc_desc() { return _pc_descs[0]; }
};

class PcDescSearch {
private:
  address _code_begin;
  PcDesc* _lower;
  PcDesc* _upper;
public:
  PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
    _code_begin(code), _lower(lower), _upper(upper)
  {
  }

  address code_begin() const { return _code_begin; }
  PcDesc* scopes_pcs_begin() const { return _lower; }
  PcDesc* scopes_pcs_end() const { return _upper; }
};

class PcDescContainer {
private:
  PcDescCache _pc_desc_cache;
public:
  PcDescContainer() {}

  PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
  void    reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }

  PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
    address base_address = search.code_begin();
    PcDesc* desc = _pc_desc_cache.last_pc_desc();
    if (desc != nullptr && desc->pc_offset() == pc - base_address) {
      return desc;
    }
    return find_pc_desc_internal(pc, approximate, search);
  }
};
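
// Illustrative call site (a sketch): callers wrap an nmethod's section
// bounds in a PcDescSearch before querying the container, exactly as the
// CompiledMethod::find_pc_desc() helper near the end of this file does:
//
//   PcDesc* pd = _pc_desc_container.find_pc_desc(
//       pc, false /* approximate */,
//       PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));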


class CompiledMethod : public CodeBlob {
  friend class VMStructs;
  friend class DeoptimizationScope;
  void init_defaults();
protected:
  enum DeoptimizationStatus : u1 {
    not_marked,
    deoptimize,
    deoptimize_noupdate,
    deoptimize_done
  };

  volatile DeoptimizationStatus _deoptimization_status; // Used for stack deoptimization
  // Used to track in which deoptimize handshake this method will be deoptimized.
  uint64_t                      _deoptimization_generation;

  // set during construction
  unsigned int _has_unsafe_access:1;         // May fault due to unsafe access.
  unsigned int _has_method_handle_invokes:1; // Does this method have MethodHandle invokes?
  unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
  unsigned int _has_monitors:1;              // Fastpath monitor detection for continuations

  Method*   _method;
  address _scopes_data_begin;
  // All deoptees will resume execution at this address.
  address _deopt_handler_begin;
  // All deoptees at a MethodHandle call site will resume execution
  // at this address.
  address _deopt_mh_handler_begin;

  PcDescContainer _pc_desc_container;
  ExceptionCache * volatile _exception_cache;

  void* _gc_data;

  virtual void purge(bool free_code_cache_data = true) = 0;

private:
  DeoptimizationStatus deoptimization_status() const {
    return Atomic::load(&_deoptimization_status);
  }

protected:
  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);
  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments, bool compiled);

public:
  // Only used by unit tests.
  CompiledMethod() {}

  template<typename T>
  T* gc_data() const                              { return reinterpret_cast<T*>(_gc_data); }
  template<typename T>
  void set_gc_data(T* gc_data)                    { _gc_data = reinterpret_cast<void*>(gc_data); }

  bool  has_unsafe_access() const                 { return _has_unsafe_access; }
  void  set_has_unsafe_access(bool z)             { _has_unsafe_access = z; }

  bool  has_monitors() const                      { return _has_monitors; }
  void  set_has_monitors(bool z)                  { _has_monitors = z; }

  bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
  void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }

  bool  has_wide_vectors() const                  { return _has_wide_vectors; }
  void  set_has_wide_vectors(bool z)              { _has_wide_vectors = z; }

  bool  needs_stack_repair() const {
    if (is_compiled_by_c1()) {
      return method()->c1_needs_stack_repair();
    } else if (is_compiled_by_c2()) {
      return method()->c2_needs_stack_repair();
    } else {
      return false;
    }
  }

  enum : signed char { not_installed = -1, // in construction; only the owner doing the construction is
                                           // allowed to advance state
                       in_use        = 0,  // executable nmethod
                       not_used      = 1,  // not entrant, but revivable
                       not_entrant   = 2,  // marked for deoptimization but activations may still exist
  };
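
  // Informal transition sketch, read off the enum comments above and the
  // make_* methods below (an assumption, not an authoritative diagram; the
  // real transition logic lives in the nmethod implementation):
  //
  //   not_installed --construction--> in_use
  //   in_use --make_not_entrant()---> not_entrant
  //   in_use --make_not_used()------> not_used
  //   not_used --make_entrant()-----> in_use   // "revivable"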

  virtual bool  is_in_use() const = 0;
  virtual int   comp_level() const = 0;
  virtual int   compile_id() const = 0;

  virtual address verified_entry_point() const = 0;
  virtual address verified_inline_entry_point() const = 0;
  virtual address verified_inline_ro_entry_point() const = 0;
  virtual void log_identity(xmlStream* log) const = 0;
  virtual void log_state_change() const = 0;
  virtual bool make_not_used() = 0;
  virtual bool make_not_entrant() = 0;
  virtual bool make_entrant() = 0;
  virtual address entry_point() const = 0;
  virtual address inline_entry_point() const = 0;
  virtual bool is_osr_method() const = 0;
  virtual int osr_entry_bci() const = 0;
  Method* method() const                          { return _method; }
  virtual void print_pcs() = 0;
  bool is_native_method() const { return _method != nullptr && _method->is_native(); }
  bool is_java_method() const { return _method != nullptr && !_method->is_native(); }

  // ScopeDesc retrieval operation
  PcDesc* pc_desc_at(address pc)   { return find_pc_desc(pc, false); }
  // pc_desc_near returns the first PcDesc at or after the given pc.
  PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
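
  // The difference in a sketch (the pc value is hypothetical):
  //
  //   PcDesc* exact = pc_desc_at(pc);   // nullptr unless some PcDesc matches pc exactly
  //   PcDesc* near  = pc_desc_near(pc); // first PcDesc at or after pc, per the comment above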

  // ScopeDesc for an instruction
  ScopeDesc* scope_desc_at(address pc);
  ScopeDesc* scope_desc_near(address pc);

  bool is_at_poll_return(address pc);
  bool is_at_poll_or_poll_return(address pc);

  bool  is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
  bool  has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
  void  set_deoptimized_done();

  virtual void  make_deoptimized() { assert(false, "not supported"); }

  bool update_recompile_counts() const {
    // Update recompile counts when either the update is explicitly requested (deoptimize)
    // or the nmethod is not marked for deoptimization at all (not_marked).
    // The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
    DeoptimizationStatus status = deoptimization_status();
    return status != deoptimize_noupdate && status != deoptimize_done;
  }
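
  // For reference, the expression above maps each status as follows:
  //
  //   not_marked          -> true   (update counts)
  //   deoptimize          -> true
  //   deoptimize_noupdate -> false
  //   deoptimize_done     -> false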

  // tells whether frames described by this nmethod can be deoptimized
  // note: native wrappers cannot be deoptimized.
  bool can_be_deoptimized() const { return is_java_method(); }

  virtual oop oop_at(int index) const = 0;
  virtual Metadata* metadata_at(int index) const = 0;

  address scopes_data_begin() const { return _scopes_data_begin; }
  virtual address scopes_data_end() const = 0;
  int scopes_data_size() const { return int(scopes_data_end() - scopes_data_begin()); }

  virtual PcDesc* scopes_pcs_begin() const = 0;
  virtual PcDesc* scopes_pcs_end() const = 0;
  int scopes_pcs_size() const { return int((intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin()); }

  address insts_begin() const { return code_begin(); }
  address insts_end() const { return stub_begin(); }
  // Returns true if a given address is in the 'insts' section. The method
  // insts_contains_inclusive() is end-inclusive.
  bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
  bool insts_contains_inclusive(address addr) const { return insts_begin() <= addr && addr <= insts_end(); }
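
  // Boundary behavior of the two predicates above, in a sketch:
  //
  //   insts_contains(insts_end());           // false: half-open range [begin, end)
  //   insts_contains_inclusive(insts_end()); // true:  closed range [begin, end]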

  int insts_size() const { return int(insts_end() - insts_begin()); }

  virtual address consts_begin() const = 0;
  virtual address consts_end() const = 0;
  bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
  int consts_size() const { return int(consts_end() - consts_begin()); }

  virtual int skipped_instructions_size() const = 0;

  virtual address stub_begin() const = 0;
  virtual address stub_end() const = 0;
  bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
  int stub_size() const { return int(stub_end() - stub_begin()); }

  virtual address handler_table_begin() const = 0;
  virtual address handler_table_end() const = 0;
  bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
  int handler_table_size() const { return int(handler_table_end() - handler_table_begin()); }

  virtual address exception_begin() const = 0;

  virtual address nul_chk_table_begin() const = 0;
  virtual address nul_chk_table_end() const = 0;
  bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
  int nul_chk_table_size() const { return int(nul_chk_table_end() - nul_chk_table_begin()); }

  virtual oop* oop_addr_at(int index) const = 0;
  virtual Metadata** metadata_addr_at(int index) const = 0;

protected:
  // Exception cache support
  // Note: _exception_cache may be read and cleaned concurrently.
  ExceptionCache* exception_cache() const         { return _exception_cache; }
  ExceptionCache* exception_cache_acquire() const;
  void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }

public:
  address handler_for_exception_and_pc(Handle exception, address pc);
  void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
  void clean_exception_cache();

  void add_exception_cache_entry(ExceptionCache* new_entry);
  ExceptionCache* exception_cache_entry_for_exception(Handle exception);
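
  // Typical lookup/insert pattern (a sketch; compute_handler() stands in
  // for the slow path and is hypothetical):
  //
  //   address handler = cm->handler_for_exception_and_pc(exception, pc);
  //   if (handler == nullptr) {
  //     handler = compute_handler(exception, pc);
  //     cm->add_handler_for_exception_and_pc(exception, pc, handler);
  //   }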

  // MethodHandle
  bool is_method_handle_return(address return_pc);
  address deopt_mh_handler_begin() const  { return _deopt_mh_handler_begin; }

  address deopt_handler_begin() const { return _deopt_handler_begin; }
  address* deopt_handler_begin_addr() { return &_deopt_handler_begin; }
  // Deopt
  // Returns true if the PC is what one would expect if the frame is being deopted.
  inline bool is_deopt_pc(address pc);
  inline bool is_deopt_mh_entry(address pc);
  inline bool is_deopt_entry(address pc);
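
  // A plausible reading of the trio above (an assumption; the authoritative
  // definitions live in the inline header, not here):
  //
  //   is_deopt_pc(pc) ~= is_deopt_entry(pc) || is_deopt_mh_entry(pc)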

  // Accessor/mutator for the original pc of a frame before it was deopted.
  address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
  void    set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }

  virtual int orig_pc_offset() = 0;

private:
  address* orig_pc_addr(const frame* fr);

public:
  virtual const char* compile_kind() const = 0;
  virtual int get_state() const = 0;

  const char* state() const;

  bool inlinecache_check_contains(address addr) const {
    return (addr >= code_begin() && addr < verified_entry_point());
  }

  void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);

  // implicit exceptions support
  address continuation_for_implicit_div0_exception(address pc) { return continuation_for_implicit_exception(pc, true); }
  address continuation_for_implicit_null_exception(address pc) { return continuation_for_implicit_exception(pc, false); }

  static address get_deopt_original_pc(const frame* fr);

  // Inline cache support for class unloading and nmethod unloading
 private:
  bool cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all);

  address continuation_for_implicit_exception(address pc, bool for_div0_check);

 public:
  // Serial version used by whitebox test
  void cleanup_inline_caches_whitebox();

  virtual void clear_inline_caches();
  void clear_ic_callsites();

  // Execute nmethod barrier code, as if entering through nmethod call.
  void run_nmethod_entry_barrier();

  // Verify and count cached icholder relocations.
  int  verify_icholder_relocations();
  void verify_oop_relocations();

  bool has_evol_metadata();

  // Fast breakpoint support. Tells if this compiled method is
  // dependent on the given method. Returns true if this nmethod
  // corresponds to the given method as well.
  virtual bool is_dependent_on_method(Method* dependee) = 0;

  virtual NativeCallWrapper* call_wrapper_at(address call) const = 0;
  virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0;
  virtual address call_instruction_address(address pc) const = 0;

  virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0;
  virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0;

  Method* attached_method(address call_pc);
  Method* attached_method_before_pc(address pc);

  virtual void metadata_do(MetadataClosure* f) = 0;

  // GC support
 protected:
  address oops_reloc_begin() const;

 private:
  static bool clean_ic_if_metadata_is_dead(CompiledIC *ic);

 public:
  // GC unloading support
  // Cleans unloaded klasses and unloaded nmethods in inline caches

  virtual bool is_unloading() = 0;

  bool unload_nmethod_caches(bool class_unloading_occurred);
  virtual void do_unloading(bool unloading_occurred) = 0;

private:
  PcDesc* find_pc_desc(address pc, bool approximate) {
    return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
  }
};

#endif // SHARE_CODE_COMPILEDMETHOD_HPP