/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/signature.hpp"
#include "utilities/globalCounter.inline.hpp"

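// OopMapCacheEntry extends InterpreterOopMap with an intrusive link so that
// entries displaced from the cache can be queued for deferred deallocation
// (see OopMapCache::enqueue_for_cleanup below).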
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 private:
  OopMapCacheEntry* _next;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

  static void deallocate(OopMapCacheEntry* const entry);

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
    _next = nullptr;
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method, bci) and initializes the entry
  bool compute_map(Thread* current);
  int  size();
};


OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


bool OopMapForCacheEntry::compute_map(Thread* current) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    if (!GenerateOopMap::compute_map(current)) {
      fatal("Unrecoverable verification or out-of-memory error");
      return false;
    }
    result_for_basicblock(_bci);
  }
  return true;
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry)         { _entry = entry; _failed = false; }
  void offset_do(int offset)                     { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                            { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
}

InterpreterOopMap::~InterpreterOopMap() {
  if (has_valid_mask() && mask_size() > small_mask_limit) {
    assert(_bit_mask[0] != 0, "should have pointer to C heap");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
  }
}

bool InterpreterOopMap::is_empty() const {
  bool result = _method == nullptr;
  assert(_method != nullptr || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method    = nullptr;
  _mask_size = USHRT_MAX;  // This value should cause a failure quickly
  _bci       = 0;
  _expression_stack_size = 0;
  _num_oops  = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}

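// Each local/stack slot is described by bits_per_entry bits in the mask.
// Within a slot's group, bit oop_bit_number marks an oop and bit
// dead_bit_number marks a known-dead value (see OopMapCacheEntry::set_mask).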
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::print() const {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
    if (is_dead(i)) tty->print("%d+ ", i);
    else
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}

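// Fills the oop bits of a native method's parameter mask by walking the
// method signature and marking each object argument's slot.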
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;                             // the bit mask to be filled
  int         _size;                             // the mask size in bits
  int         _num_oops;

  void set_one(int i) {
    _num_oops++;
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_byte()                               { /* ignore */ }
  void pass_short()                              { /* ignore */ }
  void pass_int()                                { /* ignore */ }
  void pass_long()                               { /* ignore */ }
  void pass_float()                              { /* ignore */ }
  void pass_double()                             { /* ignore */ }
  void pass_object()                             { set_one(offset()); }

  MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask   = mask;
    _size   = size;
    _num_oops = 0;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    iterate();
  }

  int num_oops() { return _num_oops; }
};

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  const bool log = log_is_enabled(Trace, interpreter, oopmap);
  LogStream st(Log(interpreter, oopmap)::trace());

  if (log) st.print("Locals (%d): ", max_locals);
  for (int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)               ? true : false;
    bool v2 = vars[i].is_reference();
    assert(v1 == v2, "locals oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();

  if (log) st.print("Stack (%d): ", stack_top);
  for (int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference();
    assert(v1 == v2, "stack oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();
  return true;
}

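// Masks of at most small_mask_limit bits are stored inline in _bit_mask;
// larger masks are allocated on the C heap, with _bit_mask[0] holding the
// pointer (see also InterpreterOopMap::~InterpreterOopMap and copy_from).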
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
  _num_oops = mf.num_oops();
}


void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
  // Flush the entry to deallocate any existing bit mask
  flush();
  set_method(method());
  set_bci(checked_cast<unsigned short>(bci));  // bci is always u2
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    OopMapForCacheEntry gen(method, bci, this);
    if (!gen.compute_map(Thread::current())) {
      fatal("Unrecoverable verification or out-of-memory error");
    }
  }
}


void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  _num_oops = 0;
  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store the completed word and start a new one
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number);
      _num_oops++;
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}

void OopMapCacheEntry::deallocate(OopMapCacheEntry* const entry) {
  entry->flush();
  FREE_C_HEAP_OBJ(entry);
}

// Implementation of OopMapCache

void InterpreterOopMap::copy_from(const OopMapCacheEntry* src) {
  // The expectation is that this InterpreterOopMap is recently created
  // and empty. It is used to get a copy of a cached entry.
  assert(!has_valid_mask(), "InterpreterOopMap object can only be filled once");
  assert(src->has_valid_mask(), "Cannot copy entry with an invalid mask");

  set_method(src->method());
  set_bci(src->bci());
  set_mask_size(src->mask_size());
  set_expression_stack_size(src->expression_stack_size());
  _num_oops = src->num_oops();

  // Is the bit mask contained in the entry?
  if (src->mask_size() <= small_mask_limit) {
    memcpy(_bit_mask, src->_bit_mask, mask_word_size() * BytesPerWord);
  } else {
    _bit_mask[0] = (uintptr_t) NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
    memcpy((void*) _bit_mask[0], (void*) src->_bit_mask[0], mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}
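// The hash is reduced modulo the table size in entry_at/put_at; lookup()
// handles collisions by probing up to probe_depth consecutive slots.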

OopMapCacheEntry* volatile OopMapCache::_old_entries = nullptr;

OopMapCache::OopMapCache() {
  for (int i = 0; i < size; i++) _array[i] = nullptr;
}


OopMapCache::~OopMapCache() {
  // Deallocate oop maps that are allocated out-of-line
  flush();
}

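// Table slots are read and written lock-free: entry_at pairs a load_acquire
// with the CAS in put_at, so a reader that sees an entry also sees its fully
// initialized contents.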
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return Atomic::load_acquire(&(_array[i % size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg(&_array[i % size], old, entry) == old;
}

void OopMapCache::flush() {
  for (int i = 0; i < size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr) {
      _array[i] = nullptr;  // no barrier, only called in OopMapCache destructor
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

void OopMapCache::flush_obsolete_entries() {
  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
  for (int i = 0; i < size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr && !entry->is_empty() && entry->method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, oopmap)
          ("flush: %s(%s): cached entry @%d",
           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
      }
      _array[i] = nullptr;
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

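// Typical use (a sketch; actual callers reach this through Method::mask_for
// and friends):
//   InterpreterOopMap mask;
//   cache->lookup(method, bci, &mask);  // copy of the (possibly new) entry
//   mask.iterate_oop(&closure);         // visit all oop offsets at this bci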
// Lookup or compute/cache the entry.
void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  int probe = hash_value_for(method, bci);

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    static int count = 0;
    ResourceMark rm;
    log_debug(interpreter, oopmap)
          ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
           method()->name_and_sig_as_C_string(), probe);
  }

  // Search hashtable for match.
  // Need a critical section to avoid race against concurrent reclamation.
  {
    GlobalCounter::CriticalSection cs(Thread::current());
    for (int i = 0; i < probe_depth; i++) {
      OopMapCacheEntry *entry = entry_at(probe + i);
      if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
        entry_for->copy_from(entry);
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
        return;
      }
    }
  }

  // Entry is not in hashtable.
  // Compute entry

  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry_for->copy_from(tmp);

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    OopMapCacheEntry::deallocate(tmp);
    return;
  }

  // First search for an empty slot
  for (int i = 0; i < probe_depth; i++) {
    OopMapCacheEntry* entry = entry_at(probe + i);
    if (entry == nullptr) {
      if (put_at(probe + i, tmp, nullptr)) {
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        return;
      }
    }
  }

  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // where the first entry in the collision array is replaced with the new one.
  OopMapCacheEntry* old = entry_at(probe + 0);
  if (put_at(probe + 0, tmp, old)) {
    // Cannot deallocate the old entry on the spot: it may still be in use by
    // readers that obtained a reference to it before we replaced it in the map.
    // Instead of synchronizing on GlobalCounter here and incurring a heavy
    // thread walk, we do this cleanup out of band.
    enqueue_for_cleanup(old);
  } else {
    OopMapCacheEntry::deallocate(tmp);
  }

  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
  return;
}

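// Push the entry onto the _old_entries list with a CAS loop; the list is
// drained by cleanup() once readers have been synchronized with.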
void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
  while (true) {
    OopMapCacheEntry* head = Atomic::load(&_old_entries);
    entry->_next = head;
    if (Atomic::cmpxchg(&_old_entries, head, entry) == head) {
      // Enqueued successfully.
      break;
    }
  }

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    ResourceMark rm;
    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
                          entry->method()->name_and_sig_as_C_string(), entry->bci());
  }
}

bool OopMapCache::has_cleanup_work() {
  return Atomic::load(&_old_entries) != nullptr;
}

void OopMapCache::try_trigger_cleanup() {
  // See if we can take the lock for the notification without blocking.
  // This allows triggering the cleanup from GC paths, which can hold
  // the Service_lock for e.g. oop iteration in the service thread.
  if (has_cleanup_work() && Service_lock->try_lock_without_rank_check()) {
    Service_lock->notify_all();
    Service_lock->unlock();
  }
}

void OopMapCache::cleanup() {
  OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr);
  if (entry == nullptr) {
    // No work.
    return;
  }

  // About to delete the entries that might still be accessed by other threads
  // on the lookup path. Need to sync up with them before proceeding.
  GlobalCounter::write_synchronize();

  while (entry != nullptr) {
    if (log_is_enabled(Debug, interpreter, oopmap)) {
      ResourceMark rm;
      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
                          entry->method()->name_and_sig_as_C_string(), entry->bci());
    }
    OopMapCacheEntry* next = entry->_next;
    OopMapCacheEntry::deallocate(entry);
    entry = next;
  }
}

void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  if (tmp->has_valid_mask()) {
    entry->copy_from(tmp);
  }
  OopMapCacheEntry::deallocate(tmp);
}