/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/signature.hpp"
#include "utilities/globalCounter.inline.hpp"

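// An OopMapCacheEntry is a heap-allocated InterpreterOopMap that lives in the
// OopMapCache hash table. The _next field links entries that have been
// displaced from the table onto the cache's cleanup list (see
// OopMapCache::enqueue_for_cleanup), where they wait to be deallocated.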
class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 private:
  OopMapCacheEntry* _next;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

  static void deallocate(OopMapCacheEntry* const entry);

 private:
  void allocate_bit_mask();   // allocates the bit mask on C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
    _next = nullptr;
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method,bci) and initializes the entry
  bool compute_map(Thread* current);
  int  size();
};


OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


bool OopMapForCacheEntry::compute_map(Thread* current) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    if (!GenerateOopMap::compute_map(current)) {
      fatal("Unrecoverable verification or out-of-memory error");
      return false;
    }
    result_for_basicblock(_bci);
  }
  return true;
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


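// Number of cells in the computed map: one extra slot for the receiver of
// non-static methods, plus all locals, plus the expression stack depth at _bci.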
int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry)         { _entry = entry; _failed = false; }
  void offset_do(int offset)                     { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                            { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
}

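// Bit masks up to small_mask_limit bits are stored inline in _bit_mask[];
// larger masks live on the C heap and _bit_mask[0] holds the pointer to them
// (see allocate_bit_mask() and copy_from() below), so the destructor must
// free that storage.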
InterpreterOopMap::~InterpreterOopMap() {
  if (has_valid_mask() && mask_size() > small_mask_limit) {
    assert(_bit_mask[0] != 0, "should have pointer to C heap");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
  }
}

bool InterpreterOopMap::is_empty() const {
  bool result = _method == nullptr;
  assert(_method != nullptr || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method    = nullptr;
  _mask_size = USHRT_MAX;  // This value should cause a failure quickly
  _bci       = 0;
  _expression_stack_size = 0;
  _num_oops  = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}

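// Each cell in the map occupies bits_per_entry bits. Walk the packed words
// of the mask and report every offset whose oop bit is set to the closure.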
void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::print() const {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
  for (int i = 0; i < n; i++) {
    if (is_dead(i)) tty->print("%d+ ", i);
    else
    if (is_oop(i)) tty->print("%d ", i);
  }
  tty->print_cr("}");
}

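// Fills the bit mask for a native method from its signature: every reference
// parameter slot (as reported by NativeSignatureIterator) gets its oop bit
// set; primitive parameters are ignored.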
class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;                             // the bit mask to be filled
  int         _size;                             // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_byte()                               { /* ignore */ }
  void pass_short()                              { /* ignore */ }
  void pass_int()                                { /* ignore */ }
  void pass_long()                               { /* ignore */ }
  void pass_float()                              { /* ignore */ }
  void pass_double()                             { /* ignore */ }
  void pass_object()                             { set_one(offset()); }

  MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask   = mask;
    _size   = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    iterate();
  }
};

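// Cross-checks the packed bit mask against the CellTypeState arrays produced
// by GenerateOopMap: every offset the mask reports as an oop must be a
// reference cell, and (in debug builds) every reference cell in the locals
// and the expression stack must have its oop bit set.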
bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  const bool log = log_is_enabled(Trace, interpreter, oopmap);
  LogStream st(Log(interpreter, oopmap)::trace());

  if (log) st.print("Locals (%d): ", max_locals);
  for(int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i)               ? true : false;
    bool v2 = vars[i].is_reference();
    assert(v1 == v2, "locals oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();

  if (log) st.print("Stack (%d): ", stack_top);
  for(int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j)  ? true : false;
    bool v2 = stack[j].is_reference();
    assert(v1 == v2, "stack oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
      "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(checked_cast<unsigned short>(bci));  // bci is always u2
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    OopMapForCacheEntry gen(method, bci, this);
    if (!gen.compute_map(Thread::current())) {
      fatal("Unrecoverable verification or out-of-memory error");
    }
  }
}


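// Packs the GenerateOopMap result into the bit mask: cells are laid out as
// locals followed by the expression stack, with bits_per_entry bits per cell
// (an oop bit and a dead bit), accumulated word by word into bit_mask().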
void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  _num_oops = 0;
  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number );
      _num_oops++;
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}

void OopMapCacheEntry::deallocate(OopMapCacheEntry* const entry) {
  entry->flush();
  FREE_C_HEAP_OBJ(entry);
}

// Implementation of OopMapCache

void InterpreterOopMap::copy_from(const OopMapCacheEntry* src) {
  // The expectation is that this InterpreterOopMap is recently created
  // and empty. It is used to get a copy of a cached entry.
  assert(!has_valid_mask(), "InterpreterOopMap object can only be filled once");
  assert(src->has_valid_mask(), "Cannot copy entry with an invalid mask");

  set_method(src->method());
  set_bci(src->bci());
  set_mask_size(src->mask_size());
  set_expression_stack_size(src->expression_stack_size());
  _num_oops = src->num_oops();

  // Is the bit mask contained in the entry?
  if (src->mask_size() <= small_mask_limit) {
    memcpy(_bit_mask, src->_bit_mask, mask_word_size() * BytesPerWord);
  } else {
    _bit_mask[0] = (uintptr_t) NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
    memcpy((void*) _bit_mask[0], (void*) src->_bit_mask[0], mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}

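// Entries displaced from the cache by a colliding insert are queued on this
// lock-free list until cleanup() can safely deallocate them.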
OopMapCacheEntry* volatile OopMapCache::_old_entries = nullptr;

OopMapCache::OopMapCache() {
  for(int i = 0; i < size; i++) _array[i] = nullptr;
}


OopMapCache::~OopMapCache() {
  // Deallocate oop maps that are allocated out-of-line
  flush();
}

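// Slots are read with acquire semantics and written with a CAS so that
// concurrent readers observe fully constructed entries; the index is wrapped
// with % size, so callers may pass probe + i directly.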
OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return Atomic::load_acquire(&(_array[i % size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg(&_array[i % size], old, entry) == old;
}

void OopMapCache::flush() {
  for (int i = 0; i < size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr) {
      _array[i] = nullptr;  // no barrier, only called in OopMapCache destructor
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

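// Called by RedefineClasses at a safepoint: evict entries whose Method* has
// been made obsolete so the cache does not pin the old method.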
void OopMapCache::flush_obsolete_entries() {
  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
  for (int i = 0; i < size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr && !entry->is_empty() && entry->method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, interpreter, oopmap)
          ("flush: %s(%s): cached entry @%d",
           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
      }
      _array[i] = nullptr;
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

// Lookup or compute/cache the entry.
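// Readers probe up to probe_depth slots starting at the hash value inside a
// GlobalCounter critical section, so a concurrently displaced entry cannot be
// freed while it is being copied. On a miss the map is computed, handed to the
// caller via entry_for, and then installed with a CAS; an entry displaced by a
// collision is enqueued for deferred cleanup rather than freed immediately.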
void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  int probe = hash_value_for(method, bci);

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    static int count = 0;
    ResourceMark rm;
    log_debug(interpreter, oopmap)
          ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
           method()->name_and_sig_as_C_string(), probe);
  }

  // Search hashtable for match.
  // Need a critical section to avoid race against concurrent reclamation.
  {
    GlobalCounter::CriticalSection cs(Thread::current());
    for (int i = 0; i < probe_depth; i++) {
      OopMapCacheEntry *entry = entry_at(probe + i);
      if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
        entry_for->copy_from(entry);
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
        return;
      }
    }
  }

  // Entry is not in hashtable.
  // Compute entry

  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry_for->copy_from(tmp);

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    OopMapCacheEntry::deallocate(tmp);
    return;
  }

  // First search for an empty slot
  for (int i = 0; i < probe_depth; i++) {
    OopMapCacheEntry* entry = entry_at(probe + i);
    if (entry == nullptr) {
      if (put_at(probe + i, tmp, nullptr)) {
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        return;
      }
    }
  }

  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // where the first entry in the collision array is replaced with the new one.
  OopMapCacheEntry* old = entry_at(probe + 0);
  if (put_at(probe + 0, tmp, old)) {
    // Cannot deallocate old entry on the spot: it can still be used by readers
    // that got a reference to it before we were able to replace it in the map.
    // Instead of synchronizing on GlobalCounter here and incurring heavy thread
    // walk, we do this clean up out of band.
    enqueue_for_cleanup(old);
  } else {
    OopMapCacheEntry::deallocate(tmp);
  }

  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
  return;
}

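// Push a displaced entry onto the lock-free _old_entries list. The actual
// deallocation happens later in cleanup(), after write_synchronize() has
// ensured that no reader inside a GlobalCounter critical section can still
// hold a reference to it.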
void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
  while (true) {
    OopMapCacheEntry* head = Atomic::load(&_old_entries);
    entry->_next = head;
    if (Atomic::cmpxchg(&_old_entries, head, entry) == head) {
      // Enqueued successfully.
      break;
    }
  }

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    ResourceMark rm;
    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
                          entry->method()->name_and_sig_as_C_string(), entry->bci());
  }
}

bool OopMapCache::has_cleanup_work() {
  return Atomic::load(&_old_entries) != nullptr;
}

void OopMapCache::try_trigger_cleanup() {
  // See if we can take the lock for the notification without blocking.
  // This allows triggering the cleanup from GC paths, which can hold
  // the service lock, e.g. for oop iteration in the service thread.
  if (has_cleanup_work() && Service_lock->try_lock_without_rank_check()) {
    Service_lock->notify_all();
    Service_lock->unlock();
  }
}

void OopMapCache::cleanup() {
  OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr);
  if (entry == nullptr) {
    // No work.
    return;
  }

  // About to delete entries that might still be accessed by other threads
  // on the lookup path. Need to sync up with them before proceeding.
  GlobalCounter::write_synchronize();

  while (entry != nullptr) {
    if (log_is_enabled(Debug, interpreter, oopmap)) {
      ResourceMark rm;
      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
                          entry->method()->name_and_sig_as_C_string(), entry->bci());
    }
    OopMapCacheEntry* next = entry->_next;
    OopMapCacheEntry::deallocate(entry);
    entry = next;
  }
}

void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  if (tmp->has_valid_mask()) {
    entry->copy_from(tmp);
  }
  OopMapCacheEntry::deallocate(tmp);
}