/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/signature.hpp"
#include "utilities/globalCounter.inline.hpp"

class OopMapCacheEntry: private InterpreterOopMap {
  friend class InterpreterOopMap;
  friend class OopMapForCacheEntry;
  friend class OopMapCache;
  friend class VerifyClosure;

 private:
  OopMapCacheEntry* _next;

 protected:
  // Initialization
  void fill(const methodHandle& method, int bci);
  // fills the bit mask for native calls
  void fill_for_native(const methodHandle& method);
  void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top);

  // Deallocate bit masks and initialize fields
  void flush();

  static void deallocate(OopMapCacheEntry* const entry);

 private:
  void allocate_bit_mask();   // allocates the bit mask on the C heap if necessary
  void deallocate_bit_mask(); // deallocates the bit mask from the C heap if necessary
  bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top);

 public:
  OopMapCacheEntry() : InterpreterOopMap() {
    _next = nullptr;
#ifdef ASSERT
    _resource_allocate_bit_mask = false;
#endif
  }
};


// Implementation of OopMapForCacheEntry
// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci)

class OopMapForCacheEntry: public GenerateOopMap {
  OopMapCacheEntry *_entry;
  int               _bci;
  int               _stack_top;

  virtual bool report_results() const     { return false; }
  virtual bool possible_gc_point          (BytecodeStream *bcs);
  virtual void fill_stackmap_prolog       (int nof_gc_points);
  virtual void fill_stackmap_epilog       ();
  virtual void fill_stackmap_for_opcodes  (BytecodeStream *bcs,
                                           CellTypeState* vars,
                                           CellTypeState* stack,
                                           int stack_top);
  virtual void fill_init_vars             (GrowableArray<intptr_t> *init_vars);

 public:
  OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry *entry);

  // Computes the stack map for (method, bci) and initializes the entry
  bool compute_map(Thread* current);
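  // Returns the size of the computed map; only valid after compute_map() has been called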
  int size();
};


OopMapForCacheEntry::OopMapForCacheEntry(const methodHandle& method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) {
  _bci       = bci;
  _entry     = entry;
  _stack_top = -1;
}


bool OopMapForCacheEntry::compute_map(Thread* current) {
  assert(!method()->is_native(), "cannot compute oop map for native methods");
  // First check if it is a method where the stackmap is always empty
  if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) {
    _entry->set_mask_size(0);
  } else {
    ResourceMark rm;
    if (!GenerateOopMap::compute_map(current)) {
      fatal("Unrecoverable verification or out-of-memory error");
      return false;
    }
    result_for_basicblock(_bci);
  }
  return true;
}


bool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) {
  return false; // We are not reporting any result. We call result_for_basicblock directly
}


void OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_epilog() {
  // Do nothing
}


void OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) {
  // Do nothing
}


void OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs,
                                                    CellTypeState* vars,
                                                    CellTypeState* stack,
                                                    int stack_top) {
  // Only interested in one specific bci
  if (bcs->bci() == _bci) {
    _entry->set_mask(vars, stack, stack_top);
    _stack_top = stack_top;
  }
}


int OopMapForCacheEntry::size() {
  assert(_stack_top != -1, "compute_map must be called first");
  return ((method()->is_static()) ? 0 : 1) + method()->max_locals() + _stack_top;
}


// Implementation of InterpreterOopMap and OopMapCacheEntry

class VerifyClosure : public OffsetClosure {
 private:
  OopMapCacheEntry* _entry;
  bool              _failed;

 public:
  VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; }
  void offset_do(int offset)             { if (!_entry->is_oop(offset)) _failed = true; }
  bool failed() const                    { return _failed; }
};

InterpreterOopMap::InterpreterOopMap() {
  initialize();
#ifdef ASSERT
  _resource_allocate_bit_mask = true;
#endif
}

bool InterpreterOopMap::is_empty() const {
  bool result = _method == nullptr;
  assert(_method != nullptr || (_bci == 0 &&
    (_mask_size == 0 || _mask_size == USHRT_MAX) &&
    _bit_mask[0] == 0), "Should be completely empty");
  return result;
}

void InterpreterOopMap::initialize() {
  _method                = nullptr;
  _mask_size             = USHRT_MAX;  // This value should cause a failure quickly
  _bci                   = 0;
  _expression_stack_size = 0;
  _num_oops              = 0;
  for (int i = 0; i < N; i++) _bit_mask[i] = 0;
}

void InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) const {
  int n = number_of_entries();
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 0;
  // iterate over entries
  for (int i = 0; i < n; i++, mask <<= bits_per_entry) {
    // get current word
    if (mask == 0) {
      value = bit_mask()[word_index++];
      mask = 1;
    }
    // test for oop
    if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i);
  }
}

void InterpreterOopMap::print() const {
  int n = number_of_entries();
  tty->print("oop map for ");
  method()->print_value();
  tty->print(" @ %d = [%d] { ", bci(), n);
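  // Entries print as "<i>+" when dead and as "<i>" when they hold an oop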
  for (int i = 0; i < n; i++) {
    if (is_dead(i)) tty->print("%d+ ", i);
    else
    if (is_oop(i))  tty->print("%d ", i);
  }
  tty->print_cr("}");
}

class MaskFillerForNative: public NativeSignatureIterator {
 private:
  uintptr_t * _mask;  // the bit mask to be filled
  int         _size;  // the mask size in bits

  void set_one(int i) {
    i *= InterpreterOopMap::bits_per_entry;
    assert(0 <= i && i < _size, "offset out of bounds");
    _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord));
  }

 public:
  void pass_byte()   { /* ignore */ }
  void pass_short()  { /* ignore */ }
  void pass_int()    { /* ignore */ }
  void pass_long()   { /* ignore */ }
  void pass_float()  { /* ignore */ }
  void pass_double() { /* ignore */ }
  void pass_object() { set_one(offset()); }

  MaskFillerForNative(const methodHandle& method, uintptr_t* mask, int size) : NativeSignatureIterator(method) {
    _mask = mask;
    _size = size;
    // initialize with 0
    int i = (size + BitsPerWord - 1) / BitsPerWord;
    while (i-- > 0) _mask[i] = 0;
  }

  void generate() {
    iterate();
  }
};

bool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) {
  // Check mask includes map
  VerifyClosure blk(this);
  iterate_oop(&blk);
  if (blk.failed()) return false;

  // Check if map is generated correctly
  // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
  const bool log = log_is_enabled(Trace, interpreter, oopmap);
  LogStream st(Log(interpreter, oopmap)::trace());

  if (log) st.print("Locals (%d): ", max_locals);
  for (int i = 0; i < max_locals; i++) {
    bool v1 = is_oop(i) ? true : false;
    bool v2 = vars[i].is_reference();
    assert(v1 == v2, "locals oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();

  if (log) st.print("Stack (%d): ", stack_top);
  for (int j = 0; j < stack_top; j++) {
    bool v1 = is_oop(max_locals + j) ? true : false;
    bool v2 = stack[j].is_reference();
    assert(v1 == v2, "stack oop mask generation error");
    if (log) st.print("%d", v1 ? 1 : 0);
  }
  if (log) st.cr();
  return true;
}

void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
  }
}

void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
           "This bit mask should not be in the resource area");
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
    debug_only(_bit_mask[0] = 0;)
  }
}


void OopMapCacheEntry::fill_for_native(const methodHandle& mh) {
  assert(mh->is_native(), "method must be native method");
  set_mask_size(mh->size_of_parameters() * bits_per_entry);
  allocate_bit_mask();
  // fill mask for parameters
  MaskFillerForNative mf(mh, bit_mask(), mask_size());
  mf.generate();
}


void OopMapCacheEntry::fill(const methodHandle& method, int bci) {
  // Flush entry to deallocate an existing entry
  flush();
  set_method(method());
  set_bci(checked_cast<unsigned short>(bci));  // bci is always u2
  if (method->is_native()) {
    // Native method activations have oops only among the parameters and one
    // extra oop following the parameters (the mirror for static native methods).
    fill_for_native(method);
  } else {
    OopMapForCacheEntry gen(method, bci, this);
    if (!gen.compute_map(Thread::current())) {
      fatal("Unrecoverable verification or out-of-memory error");
    }
  }
}


void OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) {
  // compute bit mask size
  int max_locals = method()->max_locals();
  int n_entries = max_locals + stack_top;
  set_mask_size(n_entries * bits_per_entry);
  allocate_bit_mask();
  set_expression_stack_size(stack_top);

  // compute bits
  int word_index = 0;
  uintptr_t value = 0;
  uintptr_t mask = 1;

  _num_oops = 0;
  CellTypeState* cell = vars;
  for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) {
    // store last word
    if (mask == 0) {
      bit_mask()[word_index++] = value;
      value = 0;
      mask = 1;
    }

    // switch to stack when done with locals
    if (entry_index == max_locals) {
      cell = stack;
    }

    // set oop bit
    if (cell->is_reference()) {
      value |= (mask << oop_bit_number);
      _num_oops++;
    }

    // set dead bit
    if (!cell->is_live()) {
      value |= (mask << dead_bit_number);
      assert(!cell->is_reference(), "dead value marked as oop");
    }
  }

  // make sure last word is stored
  bit_mask()[word_index] = value;

  // verify bit mask
  assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
}

void OopMapCacheEntry::flush() {
  deallocate_bit_mask();
  initialize();
}

void OopMapCacheEntry::deallocate(OopMapCacheEntry* const entry) {
  entry->flush();
  FREE_C_HEAP_OBJ(entry);
}

// Implementation of OopMapCache

void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
  assert(_resource_allocate_bit_mask,
         "Should not resource allocate the _bit_mask");
  assert(from->has_valid_mask(),
         "Cannot copy entry with an invalid mask");

  set_method(from->method());
  set_bci(from->bci());
  set_mask_size(from->mask_size());
  set_expression_stack_size(from->expression_stack_size());
  _num_oops = from->num_oops();

  // Is the bit mask contained in the entry?
  if (from->mask_size() <= small_mask_limit) {
    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
           mask_word_size() * BytesPerWord);
  } else {
    // The expectation is that this InterpreterOopMap is recently created
    // and empty. It is used to get a copy of a cached entry.
    // If the bit mask has a value, it should be in the
    // resource area.
    assert(_bit_mask[0] == 0 ||
           Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
           "The bit mask should have been allocated from a resource area");
    // Allocate the bit_mask from a Resource area for performance. Allocating
    // from the C heap as is done for OopMapCache has a significant
    // performance impact.
    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
    assert(_bit_mask[0] != 0, "bit mask was not allocated");
    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
           mask_word_size() * BytesPerWord);
  }
}

inline unsigned int OopMapCache::hash_value_for(const methodHandle& method, int bci) const {
  // We use method->code_size() rather than method->identity_hash() below since
  // the mark may not be present if a pointer to the method is already reversed.
  return   ((unsigned int) bci)
         ^ ((unsigned int) method->max_locals()         << 2)
         ^ ((unsigned int) method->code_size()          << 4)
         ^ ((unsigned int) method->size_of_parameters() << 6);
}

OopMapCacheEntry* volatile OopMapCache::_old_entries = nullptr;

OopMapCache::OopMapCache() {
  _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
  for (int i = 0; i < _size; i++) _array[i] = nullptr;
}


OopMapCache::~OopMapCache() {
  assert(_array != nullptr, "sanity check");
  // Deallocate oop maps that are allocated out-of-line
  flush();
  // Deallocate array
  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
}

OopMapCacheEntry* OopMapCache::entry_at(int i) const {
  return Atomic::load_acquire(&(_array[i % _size]));
}

bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
  return Atomic::cmpxchg(&_array[i % _size], old, entry) == old;
}

void OopMapCache::flush() {
  for (int i = 0; i < _size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr) {
      _array[i] = nullptr;  // no barrier, only called in OopMapCache destructor
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

void OopMapCache::flush_obsolete_entries() {
  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
  for (int i = 0; i < _size; i++) {
    OopMapCacheEntry* entry = _array[i];
    if (entry != nullptr && !entry->is_empty() && entry->method()->is_old()) {
      // Cache entry is occupied by an old redefined method and we don't want
      // to pin it down so flush the entry.
      if (log_is_enabled(Debug, redefine, class, oopmap)) {
        ResourceMark rm;
        log_debug(redefine, class, interpreter, oopmap)
          ("flush: %s(%s): cached entry @%d",
           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
      }
      _array[i] = nullptr;
      OopMapCacheEntry::deallocate(entry);
    }
  }
}

// Lookup or compute/cache the entry.
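// Readers probe the table inside a GlobalCounter critical section so that an
// entry being replaced concurrently cannot be freed from under them; on a miss
// the map is computed into a fresh C-heap entry and installed with a CAS.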
void OopMapCache::lookup(const methodHandle& method,
                         int bci,
                         InterpreterOopMap* entry_for) {
  int probe = hash_value_for(method, bci);

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    static int count = 0;
    ResourceMark rm;
    log_debug(interpreter, oopmap)
      ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
       method()->name_and_sig_as_C_string(), probe);
  }

  // Search hashtable for match.
  // Need a critical section to avoid race against concurrent reclamation.
  {
    GlobalCounter::CriticalSection cs(Thread::current());
    for (int i = 0; i < _probe_depth; i++) {
      OopMapCacheEntry *entry = entry_at(probe + i);
      if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
        entry_for->resource_copy(entry);
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
        return;
      }
    }
  }

  // Entry is not in hashtable.
  // Compute entry

  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  entry_for->resource_copy(tmp);

  if (method->should_not_be_cached()) {
    // It is either not safe or not a good idea to cache this Method*
    // at this time. We give the caller of lookup() a copy of the
    // interesting info via parameter entry_for, but we don't add it to
    // the cache. See the gory details in Method*.cpp.
    OopMapCacheEntry::deallocate(tmp);
    return;
  }

  // First search for an empty slot
  for (int i = 0; i < _probe_depth; i++) {
    OopMapCacheEntry* entry = entry_at(probe + i);
    if (entry == nullptr) {
      if (put_at(probe + i, tmp, nullptr)) {
        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
        return;
      }
    }
  }

  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");

  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
  // where the first entry in the collision array is replaced with the new one.
  OopMapCacheEntry* old = entry_at(probe + 0);
  if (put_at(probe + 0, tmp, old)) {
    // Cannot deallocate the old entry on the spot: it can still be used by readers
    // that got a reference to it before we were able to replace it in the map.
    // Instead of synchronizing on GlobalCounter here and incurring a heavy thread
    // walk, we do this cleanup out of band.
    enqueue_for_cleanup(old);
  } else {
    OopMapCacheEntry::deallocate(tmp);
  }

  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
  return;
}

void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
  while (true) {
    OopMapCacheEntry* head = Atomic::load(&_old_entries);
    entry->_next = head;
    if (Atomic::cmpxchg(&_old_entries, head, entry) == head) {
      // Enqueued successfully.
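      // The entry will be reclaimed later by cleanup(), which synchronizes
      // with concurrent readers before deallocating.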
      break;
    }
  }

  if (log_is_enabled(Debug, interpreter, oopmap)) {
    ResourceMark rm;
    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
                                   entry->method()->name_and_sig_as_C_string(), entry->bci());
  }
}

bool OopMapCache::has_cleanup_work() {
  return Atomic::load(&_old_entries) != nullptr;
}

void OopMapCache::trigger_cleanup() {
  if (has_cleanup_work()) {
    MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
    Service_lock->notify_all();
  }
}

void OopMapCache::cleanup() {
  OopMapCacheEntry* entry = Atomic::xchg(&_old_entries, (OopMapCacheEntry*)nullptr);
  if (entry == nullptr) {
    // No work.
    return;
  }

  // About to delete the entries that might still be accessed by other threads
  // on the lookup path. Need to sync up with them before proceeding.
  GlobalCounter::write_synchronize();

  while (entry != nullptr) {
    if (log_is_enabled(Debug, interpreter, oopmap)) {
      ResourceMark rm;
      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
                                     entry->method()->name_and_sig_as_C_string(), entry->bci());
    }
    OopMapCacheEntry* next = entry->_next;
    OopMapCacheEntry::deallocate(entry);
    entry = next;
  }
}

void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
  // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack
  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
  tmp->initialize();
  tmp->fill(method, bci);
  if (tmp->has_valid_mask()) {
    entry->resource_copy(tmp);
  }
  OopMapCacheEntry::deallocate(tmp);
}