/*
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/inlineKlass.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/lockFreeStack.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif

static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");

static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}

static inline derived_pointer to_derived_pointer(intptr_t obj) {
  return static_cast<derived_pointer>(obj);
}

static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}

static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}

// OopMapStream

OopMapStream::OopMapStream(const OopMap* oop_map)
  : _stream(oop_map->write_stream()->buffer()) {
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

OopMapStream::OopMapStream(const ImmutableOopMap* oop_map)
  : _stream(oop_map->data_addr()) {
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

void OopMapStream::find_next() {
  if (_position++ < _size) {
    _omv.read_from(&_stream);
    _valid_omv = true;
    return;
  }
  _valid_omv = false;
}


// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
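//
// Illustrative sketch (not part of this file; names are placeholders): a
// compiler back end typically records one OopMap per safepoint and registers
// it with the blob's OopMapSet, roughly:
//
//   OopMap* map = new OopMap(frame_size_in_slots, arg_count);
//   map->set_oop(VMRegImpl::stack2reg(oop_slot));                      // stack slot holding an oop
//   map->set_callee_saved(VMRegImpl::stack2reg(save_slot), saved_reg); // where a caller register was saved
//   oop_maps->add_gc_map(pc_offset_of_safepoint, map);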
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);
  _num_oops = 0;
  _has_derived_oops = false;
  _index = -1;

#ifdef ASSERT
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used   = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}


OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());
  _num_oops = source->num_oops();
  _has_derived_oops = source->has_derived_oops();
  _index = -1;

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

class OopMapSort {
private:
  const OopMap* _map;
  OopMapValue* _values;
  int _count;

public:
  OopMapSort(const OopMap* map) : _map(map), _count(0) {
    _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count());
  }

  void sort();

  void print();

  void write(CompressedWriteStream* stream) {
    for (int i = 0; i < _count; ++i) {
      _values[i].write_on(stream);
    }
  }

private:
  int find_derived_position(OopMapValue omv, int start) {
    assert(omv.type() == OopMapValue::derived_oop_value, "");

    VMReg base = omv.content_reg();
    int i = start;

    for (; i < _count; ++i) {
      if (base == _values[i].reg()) {

        for (int n = i + 1; n < _count; ++n) {
          if (_values[i].type() != OopMapValue::derived_oop_value || _values[i].content_reg() != base) {
            return n;
          }

          if (derived_cost(_values[i]) > derived_cost(omv)) {
            return n;
          }
        }
        return _count;
      }
    }

    assert(false, "failed to find base");
    return -1;
  }

  int find_position(OopMapValue omv, int start) {
    assert(omv.type() != OopMapValue::derived_oop_value, "");

    int i = start;
    for (; i < _count; ++i) {
      if (omv_cost(_values[i]) > omv_cost(omv)) {
        return i;
      }
    }
    assert(i < _map->omv_count(), "bounds check");
    return i;
  }

  void insert(OopMapValue value, int pos) {
    assert(pos >= 0 && pos < _map->omv_count(), "bounds check");
    assert(pos <= _count, "sanity");

    if (pos < _count) {
      OopMapValue prev = _values[pos];

      for (int i = pos; i < _count; ++i) {
        OopMapValue tmp = _values[i+1];
        _values[i+1] = prev;
        prev = tmp;
      }
    }
    _values[pos] = value;

    ++_count;
  }

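  // Ordering helpers for sort(): entries that live in registers get cost 0 and
  // therefore come first; stack entries are ordered by their byte offset in the
  // frame (reg2stack() * stack_slot_size). sort() places callee-saved entries
  // first, then oop/narrowoop entries in ascending cost order, and finally each
  // derived-oop entry immediately after the entry for its base.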
  int omv_cost(OopMapValue omv) {
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, "");
    return reg_cost(omv.reg());
  }

  int reg_cost(VMReg reg) {
    if (reg->is_reg()) {
      return 0;
    }
    return reg->reg2stack() * VMRegImpl::stack_slot_size;
  }

  int derived_cost(OopMapValue omv) {
    return reg_cost(omv.reg());
  }
};

void OopMapSort::sort() {
#ifdef ASSERT
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value ||
           omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, "");
  }
#endif

  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    if (oms.current().type() == OopMapValue::callee_saved_value) {
      insert(oms.current(), _count);
    }
  }

  int start = _count;
  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      int pos = find_position(omv, start);
      insert(omv, pos);
    }
  }

  for (OopMapStream oms(_map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::derived_oop_value) {
      int pos = find_derived_position(omv, start);
      assert(pos > 0, "");
      insert(omv, pos);
    }
  }
}

void OopMapSort::print() {
  for (int i = 0; i < _count; ++i) {
    OopMapValue omv = _values[i];
    if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) {
      if (omv.reg()->is_reg()) {
        tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value());
      } else {
        tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      }
    } else {
      if (omv.content_reg()->is_reg()) {
        tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size);
      } else if (omv.reg()->is_reg()) {
        tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value());
      } else {
        int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size;
        int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size;
        tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset);
      }
    }
  }
}

void OopMap::copy_and_sort_data_to(address addr) const {
  OopMapSort sort(this);
  sort.sort();
  CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position());
  sort.write(stream);

  assert(stream->position() == write_stream()->position(), "");
  memcpy(addr, stream->buffer(), stream->position());
}

int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Align to a reasonable ending point
  size = ((size+align) & ~align);
  return size;
}

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
  if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
    increment_num_oops();
  } else if (x == OopMapValue::derived_oop_value) {
    set_has_derived_oops(true);
  }
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}

// OopMapSet

OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                     _list.length(), last->offset(), _list.length()+1, map->offset());
    }
  }
#endif // ASSERT

  int index = add(map);
  map->_index = index;
  return index;
}

class AddDerivedOop : public DerivedOopClosure {
public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};

class ProcessDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  ProcessDerivedOop(OopClosure* oop_cl) :
      _oop_cl(oop_cl) {}

  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
    // offset, if the base pointer is processed in the first derived pointer.
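    //
    // Illustrative example (values made up): if base B = 0x1000 and the derived
    // pointer D = 0x1040, we record offset = D - B = 0x40, temporarily store B in
    // the derived slot, let the closure relocate it (say to 0x2000), and then
    // re-apply the offset so the slot ends up holding 0x2040.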
    derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
    intptr_t offset = *derived - derived_base;
    *derived = derived_base;
    _oop_cl->do_oop((oop*)derived);
    *derived = *derived + offset;
  }
};

class IgnoreDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
};

void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  find_map(fr)->oops_do(fr, reg_map, f, mode);
}

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  find_map(fr)->oops_do(fr, reg_map, f, df);
}

void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
  assert(derived_oop_fn != nullptr, "sanity");
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
  visitor.oops_do(fr, reg_map, this);
}

void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
  ProcessDerivedOop process_cl(oop_fn);
  AddDerivedOop add_cl;
  IgnoreDerivedOop ignore_cl;
  DerivedOopClosure* derived_cl;
  switch (derived_mode) {
  case DerivedPointerIterationMode::_directly:
    derived_cl = &process_cl;
    break;
  case DerivedPointerIterationMode::_with_table:
    derived_cl = &add_cl;
    break;
  case DerivedPointerIterationMode::_ignore:
    derived_cl = &ignore_cl;
    break;
  default:
    guarantee (false, "unreachable");
  }
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
  visitor.oops_do(fr, reg_map, this);
}

void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (fn->handle_type(omv.type())) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (omv.type() == type) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

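// The callee-saved scan below records, for every callee_saved_value entry,
// where this frame saved the caller's register: content_reg() names the
// caller's register and omv.reg() names the save slot in this frame. The
// stack walker uses these RegisterMap locations to find the caller's register
// values (including oops) further up the stack.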
static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
      reg_map->set_location(reg, loc);
    }
  }
}

// Update callee-saved register info for the following frame
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  CodeBlob* cb = fr->cb();
  assert(cb != nullptr, "no codeblob");
  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
          "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  update_register_map1(this, fr, reg_map);
}

const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
  return find_map(fr->cb(), fr->pc());
}

const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != nullptr, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != nullptr, "no ptr map found");
  return map;
}

// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}

//=============================================================================
// Non-Product code

#ifndef PRODUCT
void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data, it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ", scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print(" ");
  cb->print_value_on(tty); tty->cr();
  if (reg_map != nullptr) {
    reg_map->print();
  }
  tty->print_cr("------ ");

}
#endif // PRODUCT

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(), content_reg(), st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = nullptr;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ", i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }

bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
  // we might not have an oopmap at asynchronous (non-safepoint) stackwalks
  ImmutableOopMapPair* pairs = get_pairs();
  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      ImmutableOopMapPair* last = &pairs[i];
      return last->pc_offset() == pc_offset ? i : -1;
    }
  }
  return -1;
}

const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last = nullptr;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != nullptr, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

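// An ImmutableOopMap is laid out as its header immediately followed by the
// compressed OopMapValue data (see data_addr()); the data is written in the
// sorted order produced by OopMap::copy_and_sort_data_to().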
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
  : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
  _num_oops = oopmap->num_oops();
  _has_derived_oops = oopmap->has_derived_oops();
  address addr = data_addr();
  oopmap->copy_and_sort_data_to(addr);
}

bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    if (oms.current().type() == type) {
      return true;
    }
  }
  return false;
}

#ifdef ASSERT
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}

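// The builder works in two passes: heap_size() classifies every OopMap as new,
// empty (a single shared copy), or a duplicate of the previous map, recording
// an offset for each in _mapping; fill() then writes the pc-offset pairs and
// the ImmutableOopMap data into the buffer at those offsets.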
void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = nullptr;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}

void ImmutableOopMapSet::operator delete(void* p) {
  FREE_C_HEAP_ARRAY(unsigned char, p);
}

//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
    _location(location), _offset(offset), _next(nullptr) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};

DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
bool DerivedPointerTable::_active = false;

bool DerivedPointerTable::is_empty() {
  return Entry::_list == nullptr || Entry::_list->empty();
}

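// Lifecycle: clear() activates the table (typically at the start of a GC pause);
// during oop iteration with DerivedPointerIterationMode::_with_table, add()
// records each derived pointer's offset from its base and rewrites the slot to
// point at the base's location; after objects have moved, update_pointers()
// recomputes derived = new_base + offset and deactivates the table.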
void DerivedPointerTable::clear() {
  // The first time, we create the list. Otherwise it should be
  // empty. If not, then we have probably forgotten to call
  // update_pointers after the last GC/Scavenge.
  assert(!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == nullptr) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}

void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
  assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != nullptr, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}

void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != nullptr, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != nullptr) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}

#endif // COMPILER2_OR_JVMCI