1 /* 2 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "code/codeBlob.hpp" 27 #include "code/codeCache.hpp" 28 #include "code/nmethod.hpp" 29 #include "code/scopeDesc.hpp" 30 #include "compiler/oopMap.inline.hpp" 31 #include "gc/shared/collectedHeap.hpp" 32 #include "logging/log.hpp" 33 #include "logging/logStream.hpp" 34 #include "memory/allocation.inline.hpp" 35 #include "memory/iterator.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/compressedOops.hpp" 38 #include "runtime/atomic.hpp" 39 #include "runtime/frame.inline.hpp" 40 #include "runtime/handles.inline.hpp" 41 #include "runtime/signature.hpp" 42 #include "runtime/stackWatermarkSet.inline.hpp" 43 #include "utilities/align.hpp" 44 #include "utilities/lockFreeStack.hpp" 45 #ifdef COMPILER1 46 #include "c1/c1_Defs.hpp" 47 #endif 48 #ifdef COMPILER2 49 #include "opto/optoreg.hpp" 50 #endif 51 #if INCLUDE_JVMCI 52 #include "jvmci/jvmci_globals.hpp" 53 #endif 54 55 static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check"); 56 57 static inline intptr_t derived_pointer_value(derived_pointer p) { 58 return static_cast<intptr_t>(p); 59 } 60 61 static inline derived_pointer to_derived_pointer(intptr_t obj) { 62 return static_cast<derived_pointer>(obj); 63 } 64 65 static inline intptr_t operator-(derived_pointer p, derived_pointer p1) { 66 return derived_pointer_value(p) - derived_pointer_value(p1); 67 } 68 69 static inline derived_pointer operator+(derived_pointer p, intptr_t offset) { 70 return static_cast<derived_pointer>(derived_pointer_value(p) + offset); 71 } 72 73 // OopMapStream 74 75 OopMapStream::OopMapStream(const OopMap* oop_map) 76 : _stream(oop_map->write_stream()->buffer()) { 77 _size = oop_map->omv_count(); 78 _position = 0; 79 _valid_omv = false; 80 } 81 82 OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) 83 : _stream(oop_map->data_addr()) { 84 _size = oop_map->count(); 85 _position = 0; 86 _valid_omv = false; 87 } 88 89 void OopMapStream::find_next() { 90 
if (_position++ < _size) { 91 _omv.read_from(&_stream); 92 _valid_omv = true; 93 return; 94 } 95 _valid_omv = false; 96 } 97 98 99 // OopMap 100 101 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd 102 // slots to hold 4-byte values like ints and floats in the LP64 build. 103 OopMap::OopMap(int frame_size, int arg_count) { 104 // OopMaps are usually quite so small, so pick a small initial size 105 set_write_stream(new CompressedWriteStream(32)); 106 set_omv_count(0); 107 _num_oops = 0; 108 _has_derived_oops = false; 109 _index = -1; 110 111 #ifdef ASSERT 112 _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count; 113 _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length); 114 for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value; 115 #endif 116 } 117 118 OopMap::OopMap(int data_size) { 119 // OopMaps are usually quite so small, so pick a small initial size 120 set_write_stream(new CompressedWriteStream(data_size)); 121 set_omv_count(0); 122 _num_oops = 0; 123 _has_derived_oops = false; 124 _index = -1; 125 #ifdef ASSERT 126 _locs_length = 0; 127 _locs_used = nullptr; 128 #endif 129 } 130 131 OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) { 132 // This constructor does a deep copy 133 // of the source OopMap. 134 set_write_stream(new CompressedWriteStream(source->omv_count() * 2)); 135 set_omv_count(0); 136 set_offset(source->offset()); 137 _num_oops = source->num_oops(); 138 _has_derived_oops = source->has_derived_oops(); 139 _index = -1; 140 141 #ifdef ASSERT 142 _locs_length = source->_locs_length; 143 _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length); 144 for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value; 145 #endif 146 147 // We need to copy the entries too. 
148 for (OopMapStream oms(source); !oms.is_done(); oms.next()) { 149 OopMapValue omv = oms.current(); 150 omv.write_on(write_stream()); 151 increment_count(); 152 } 153 } 154 155 156 OopMap* OopMap::deep_copy() { 157 return new OopMap(_deep_copy_token, this); 158 } 159 160 void OopMap::copy_data_to(address addr) const { 161 memcpy(addr, write_stream()->buffer(), write_stream()->position()); 162 } 163 164 class OopMapSort { 165 private: 166 const OopMap* _map; 167 OopMapValue* _values; 168 int _count; 169 170 public: 171 OopMapSort(const OopMap* map) : _map(map), _count(0) { 172 _values = NEW_RESOURCE_ARRAY(OopMapValue, _map->omv_count()); 173 } 174 175 void sort(); 176 177 void print(); 178 179 void write(CompressedWriteStream* stream) { 180 for (int i = 0; i < _count; ++i) { 181 _values[i].write_on(stream); 182 } 183 } 184 185 private: 186 int find_derived_position(OopMapValue omv, int start) { 187 assert(omv.type() == OopMapValue::derived_oop_value, ""); 188 189 VMReg base = omv.content_reg(); 190 int i = start; 191 192 for (; i < _count; ++i) { 193 if (base == _values[i].reg()) { 194 195 for (int n = i + 1; n < _count; ++n) { 196 if (_values[i].type() != OopMapValue::derived_oop_value || _values[i].content_reg() != base) { 197 return n; 198 } 199 200 if (derived_cost(_values[i]) > derived_cost(omv)) { 201 return n; 202 } 203 } 204 return _count; 205 } 206 } 207 208 assert(false, "failed to find base"); 209 return -1; 210 } 211 212 int find_position(OopMapValue omv, int start) { 213 assert(omv.type() != OopMapValue::derived_oop_value, ""); 214 215 int i = start; 216 for (; i < _count; ++i) { 217 if (omv_cost(_values[i]) > omv_cost(omv)) { 218 return i; 219 } 220 } 221 assert(i < _map->omv_count(), "bounds check"); 222 return i; 223 } 224 225 void insert(OopMapValue value, int pos) { 226 assert(pos >= 0 && pos < _map->omv_count(), "bounds check"); 227 assert(pos <= _count, "sanity"); 228 229 if (pos < _count) { 230 OopMapValue prev = _values[pos]; 231 232 for (int 
i = pos; i < _count; ++i) { 233 OopMapValue tmp = _values[i+1]; 234 _values[i+1] = prev; 235 prev = tmp; 236 } 237 } 238 _values[pos] = value; 239 240 ++_count; 241 } 242 243 int omv_cost(OopMapValue omv) { 244 assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value, ""); 245 return reg_cost(omv.reg()); 246 } 247 248 int reg_cost(VMReg reg) { 249 if (reg->is_reg()) { 250 return 0; 251 } 252 return reg->reg2stack() * VMRegImpl::stack_slot_size; 253 } 254 255 int derived_cost(OopMapValue omv) { 256 return reg_cost(omv.reg()); 257 } 258 }; 259 260 void OopMapSort::sort() { 261 #ifdef ASSERT 262 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) { 263 OopMapValue omv = oms.current(); 264 assert(omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value || 265 omv.type() == OopMapValue::derived_oop_value || omv.type() == OopMapValue::callee_saved_value, ""); 266 } 267 #endif 268 269 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) { 270 if (oms.current().type() == OopMapValue::callee_saved_value) { 271 insert(oms.current(), _count); 272 } 273 } 274 275 int start = _count; 276 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) { 277 OopMapValue omv = oms.current(); 278 if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) { 279 int pos = find_position(omv, start); 280 insert(omv, pos); 281 } 282 } 283 284 for (OopMapStream oms(_map); !oms.is_done(); oms.next()) { 285 OopMapValue omv = oms.current(); 286 if (omv.type() == OopMapValue::derived_oop_value) { 287 int pos = find_derived_position(omv, start); 288 assert(pos > 0, ""); 289 insert(omv, pos); 290 } 291 } 292 } 293 294 void OopMapSort::print() { 295 for (int i = 0; i < _count; ++i) { 296 OopMapValue omv = _values[i]; 297 if (omv.type() == OopMapValue::oop_value || omv.type() == OopMapValue::narrowoop_value) { 298 if (omv.reg()->is_reg()) { 299 tty->print_cr("[%c][%d] -> reg (%d)", omv.type() == 
OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->value()); 300 } else { 301 tty->print_cr("[%c][%d] -> stack (%d)", omv.type() == OopMapValue::narrowoop_value ? 'n' : 'o', i, omv.reg()->reg2stack() * VMRegImpl::stack_slot_size); 302 } 303 } else { 304 if (omv.content_reg()->is_reg()) { 305 tty->print_cr("[d][%d] -> reg (%d) stack (%d)", i, omv.content_reg()->value(), omv.reg()->reg2stack() * VMRegImpl::stack_slot_size); 306 } else if (omv.reg()->is_reg()) { 307 tty->print_cr("[d][%d] -> stack (%d) reg (%d)", i, omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size, omv.reg()->value()); 308 } else { 309 int derived_offset = omv.reg()->reg2stack() * VMRegImpl::stack_slot_size; 310 int base_offset = omv.content_reg()->reg2stack() * VMRegImpl::stack_slot_size; 311 tty->print_cr("[d][%d] -> stack (%x) stack (%x)", i, base_offset, derived_offset); 312 } 313 } 314 } 315 } 316 317 void OopMap::copy_and_sort_data_to(address addr) const { 318 OopMapSort sort(this); 319 sort.sort(); 320 CompressedWriteStream* stream = new CompressedWriteStream(_write_stream->position()); 321 sort.write(stream); 322 323 assert(stream->position() == write_stream()->position(), ""); 324 memcpy(addr, stream->buffer(), stream->position()); 325 } 326 327 int OopMap::heap_size() const { 328 int size = sizeof(OopMap); 329 int align = sizeof(void *) - 1; 330 size += write_stream()->position(); 331 // Align to a reasonable ending point 332 size = ((size+align) & ~align); 333 return size; 334 } 335 336 // frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd 337 // slots to hold 4-byte values like ints and floats in the LP64 build. 
// Record one entry in the map: reg is the location being described, x its
// kind, and optional carries the companion register for callee_saved /
// derived entries (VMRegImpl::Bad() otherwise).
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
  if (x == OopMapValue::oop_value || x == OopMapValue::narrowoop_value) {
    increment_num_oops();
  } else if (x == OopMapValue::derived_oop_value) {
    set_has_derived_oops(true);
  }
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}

// OopMapSet

OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

OopMapSet::OopMapSet(int size) : _list(size) {}

// Append map to the set at the given pc offset and return its index.
// Debug builds check that offsets are unique and warn if not ascending.
int OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  int index = add(map);
  map->_index = index;
  return index;
}

// Registers each (base, derived) pair in the DerivedPointerTable so the
// derived pointers can be re-adjusted after GC moves the base objects.
class AddDerivedOop : public DerivedOopClosure {
 public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
  }
};

// Processes each derived pointer in place: temporarily rewrites *derived to
// the base value, lets the oop closure relocate it, then re-applies the
// saved offset.
class ProcessDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

 public:
  ProcessDerivedOop(OopClosure* oop_cl) :
      _oop_cl(oop_cl) {}

  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {
    // All derived pointers must be processed before the base pointer of any derived pointer is processed.
    // Otherwise, if two derived pointers use the same base, the second derived pointer will get an obscured
    // offset, if the base pointer is processed in the first derived pointer.
    derived_pointer derived_base = to_derived_pointer(*reinterpret_cast<intptr_t*>(base));
    intptr_t offset = *derived - derived_base;
    *derived = derived_base;
    _oop_cl->do_oop((oop*)derived);
    *derived = *derived + offset;
  }
};

// No-op closure used when derived pointers can be ignored entirely.
// NOTE(review): _oop_cl is never initialized or used here — it appears to be
// vestigial; confirm against upstream before removing.
class IgnoreDerivedOop : public DerivedOopClosure {
  OopClosure* _oop_cl;

 public:
  enum {
    SkipNull = true, NeedsLock = true
  };

  virtual void do_derived_oop(derived_base* base, derived_pointer* derived) {}
};

void OopMapSet::oops_do(const frame* fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  find_map(fr)->oops_do(fr, reg_map, f, mode);
}

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedOopClosure* df) {
  find_map(fr)->oops_do(fr, reg_map, f, df);
}

// Visit all oops in the frame described by this map with the caller's
// closures (derived closure must be supplied).
void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedOopClosure* derived_oop_fn) const {
  assert(derived_oop_fn != nullptr, "sanity");
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_oop_fn);
  visitor.oops_do(fr, reg_map, this);
}

// Same as above, but selects the derived-pointer handling strategy from the
// given iteration mode (process directly, record in table, or ignore).
void ImmutableOopMap::oops_do(const frame *fr, const RegisterMap *reg_map,
                              OopClosure* oop_fn, DerivedPointerIterationMode derived_mode) const {
  ProcessDerivedOop process_cl(oop_fn);
  AddDerivedOop add_cl;
  IgnoreDerivedOop ignore_cl;
  DerivedOopClosure* derived_cl;
  switch (derived_mode) {
  case DerivedPointerIterationMode::_directly:
    derived_cl = &process_cl;
    break;
  case DerivedPointerIterationMode::_with_table:
    derived_cl = &add_cl;
    break;
  case DerivedPointerIterationMode::_ignore:
    derived_cl = &ignore_cl;
    break;
  default:
    guarantee (false, "unreachable");
  }
  OopMapDo<OopClosure, DerivedOopClosure, SkipNullValue> visitor(oop_fn, derived_cl);
  visitor.oops_do(fr, reg_map, this);
}

// Apply fn to every entry whose type it accepts.
void ImmutableOopMap::all_type_do(const frame *fr, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (fn->handle_type(omv.type())) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

// Apply fn to every entry of exactly the given type.
void ImmutableOopMap::all_type_do(const frame *fr, OopMapValue::oop_types type, OopMapClosure* fn) const {
  OopMapValue omv;
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    if (omv.type() == type) {
      fn->do_value(omv.reg(), omv.type());
    }
  }
}

// Record in reg_map the frame location of every callee-saved register
// described by the oopmap.
static void update_register_map1(const ImmutableOopMap* oopmap, const frame* fr, RegisterMap* reg_map) {
  for (OopMapStream oms(oopmap); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      address loc = fr->oopmapreg_to_location(omv.reg(), reg_map);
      reg_map->set_location(reg, loc);
    }
  }
}

// Update callee-saved register info for the following frame
void ImmutableOopMap::update_register_map(const frame *fr, RegisterMap *reg_map) const {
  CodeBlob* cb = fr->cb();
  assert(cb != nullptr, "no codeblob");
  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == nullptr || fr->is_older(reg_map->_update_for_id),
         "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  update_register_map1(this, fr, reg_map);
}

const ImmutableOopMap* OopMapSet::find_map(const frame *fr) {
  return find_map(fr->cb(), fr->pc());
}

// Look up the oopmap covering the given return address in the code blob.
const ImmutableOopMap* OopMapSet::find_map(const CodeBlob* cb, address pc) {
  assert(cb != nullptr, "no codeblob");
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != nullptr, "no ptr map found");
  return map;
}

// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  find_map(fr)->update_register_map(fr, reg_map);
}

//=============================================================================
// Non-Product code

#ifndef PRODUCT
// Debug aid: dump the oopmap, frame and register map for fr to tty.
void OopMapSet::trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data, it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope  = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ",scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print("     ");
  cb->print_value_on(tty);  tty->cr();
  if (reg_map != nullptr) {
    reg_map->print();
  }
  tty->print_cr("------ ");

}
#endif // PRODUCT

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = nullptr;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    // Several pc offsets may share one map; print the map once and then
    // list all of its pc offsets.
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }

// Two maps are equal iff they have the same entry count and identical
// compressed stream contents.
bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

// Return the pair index whose pc offset matches exactly, or -1 if absent.
int ImmutableOopMapSet::find_slot_for_offset(int pc_offset) const {
  // we might not have an oopmap at asynchronous (non-safepoint) stackwalks
  ImmutableOopMapPair* pairs = get_pairs();
  for (int i = 0; i < _count; ++i) {
    // Pairs are sorted by pc offset; the first pair at or beyond pc_offset
    // decides whether an exact match exists.
    if (pairs[i].pc_offset() >= pc_offset) {
      ImmutableOopMapPair* last = &pairs[i];
      return last->pc_offset() == pc_offset ? i : -1;
    }
  }
  return -1;
}

// Return the map registered at exactly pc_offset; fails if none exists.
const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last = nullptr;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != nullptr, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

// Build an immutable (sorted, compact) copy of the mutable oopmap; the
// entry data is written immediately after this object (see data_addr()).
ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap)
  : _count(oopmap->count()), _num_oops(oopmap->num_oops()) {
  _num_oops = oopmap->num_oops();
  _has_derived_oops = oopmap->has_derived_oops();
  address addr = data_addr();
  oopmap->copy_and_sort_data_to(addr);
}

bool ImmutableOopMap::has_any(OopMapValue::oop_types type) const {
  for (OopMapStream oms(this); !oms.is_done(); oms.next()) {
    if (oms.current().type() == type) {
      return true;
    }
  }
  return false;
}

#ifdef ASSERT
// Size of this map including its trailing entry data, computed by decoding
// the whole stream.
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(nullptr), _last(nullptr), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(nullptr) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

// Compute the total byte size required for the immutable set, and record in
// _mapping how each source map is represented (new, duplicate or empty).
int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of ours pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  // Debug builds reserve 8 extra bytes for an overwrite canary
  // (see generate_into/verify).
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}

void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

// Write the pc/offset pair and construct its ImmutableOopMap in place;
// returns the number of bytes the map occupies.
int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = nullptr;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      // Duplicates and repeated empties share a previously-written map;
      // only a pair pointing at the existing offset is emitted.
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    //const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    //assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
// Check the trailing 0xff canary is intact and every map lies within the
// buffer bounds.
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}

// Matches the NEW_C_HEAP_ARRAY allocation in build().
void ImmutableOopMapSet::operator delete(void* p) {
  FREE_C_HEAP_ARRAY(unsigned char, p);
}

//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

// One recorded derived pointer: the stack/register location holding it and
// its byte offset from the (possibly moving) base oop. Entries are linked
// into a lock-free stack so multiple GC threads can add concurrently.
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
    _location(location), _offset(offset), _next(nullptr) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};

DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = nullptr;
bool DerivedPointerTable::_active = false;

bool DerivedPointerTable::is_empty() {
  return Entry::_list == nullptr || Entry::_list->empty();
}

// Activate the table for a GC cycle. Lazily allocates the backing list on
// first use; on later calls the list must already be empty.
void DerivedPointerTable::clear() {
  // The first time, we create the list.  Otherwise it should be
  // empty.  If not, then we have probably forgotten to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == nullptr) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}

// Record a (derived, base) pair: remember the derived-from-base offset,
// rewrite *derived_loc to point at the base *location* so it survives the
// base oop being moved, and push an entry for later re-adjustment.
void DerivedPointerTable::add(derived_pointer* derived_loc, derived_base* base_loc) {
  assert(Universe::heap()->is_in_or_null((void*)*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != nullptr, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*reinterpret_cast<intptr_t*>(base_loc));
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), intptr_t(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}

// After GC has moved objects: for each recorded entry, read the relocated
// base through the stored location and recompute derived = base + offset.
// Drains and frees all entries and deactivates the table.
void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != nullptr, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != nullptr) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset = entry->offset();
    // The derived oop was setup to point to location of base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(cast_from_oop<intptr_t>(base));
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    // assert(offset >= 0 && offset <= (intptr_t)(base->size() << LogHeapWordSize), "offset: %ld base->size: %zu relative: %d", offset, base->size() << LogHeapWordSize, *(intptr_t*)derived_loc <= 0);

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}

#endif // COMPILER2_OR_JVMCI