/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"

// ==================================================================
// DataLayout
//
// Overlay for generic profiling data.

// Some types of data layouts need a length field.
bool DataLayout::needs_array_len(u1 tag) {
  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
}

// Perform generic initialization of the data. More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  DataLayout temp;
  temp._header._bits = (intptr_t)0;
  temp._header._struct._tag = tag;
  temp._header._struct._bci = bci;
  // Write the header using a single intptr_t write. This ensures that if the layout is
  // reinitialized readers will never see the transient state where the header is 0.
  _header = temp._header;

  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
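    // The stored length is the number of cells following this one, which lets
    // variable-length entries (multi-branch, arg info, parameter types)
    // recover their own size later via array_len().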
  }
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}

void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}


// ==================================================================
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.

// Constructor for invalid ProfileData.
ProfileData::ProfileData() {
  _data = nullptr;
}

char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}

void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}

void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) ", flags);
  }
}

void ProfileData::tab(outputStream* st, bool first) const {
  st->fill_to(first ? tab_width_one : tab_width_two);
}

// ==================================================================
// BitData
//
// A BitData corresponds to a one-bit flag. This is used to indicate
// whether a checkcast bytecode has seen a null value.


void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}

// ==================================================================
// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch. It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.
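//
// A worked sketch (illustrative values only): if the branch's own JumpData
// sits at data index (di) 24 and the profile entry covering the target bci
// begins at di 56, post_initialize() below stores 56 - 24 = 32, so the
// interpreter can advance its mdp by a fixed displacement on a taken branch
// instead of searching for the target's entry.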

void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  Bytecodes::Code c = stream->code();
  if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
    target = stream->dest_w();
  } else {
    target = stream->dest();
  }
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}

int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling includes the receiver
  int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  ReferenceArgumentCount rac(signature);
  args_count += rac.count();
  args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
}

int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = ReturnTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}

class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;
  int _offset;
  GrowableArray<int> _offsets;

  friend class SignatureIterator; // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this); // non-virtual template execution
  }

  int off_at(int i) const { return _offsets.at(i); }
};

void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling includes the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

static bool is_excluded(Klass* k) {
#if INCLUDE_CDS
  if (SafepointSynchronize::is_at_safepoint() &&
      CDSConfig::is_dumping_archive() &&
      CDSConfig::current_thread_is_vm_or_dumper()) {
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
      log_debug(aot, training)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
      return true;
    } else {
      bool excluded = SystemDictionaryShared::should_be_excluded(k);
      if (excluded) {
        log_debug(aot, training)("Purged %s from MDO: excluded class", k->name()->as_C_string());
      }
      return excluded;
    }
  }
#endif
  return false;
}

void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}

void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    Klass** k = (Klass**)type_adr(i); // tagged
    it->push(k);
  }
}

void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}

void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass** k = (Klass**)type_adr(); // tagged
  it->push(k);
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check. It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr) {
      if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !p->is_loader_present_and_alive() || is_excluded(p)) {
        clear_row(row);
      }
    }
  }
}

void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
    it->push(recv);
  }
}

void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.

void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state. bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here. Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock. We require the caller to take the lock before making the ProfileData
// structure. Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
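      // (release_set_bci presumably publishes the bci with a release store,
      // pairing with the interpreter's read of bci, so a reader that sees a
      // valid bci also sees the displacement written above.)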
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}

void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes). It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
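//
// A sketch of the sizing, assuming per_case_cell_count == 2 (one count cell
// plus one displacement cell per target): cell 0 holds the array length, the
// default target gets one (count, displacement) pair, and each case gets
// another, so compute_cell_count() below returns
// 1 + per_case_cell_count * (number_of_cases + 1). The exact cell offsets are
// defined in methodData.hpp; this is only an orientation aid.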

int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print(" 0x%x", arg_modified(i));
  }
  st->cr();
}

int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}

void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}

int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note: cell_count might be zero, meaning that there is just
  // a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false;
}

#if INCLUDE_JVMCI

void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}

FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}

// A heuristic check to detect nmethods that outlive a failed speculations list.
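// The check relies on a tagging convention: free_failed_speculations() (below)
// marks a freed list by setting the low bit of the head word, and since live
// FailedSpeculation objects are pointer-aligned (guaranteed at allocation in
// add_failed_speculation), a set low bit can only mean the list was freed.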
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != nullptr) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != nullptr) {
        method->print_name(&st);
      } else {
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != nullptr) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}

bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}

void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
  assert(failed_speculations_address != nullptr, "must be");
  FailedSpeculation* fs = *failed_speculations_address;
  while (fs != nullptr) {
    FailedSpeculation* next = fs->next();
    delete fs;
    fs = next;
  }

  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This allows for the check
  // in add_failed_speculation against adding to a freed failed
  // speculations list.
  long* head = (long*) failed_speculations_address;
  (*head) = (*head) | 0x1;
}
#endif // INCLUDE_JVMCI

int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count; // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method. Size is in bytes.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0; // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }

  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
  }

  return object_size;
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method. Size is in words
int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_metadata_size(word_size);
}

// Initialize an individual data segment. Returns the size of
// the segment in bytes.
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  int cell_count = -1;
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    break;
  }
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}

// Get the data at an arbitrary (sort of) data index.
ProfileData* MethodData::data_at(int data_index) const {
  if (out_of_bounds(data_index)) {
    return nullptr;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  }
}
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}

// Iteration over data.
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  ProfileData* next = data_at(next_index);
  return next;
}

DataLayout* MethodData::next_data_layout(DataLayout* current) const {
  int current_index = dp_to_di((address)current);
  int next_index = current_index + current->size_in_bytes();
  if (out_of_bounds(next_index)) {
    return nullptr;
  }
  DataLayout* next = data_layout_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
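// This is a second pass over a freshly laid-out MDO: the bytecode stream is
// re-positioned at each entry's bci so that entries needing bytecode-specific
// setup (branch displacements, argument stack slots, etc.) can compute it.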
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(nullptr, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  _extra_data_lock = nullptr;
  initialize();
}

MethodData::MethodData() {
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}

// Reinitialize the storage of an existing MDO at a safepoint. Doing it this way will ensure it's
// not being accessed while the contents are being rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint. This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  bool allow_nested_vm_operations() const { return true; }
};

void MethodData::reinitialize() {
  VM_ReinitializeMDO op(this);
  VMThread::execute(&op);
}


void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint; // init function atomic wrt GC
  ResourceMark rm(thread);

  init();

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0; // number of bytecodes lacking data
  _data[0] = 0; // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}

void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

  // Initialize escape flags.
  clear_escape_info();
}

bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(const_cast<MethodData*>(this));
}

// Translate a bci to its corresponding data index (di).
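// The walk below is linear, but it starts from a cached position
// (data_layout_before(bci), which consults _hint_di) and refreshes the hint
// on the way out, so repeated queries for nearby bcis tend to stay cheap.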
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      if (data->bci() == bci) set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr) set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}

// Translate a bci to its corresponding data, or null.
ProfileData* MethodData::bci_to_data(int bci) {
  check_extra_data_locked();

  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      break;
    }
  }
  return bci_to_extra_data(bci, nullptr, false);
}

DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
  assert(ProfileExceptionHandlers, "not profiling");
  for (int i = 0; i < num_exception_handler_data(); i++) {
    DataLayout* exception_handler_data = exception_handler_data_at(i);
    if (exception_handler_data->bci() == bci) {
      return exception_handler_data;
    }
  }
  return nullptr;
}

BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
  DataLayout* data = exception_handler_bci_to_data_helper(bci);
  return data != nullptr ? new BitData(data) : nullptr;
}

BitData MethodData::exception_handler_bci_to_data(int bci) {
  DataLayout* data = exception_handler_bci_to_data_helper(bci);
  assert(data != nullptr, "invalid bci");
  return BitData(data);
}

DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
  check_extra_data_locked();

  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        if (dp->bci() == bci) {
          assert(data->method() != nullptr, "method must be set");
          if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}


// Translate a bci to its corresponding extra data, or null.
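// (m selects the flavor: m == nullptr looks up a plain BitData trap entry,
// m != nullptr a SpeculativeTrapData for that method. With create_if_missing,
// the first free slot is claimed by publishing a fully initialized header.)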
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}

ArgInfoData *MethodData::arg_info() {
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return nullptr;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

void MethodData::print_data_on(outputStream* st) const {
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }

  st->print_cr("--- Extra data:");
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.

ArgInfoData *MethodData::arg_info() {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag) {
      return new ArgInfoData(dp);
    }
  }
  return nullptr;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

void MethodData::print_data_on(outputStream* st) const {
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }

  st->print_cr("--- Extra data:");
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
    if (dp >= end) return;
  }
}

// Verification

void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}

void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}

bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m, bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m, bci);
  if (inv.is_invokevirtual()) {
    Symbol* klass = inv.klass();
    if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
        klass == vmSymbols::sun_misc_Unsafe() ||
        klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
      Symbol* name = inv.name();
      if (name->starts_with("get") || name->starts_with("put")) {
        return true;
      }
    }
  }
  return false;
}

int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}
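
// The three *_flag() accessors decode one decimal digit each from
// TypeProfileLevel: ones digit -> arguments, tens digit -> return values,
// hundreds digit -> parameters, where a digit is no_type_profile (0),
// type_profile_jsr292 (1) or type_profile_all (2). A worked example of the
// arithmetic, using 122 purely as an illustrative value; not compiled into
// this file.
#if 0
static void decode_type_profile_level() {
  int level = 122;
  int arguments  = level % 10;          // 2 -> profile arguments everywhere
  int returns    = (level % 100) / 10;  // 2 -> profile return values everywhere
  int parameters = level / 100;         // 1 -> profile parameters for JSR 292 only
}
#endif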

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(aot, training)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  for (DataLayout* dp = extra_data_base();
       dp < extra_data_limit();
       dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      break;
    }
  }
}

void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of the trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
  bool _always_clean;
public:
  CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
  bool is_live(Method* m) {
    if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
      return true; // TODO: treat as unloaded instead?
    }
    return !_always_clean && m->method_holder()->is_loader_alive();
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};

Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint
    lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
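
// extra_data_lock() above is the usual race-safe lazy-initialization idiom:
// construct speculatively, try to install with a compare-and-swap, and have
// the loser delete its copy and adopt the winner's. The same idiom in
// generic form; a hypothetical helper, not compiled into this file.
#if 0
template <typename T>
static T* lazy_init(T* volatile* slot) {
  T* observed = Atomic::load(slot);
  if (observed == nullptr) {
    T* fresh = new T();
    observed = Atomic::cmpxchg(slot, (T*)nullptr, fresh);
    if (observed == nullptr) {
      return fresh;   // we won the race and installed our copy
    }
    delete fresh;     // another thread won; use its instance
  }
  return observed;
}
#endif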

// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at the end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag.
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
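
// clean_extra_data() compacts the section in a single pass: "shift" grows by
// the cell size of every dead SpeculativeTrapData entry, each surviving
// entry slides left by the current shift, and the vacated tail is zeroed
// back to no_tag by the final reset call. The same idea on a plain cell
// array; a hypothetical helper, not compiled into this file.
#if 0
static void compact_cells(intptr_t* cells, int n, bool (*is_dead)(intptr_t)) {
  int shift = 0;
  for (int i = 0; i < n; i++) {
    if (is_dead(cells[i])) {
      shift++;                      // dead cell: widen the gap
    } else if (shift > 0) {
      cells[i - shift] = cells[i];  // live cell: slide left over the gap
    }
  }
  for (int i = n - shift; i < n; i++) {
    cells[i] = 0;                   // reset the tail, like the reset pass
  }
}
#endif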

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

void MethodData::clean_method_data(bool always_clean) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(always_clean);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != nullptr) {
    parameters->clean_weak_klass_links(always_clean);
  }

  CleanExtraDataKlassClosure cl(always_clean);

  // Lock to modify extra data, and prevent Safepoint from breaking the lock
  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);

  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

// This is called during redefinition to clean all "old" redefined
// methods out of MethodData for all methods.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  CleanExtraDataMethodClosure cl;

  // Lock to modify extra data, and prevent Safepoint from breaking the lock
  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);

  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}

void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}

#if INCLUDE_CDS
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
}

void MethodData::restore_unshareable_info(TRAPS) {
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
#endif // INCLUDE_CDS

#ifdef ASSERT
void MethodData::check_extra_data_locked() const {
  // Cast const away, just to be able to verify the lock.
  // Usually we only want non-const accesses on the lock,
  // so this is an exception.
  MethodData* self = (MethodData*)this;
  assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
  assert(!Thread::current()->is_Java_thread() ||
         JavaThread::current()->is_in_no_safepoint_scope(),
         "JavaThread must have NoSafepointVerifier inside lock scope");
}
#endif
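
// check_extra_data_locked() encodes the calling convention for every routine
// that touches the extra-data section: hold this MDO's extra data lock and,
// on a JavaThread, stay inside a no-safepoint scope for the duration. The
// in-file callers (e.g. clean_method_data()) satisfy this by acquiring the
// nosafepoint-ranked lock without a safepoint check. A caller-side sketch of
// that discipline; a hypothetical helper, not compiled into this file.
#if 0
static ProfileData* lookup_profile_at(MethodData* mdo, int bci) {
  MutexLocker ml(mdo->extra_data_lock(), Mutex::_no_safepoint_check_flag);
  return mdo->bci_to_data(bci);   // may fall back to the extra-data section
}
#endif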