1 /* 2 * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "cds/cdsConfig.hpp" 27 #include "ci/ciMethodData.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "compiler/compilationPolicy.hpp" 30 #include "compiler/compilerDefinitions.inline.hpp" 31 #include "compiler/compilerOracle.hpp" 32 #include "interpreter/bytecode.hpp" 33 #include "interpreter/bytecodeStream.hpp" 34 #include "interpreter/linkResolver.hpp" 35 #include "memory/metaspaceClosure.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/klass.inline.hpp" 38 #include "oops/methodData.inline.hpp" 39 #include "prims/jvmtiRedefineClasses.hpp" 40 #include "runtime/atomic.hpp" 41 #include "runtime/deoptimization.hpp" 42 #include "runtime/handles.inline.hpp" 43 #include "runtime/orderAccess.hpp" 44 #include "runtime/safepointVerifiers.hpp" 45 #include "runtime/signature.hpp" 46 #include "utilities/align.hpp" 47 #include "utilities/checkedCast.hpp" 48 #include "utilities/copy.hpp" 49 50 // ================================================================== 51 // DataLayout 52 // 53 // Overlay for generic profiling data. 54 55 // Some types of data layouts need a length field. 56 bool DataLayout::needs_array_len(u1 tag) { 57 return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag); 58 } 59 60 // Perform generic initialization of the data. More specific 61 // initialization occurs in overrides of ProfileData::post_initialize. 62 void DataLayout::initialize(u1 tag, u2 bci, int cell_count) { 63 _header._bits = (intptr_t)0; 64 _header._struct._tag = tag; 65 _header._struct._bci = bci; 66 for (int i = 0; i < cell_count; i++) { 67 set_cell_at(i, (intptr_t)0); 68 } 69 if (needs_array_len(tag)) { 70 set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header. 
71 } 72 if (tag == call_type_data_tag) { 73 CallTypeData::initialize(this, cell_count); 74 } else if (tag == virtual_call_type_data_tag) { 75 VirtualCallTypeData::initialize(this, cell_count); 76 } 77 } 78 79 void DataLayout::clean_weak_klass_links(bool always_clean) { 80 ResourceMark m; 81 data_in()->clean_weak_klass_links(always_clean); 82 } 83 84 85 // ================================================================== 86 // ProfileData 87 // 88 // A ProfileData object is created to refer to a section of profiling 89 // data in a structured way. 90 91 // Constructor for invalid ProfileData. 92 ProfileData::ProfileData() { 93 _data = nullptr; 94 } 95 96 char* ProfileData::print_data_on_helper(const MethodData* md) const { 97 DataLayout* dp = md->extra_data_base(); 98 DataLayout* end = md->args_data_limit(); 99 stringStream ss; 100 for (;; dp = MethodData::next_extra(dp)) { 101 assert(dp < end, "moved past end of extra data"); 102 switch(dp->tag()) { 103 case DataLayout::speculative_trap_data_tag: 104 if (dp->bci() == bci()) { 105 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 106 int trap = data->trap_state(); 107 char buf[100]; 108 ss.print("trap/"); 109 data->method()->print_short_name(&ss); 110 ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 111 } 112 break; 113 case DataLayout::bit_data_tag: 114 break; 115 case DataLayout::no_tag: 116 case DataLayout::arg_info_data_tag: 117 return ss.as_string(); 118 break; 119 default: 120 fatal("unexpected tag %d", dp->tag()); 121 } 122 } 123 return nullptr; 124 } 125 126 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const { 127 print_data_on(st, print_data_on_helper(md)); 128 } 129 130 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const { 131 st->print("bci: %d ", bci()); 132 st->fill_to(tab_width_one + 1); 133 st->print("%s", name); 134 tab(st); 135 int trap = trap_state(); 136 if (trap != 0) { 137 char buf[100]; 138 
st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 139 } 140 if (extra != nullptr) { 141 st->print("%s", extra); 142 } 143 int flags = data()->flags(); 144 if (flags != 0) { 145 st->print("flags(%d) ", flags); 146 } 147 } 148 149 void ProfileData::tab(outputStream* st, bool first) const { 150 st->fill_to(first ? tab_width_one : tab_width_two); 151 } 152 153 // ================================================================== 154 // BitData 155 // 156 // A BitData corresponds to a one-bit flag. This is used to indicate 157 // whether a checkcast bytecode has seen a null value. 158 159 160 void BitData::print_data_on(outputStream* st, const char* extra) const { 161 print_shared(st, "BitData", extra); 162 st->cr(); 163 } 164 165 // ================================================================== 166 // CounterData 167 // 168 // A CounterData corresponds to a simple counter. 169 170 void CounterData::print_data_on(outputStream* st, const char* extra) const { 171 print_shared(st, "CounterData", extra); 172 st->print_cr("count(%u)", count()); 173 } 174 175 // ================================================================== 176 // JumpData 177 // 178 // A JumpData is used to access profiling information for a direct 179 // branch. It is a counter, used for counting the number of branches, 180 // plus a data displacement, used for realigning the data pointer to 181 // the corresponding target bci. 
182 183 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 184 assert(stream->bci() == bci(), "wrong pos"); 185 int target; 186 Bytecodes::Code c = stream->code(); 187 if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) { 188 target = stream->dest_w(); 189 } else { 190 target = stream->dest(); 191 } 192 int my_di = mdo->dp_to_di(dp()); 193 int target_di = mdo->bci_to_di(target); 194 int offset = target_di - my_di; 195 set_displacement(offset); 196 } 197 198 void JumpData::print_data_on(outputStream* st, const char* extra) const { 199 print_shared(st, "JumpData", extra); 200 st->print_cr("taken(%u) displacement(%d)", taken(), displacement()); 201 } 202 203 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) { 204 // Parameter profiling include the receiver 205 int args_count = include_receiver ? 1 : 0; 206 ResourceMark rm; 207 ReferenceArgumentCount rac(signature); 208 args_count += rac.count(); 209 args_count = MIN2(args_count, max); 210 return args_count * per_arg_cell_count; 211 } 212 213 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) { 214 assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); 215 assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken"); 216 const methodHandle m = stream->method(); 217 int bci = stream->bci(); 218 Bytecode_invoke inv(m, bci); 219 int args_cell = 0; 220 if (MethodData::profile_arguments_for_invoke(m, bci)) { 221 args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit); 222 } 223 int ret_cell = 0; 224 if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) { 225 ret_cell = ReturnTypeEntry::static_cell_count(); 226 } 227 int header_cell = 0; 228 if (args_cell + ret_cell > 0) { 229 header_cell = header_cell_count(); 230 } 231 232 return header_cell + args_cell + ret_cell; 233 } 234 235 
// Walks a method signature and records the (word) stack offset of each
// reference-typed parameter, up to a fixed maximum.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                    // maximum number of offsets to record
  int _offset;                 // running stack-slot offset in words
  GrowableArray<int> _offsets; // offsets of reference parameters

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Stack offset of the i-th recorded reference parameter.
  int off_at(int i) const { return _offsets.at(i); }
};

// Fill in the stack slot of each profiled entry and mark its type as
// not-yet-seen.  'include_receiver' is true for parameter profiling,
// where the receiver occupies entry 0.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    // Shift by one slot when a receiver precedes the arguments.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

// Finish initialization of the argument/return type entries for an
// invoke site once the bytecode is known.
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

// Same as CallTypeData::post_initialize, for virtual/interface calls.
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

// Clear recorded klasses that are dead (or all of them when
// 'always_clean'), preserving the status bits stored in the low bits
// of each entry.
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_alive()) {
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}

// Visit the Klass* stored in each entry for metaspace iteration.
void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    set_type(i, klass_part(type(i))); // reset tag; FIXME: properly handle tagged pointers
    Klass** k = (Klass**)type_adr(i);
    it->push(k);
    // it->push_tagged(k);
  }
}

// Single-entry variant of TypeStackSlotEntries::clean_weak_klass_links
// for the recorded return type.
void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_alive()) {
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}

void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass** k = (Klass**)type_adr(); // tagged
  set_type(klass_part(type())); // reset tag; FIXME: properly handle tagged pointers
  it->push(k);
  // it->push_tagged(k);
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

// Print a recorded klass entry, decoding the status bits ('none',
// 'unknown', null-seen) stored alongside the pointer.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

// Clear rows whose recorded receiver klass is dead (or all rows when
// 'always_clean').
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr) {
      if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !p->is_loader_alive()) {
        clear_row(row);
      }
    }
  }
}

void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
    it->push(recv);
  }
}

// Print occupied rows with each receiver's count and its fraction of
// the total (check count plus all per-receiver counts).
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.
// Mark every cache row empty (no_bci) with an invalid displacement.
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock.  We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}

// Print the ret count and each occupied (bci, count, displacement) row.
void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

// Cache the displacement from this BranchData to the taken target's
// profile data.
void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).  It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
// Cells needed for a switch: one for the array length plus a
// (count, displacement) pair per case and one pair for the default.
int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

// Cache, for every case and the default, the displacement from this
// data to the profile data of the corresponding branch target.
void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print("  0x%x", arg_modified(i));
  }
  st->cr();
}

// Cells needed for parameter type profiling of 'm': the per-argument
// cells plus one for the array length, or 0 when profiling is off or
// there is nothing to profile.
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means "no limit".
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}

// Visit the Method* recorded for the speculative trap.
void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

// Allocate a MethodData object in the given class loader's metaspace.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}

// Number of profile cells a bytecode needs, or no_profile_data when the
// bytecode is not profiled, or variable_cell_count when the count
// depends on the operands (switches, type-profiled invokes).
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

// True for bytecodes that can carry a speculative trap entry; only
// meaningful when C2 type speculation is on (all cases fall through to
// the C2 check, and to 'return false' when COMPILER2 is not built).
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false;
}

#if INCLUDE_JVMCI

// Placement-style new sized by fs_size (header + speculation payload);
// returns nullptr on allocation failure instead of throwing.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}

FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}

// A heuristic check to detect nmethods that outlive a failed speculations list.
// A freed list head has its low bit set (see free_failed_speculations);
// fatal out with as much identifying detail as we can print.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != nullptr) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != nullptr) {
        method->print_name(&st);
      } else {
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != nullptr) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}

// Append 'speculation' to the lock-free singly-linked list unless an
// identical entry is already present.  Returns true when a new entry
// was appended, false on duplicate or allocation failure.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // CAS onto the tail; on failure another thread appended first and
      // we re-examine the (now non-null) entry below.
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}

// Free the whole list and poison the head pointer so later adds fail fast.
void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
  assert(failed_speculations_address != nullptr, "must be");
  FailedSpeculation* fs = *failed_speculations_address;
  while (fs != nullptr) {
    FailedSpeculation* next = fs->next();
    delete fs;
    fs = next;
  }

  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This is allows for the check
  // in add_failed_speculation against adding to a freed failed
  // speculations list.
  // NOTE(review): 'long' is 32-bit under LLP64 (Windows); setting bit 0
  // of the low word still works on little-endian — confirm this is the
  // intended portability story, or use intptr_t.
  long* head = (long*) failed_speculations_address;
  (*head) = (*head) | 0x1;
}
#endif // INCLUDE_JVMCI

// Number of extra DataLayout slots to reserve for traps with no MDP,
// plus room for SpeculativeTrapData entries when type speculation may
// be used.  The JVMCI build reserves more aggressively (30% vs ~3%).
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  // Walk every bytecode and sum the per-bytecode profile slot sizes.
  // NOTE(review): this accounting must stay in lockstep with the slots
  // actually laid out by initialize(); initialize() asserts the two agree.
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    // Only trap-capable bytecodes count as "empty" under JVMCI.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }

  // One BitData slot per exception handler, when handler profiling is on.
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
  }

  return object_size;
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method. Size is in words
int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_metadata_size(word_size);
}

// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  // Select the profile record kind (tag) and cell count for the bytecode
  // at the stream's current position, then initialize the DataLayout at
  // data_index accordingly. cell_count stays -1 for unprofiled bytecodes.
  int cell_count = -1;
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // A CallTypeData is used only if it would actually be bigger than a
    // plain CounterData (i.e. there is something to type-profile).
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    break;
  }
  // Sanity: the chosen cell count must agree with bytecode_cell_count(),
  // except for the variable-sized tags computed above.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}

// Get the data at an arbitrary (sort of) data index.
ProfileData* MethodData::data_at(int data_index) const {
  if (out_of_bounds(data_index)) {
    return nullptr;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

// Number of profiling cells in this layout, recovered from its tag.
// Variable-sized kinds need a (resource-allocated) accessor wrapper to
// read their length from the data itself.
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  }
}

// Wrap this raw layout in the ProfileData accessor class matching its
// tag. The wrappers are resource-allocated views, not owning copies.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}

// Iteration over data.
// Advance to the ProfileData immediately following 'current' in the
// regular (per-bytecode) data area; returns an out-of-bounds marker
// (checked via is_valid()) past the last entry.
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  ProfileData* next = data_at(next_index);
  return next;
}

// Raw-layout counterpart of next_data(); returns null past the end.
DataLayout* MethodData::next_data_layout(DataLayout* current) const {
  int current_index = dp_to_di((address)current);
  int next_index = current_index + current->size_in_bytes();
  if (out_of_bounds(next_index)) {
    return nullptr;
  }
  DataLayout* next = data_layout_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Re-position the stream on each entry's bytecode before delegating.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    // The parameters area has no bci, hence no stream to pass along.
    parameters_type_data()->post_initialize(nullptr, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  // The lock is created lazily in extra_data_lock().
  _extra_data_lock = nullptr;
  initialize();
}

// Default constructor: only legal while dumping or using a CDS archive,
// where the contents are (re)materialized from the archive.
MethodData::MethodData() {
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}

// Lay out and initialize all profiling sections of this MDO:
// per-bytecode data, extra (trap) slots, the args-modified cell,
// the optional parameters area and optional exception-handler slots.
// The resulting size must match compute_allocation_size_in_bytes().
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Exception-handler profile slots follow everything else; one BitData
  // per exception table entry, keyed by the handler's pc.
  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}

// Reset counters and per-method notification masks to their defaults.
void MethodData::init() {
  _compiler_counters = CompilerCounters();  // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  // CompileThresholdScaling (possibly set per-method via CompileCommand)
  // scales how often the counter overflow handler is notified.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

  // Initialize escape flags.
  clear_escape_info();
}

// Get a measure of how much mileage the method has on it.
int MethodData::mileage_of(Method* method) {
  return MAX2(method->invocation_count(), method->backedge_count());
}

// A method's profile is mature when the compilation policy says so.
bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature((MethodData*)this);
}

// Translate a bci to its corresponding data index (di).
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Cache the match (or its predecessor) to speed up future lookups.
      if (data->bci() == bci) set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr) set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}

// Translate a bci to its corresponding data, or null.
// Translate a bci to its corresponding ProfileData: first search the
// regular per-bytecode area, then fall back to the extra-data section
// (without creating a new entry). Returns null if neither has one.
// Caller must hold the extra-data lock.
ProfileData* MethodData::bci_to_data(int bci) {
  check_extra_data_locked();

  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      // Entries are bci-ordered, so we can stop early.
      break;
    }
  }
  return bci_to_extra_data(bci, nullptr, false);
}

// Linear search of the exception-handler profile slots for the one
// whose bci matches; null if absent.
DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
  assert(ProfileExceptionHandlers, "not profiling");
  for (int i = 0; i < num_exception_handler_data(); i++) {
    DataLayout* exception_handler_data = exception_handler_data_at(i);
    if (exception_handler_data->bci() == bci) {
      return exception_handler_data;
    }
  }
  return nullptr;
}

// As exception_handler_bci_to_data() but tolerates a missing entry.
BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
  DataLayout* data = exception_handler_bci_to_data_helper(bci);
  return data != nullptr ? new BitData(data) : nullptr;
}

// Returns the handler's BitData by value; bci must have an entry.
BitData MethodData::exception_handler_bci_to_data(int bci) {
  DataLayout* data = exception_handler_bci_to_data_helper(bci);
  assert(data != nullptr, "invalid bci");
  return BitData(data);
}

// Step over one entry in the extra-data section. Only BitData-sized
// (incl. not-yet-used no_tag) and SpeculativeTrapData-sized entries
// can appear there.
DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

// Search the extra-data section for an entry matching bci (and, for
// speculative traps, method m). On return, dp points at the first free
// slot if nothing matched, or at 'end' if the section is full.
// Caller must hold the extra-data lock.
ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
  check_extra_data_locked();

  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr;  // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      // BitData entries match on bci alone (m is only used for traps).
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        if (dp->bci() == bci) {
          assert(data->method() != nullptr, "method must be set");
          if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  // Unreachable: the loop only exits via return.
  return nullptr;
}


// Translate a bci to its corresponding extra data, or null.
// Look up (and optionally allocate) the extra-data entry for bci. With
// m == null a plain BitData is used; with m != null a SpeculativeTrapData
// (which is twice as big) is used. Caller must hold the extra-data lock.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Publish via a fully-initialized header so concurrent readers never
    // see a half-built entry.
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}

// Find the ArgInfoData entry, which sits at the end of the extra-data
// section; null if absent.
ArgInfoData *MethodData::arg_info() {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return nullptr;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

// Dump all profile entries: parameters area, per-bytecode data, then
// the extra-data section.
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end;  // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    if (dp >= end) return;
  }
}

// Verification

void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}

void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}

// True if the invoke at bci participates in java.lang.invoke-style
// (jsr292) dispatch and therefore deserves type profiling.
bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m, bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

// True if the invoke at bci is an Unsafe/ScopedMemoryAccess get*/put*
// intrinsic candidate, which benefits from argument type profiles.
bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m, bci);
  if (inv.is_invokevirtual()) {
    Symbol* klass = inv.klass();
    if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
        klass == vmSymbols::sun_misc_Unsafe() ||
        klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
      Symbol* name = inv.name();
      if (name->starts_with("get") || name->starts_with("put")) {
        return true;
      }
    }
  }
  return false;
}

// TypeProfileLevel encodes three digits: ones = arguments,
// tens = return, hundreds = parameters.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

// Should the invoke at bci get argument type profiling?
bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

// Should the invoke at bci get return type profiling?
bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

// Should method m get a parameters-type profiling area?
bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

// Visit all embedded metaspace pointers (for CDS archiving).
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  // Only SpeculativeTrapData entries in the extra section hold metadata
  // pointers (their Method*).
  for (DataLayout* dp = extra_data_base();
       dp < extra_data_limit();
       dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      break;
    }
  }
}

// Shift the entry at dp left by 'shift' cells (reset == false), or zero
// the 'shift' cells preceding dp (reset == true). Used by
// clean_extra_data() to compact out dead SpeculativeTrapData entries.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
  bool _always_clean;
public:
  CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
  bool is_live(Method* m) {
    if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
      return true;  // TODO: treat as unloaded instead?
    }
    return !(_always_clean) && m->method_holder()->is_loader_alive();
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};

// Lazily create the per-MDO extra-data lock; the CAS guarantees all
// threads agree on a single lock even when racing to create it.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}

// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

// Drop weak klass links from all profile entries; with always_clean,
// purge them unconditionally (e.g. at class redefinition).
void MethodData::clean_method_data(bool always_clean) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(always_clean);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != nullptr) {
parameters->clean_weak_klass_links(always_clean); 1916 } 1917 1918 CleanExtraDataKlassClosure cl(always_clean); 1919 1920 // Lock to modify extra data, and prevent Safepoint from breaking the lock 1921 MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag); 1922 1923 clean_extra_data(&cl); 1924 verify_extra_data_clean(&cl); 1925 } 1926 1927 // This is called during redefinition to clean all "old" redefined 1928 // methods out of MethodData for all methods. 1929 void MethodData::clean_weak_method_links() { 1930 ResourceMark rm; 1931 CleanExtraDataMethodClosure cl; 1932 1933 // Lock to modify extra data, and prevent Safepoint from breaking the lock 1934 MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag); 1935 1936 clean_extra_data(&cl); 1937 verify_extra_data_clean(&cl); 1938 } 1939 1940 void MethodData::deallocate_contents(ClassLoaderData* loader_data) { 1941 release_C_heap_structures(); 1942 } 1943 1944 void MethodData::release_C_heap_structures() { 1945 #if INCLUDE_JVMCI 1946 FailedSpeculation::free_failed_speculations(get_failed_speculations_address()); 1947 #endif 1948 } 1949 1950 #if INCLUDE_CDS 1951 void MethodData::remove_unshareable_info() { 1952 _extra_data_lock = nullptr; 1953 } 1954 1955 void MethodData::restore_unshareable_info(TRAPS) { 1956 //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock"); 1957 } 1958 #endif // INCLUDE_CDS 1959 1960 #ifdef ASSERT 1961 void MethodData::check_extra_data_locked() const { 1962 // Cast const away, just to be able to verify the lock 1963 // Usually we only want non-const accesses on the lock, 1964 // so this here is an exception. 1965 MethodData* self = (MethodData*)this; 1966 assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock"); 1967 assert(!Thread::current()->is_Java_thread() || 1968 JavaThread::current()->is_in_no_safepoint_scope(), 1969 "JavaThread must have NoSafepointVerifier inside lock scope"); 1970 } 1971 #endif