1 /* 2 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/ciMethodData.hpp" 27 #include "classfile/vmSymbols.hpp" 28 #include "compiler/compilationPolicy.hpp" 29 #include "compiler/compilerOracle.hpp" 30 #include "interpreter/bytecode.hpp" 31 #include "interpreter/bytecodeStream.hpp" 32 #include "interpreter/linkResolver.hpp" 33 #include "memory/metaspaceClosure.hpp" 34 #include "memory/resourceArea.hpp" 35 #include "oops/klass.inline.hpp" 36 #include "oops/methodData.inline.hpp" 37 #include "prims/jvmtiRedefineClasses.hpp" 38 #include "runtime/atomic.hpp" 39 #include "runtime/deoptimization.hpp" 40 #include "runtime/handles.inline.hpp" 41 #include "runtime/orderAccess.hpp" 42 #include "runtime/safepointVerifiers.hpp" 43 #include "runtime/signature.hpp" 44 #include "utilities/align.hpp" 45 #include "utilities/copy.hpp" 46 47 // ================================================================== 48 // DataLayout 49 // 50 // Overlay for generic profiling data. 51 52 // Some types of data layouts need a length field. 53 bool DataLayout::needs_array_len(u1 tag) { 54 return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag); 55 } 56 57 // Perform generic initialization of the data. More specific 58 // initialization occurs in overrides of ProfileData::post_initialize. 59 void DataLayout::initialize(u1 tag, u2 bci, int cell_count) { 60 _header._bits = (intptr_t)0; 61 _header._struct._tag = tag; 62 _header._struct._bci = bci; 63 for (int i = 0; i < cell_count; i++) { 64 set_cell_at(i, (intptr_t)0); 65 } 66 if (needs_array_len(tag)) { 67 set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header. 68 } 69 if (tag == call_type_data_tag) { 70 CallTypeData::initialize(this, cell_count); 71 } else if (tag == virtual_call_type_data_tag) { 72 VirtualCallTypeData::initialize(this, cell_count); 73 } 74 } 75 76 void DataLayout::clean_weak_klass_links(bool always_clean) { 77 ResourceMark m; 78 data_in()->clean_weak_klass_links(always_clean); 79 } 80 81 82 // ================================================================== 83 // ProfileData 84 // 85 // A ProfileData object is created to refer to a section of profiling 86 // data in a structured way. 87 88 // Constructor for invalid ProfileData. 89 ProfileData::ProfileData() { 90 _data = NULL; 91 } 92 93 char* ProfileData::print_data_on_helper(const MethodData* md) const { 94 DataLayout* dp = md->extra_data_base(); 95 DataLayout* end = md->args_data_limit(); 96 stringStream ss; 97 for (;; dp = MethodData::next_extra(dp)) { 98 assert(dp < end, "moved past end of extra data"); 99 switch(dp->tag()) { 100 case DataLayout::speculative_trap_data_tag: 101 if (dp->bci() == bci()) { 102 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 103 int trap = data->trap_state(); 104 char buf[100]; 105 ss.print("trap/"); 106 data->method()->print_short_name(&ss); 107 ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 108 } 109 break; 110 case DataLayout::bit_data_tag: 111 break; 112 case DataLayout::no_tag: 113 case DataLayout::arg_info_data_tag: 114 return ss.as_string(); 115 break; 116 default: 117 fatal("unexpected tag %d", dp->tag()); 118 } 119 } 120 return NULL; 121 } 122 123 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const { 124 print_data_on(st, print_data_on_helper(md)); 125 } 126 127 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const { 128 st->print("bci: %d", bci()); 129 st->fill_to(tab_width_one); 130 st->print("%s", name); 131 tab(st); 132 int trap = trap_state(); 133 if (trap != 0) { 134 char buf[100]; 135 st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap)); 136 } 137 if (extra != NULL) { 138 st->print("%s", extra); 139 } 140 int flags = data()->flags(); 141 if (flags != 0) { 142 st->print("flags(%d) ", flags); 143 } 144 } 145 146 void ProfileData::tab(outputStream* st, bool first) const { 147 st->fill_to(first ? tab_width_one : tab_width_two); 148 } 149 150 // ================================================================== 151 // BitData 152 // 153 // A BitData corresponds to a one-bit flag. This is used to indicate 154 // whether a checkcast bytecode has seen a null value. 155 156 157 void BitData::print_data_on(outputStream* st, const char* extra) const { 158 print_shared(st, "BitData", extra); 159 st->cr(); 160 } 161 162 // ================================================================== 163 // CounterData 164 // 165 // A CounterData corresponds to a simple counter. 166 167 void CounterData::print_data_on(outputStream* st, const char* extra) const { 168 print_shared(st, "CounterData", extra); 169 st->print_cr("count(%u)", count()); 170 } 171 172 // ================================================================== 173 // JumpData 174 // 175 // A JumpData is used to access profiling information for a direct 176 // branch. It is a counter, used for counting the number of branches, 177 // plus a data displacement, used for realigning the data pointer to 178 // the corresponding target bci. 179 180 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 181 assert(stream->bci() == bci(), "wrong pos"); 182 int target; 183 Bytecodes::Code c = stream->code(); 184 if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) { 185 target = stream->dest_w(); 186 } else { 187 target = stream->dest(); 188 } 189 int my_di = mdo->dp_to_di(dp()); 190 int target_di = mdo->bci_to_di(target); 191 int offset = target_di - my_di; 192 set_displacement(offset); 193 } 194 195 void JumpData::print_data_on(outputStream* st, const char* extra) const { 196 print_shared(st, "JumpData", extra); 197 st->print_cr("taken(%u) displacement(%d)", taken(), displacement()); 198 } 199 200 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) { 201 // Parameter profiling include the receiver 202 int args_count = include_receiver ? 1 : 0; 203 ResourceMark rm; 204 ReferenceArgumentCount rac(signature); 205 args_count += rac.count(); 206 args_count = MIN2(args_count, max); 207 return args_count * per_arg_cell_count; 208 } 209 210 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) { 211 assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); 212 assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken"); 213 const methodHandle m = stream->method(); 214 int bci = stream->bci(); 215 Bytecode_invoke inv(m, bci); 216 int args_cell = 0; 217 if (MethodData::profile_arguments_for_invoke(m, bci)) { 218 args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit); 219 } 220 int ret_cell = 0; 221 if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) { 222 ret_cell = ReturnTypeEntry::static_cell_count(); 223 } 224 int header_cell = 0; 225 if (args_cell + ret_cell > 0) { 226 header_cell = header_cell_count(); 227 } 228 229 return header_cell + args_cell + ret_cell; 230 } 231 232 class ArgumentOffsetComputer : public SignatureIterator { 233 private: 234 int _max; 235 int _offset; 236 GrowableArray<int> _offsets; 237 238 friend class SignatureIterator; // so do_parameters_on can call do_type 239 void do_type(BasicType type) { 240 if (is_reference_type(type) && _offsets.length() < _max) { 241 _offsets.push(_offset); 242 } 243 _offset += parameter_type_word_count(type); 244 } 245 246 public: 247 ArgumentOffsetComputer(Symbol* signature, int max) 248 : SignatureIterator(signature), 249 _max(max), _offset(0), 250 _offsets(max) { 251 do_parameters_on(this); // non-virtual template execution 252 } 253 254 int off_at(int i) const { return _offsets.at(i); } 255 }; 256 257 void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) { 258 ResourceMark rm; 259 int start = 0; 260 // Parameter profiling include the receiver 261 if (include_receiver && has_receiver) { 262 set_stack_slot(0, 0); 263 set_type(0, type_none()); 264 start += 1; 265 } 266 ArgumentOffsetComputer aos(signature, _number_of_entries-start); 267 for (int i = start; i < _number_of_entries; i++) { 268 set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0)); 269 set_type(i, type_none()); 270 } 271 } 272 273 void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 274 assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); 275 Bytecode_invoke inv(stream->method(), stream->bci()); 276 277 if (has_arguments()) { 278 #ifdef ASSERT 279 ResourceMark rm; 280 ReferenceArgumentCount rac(inv.signature()); 281 int count = MIN2(rac.count(), (int)TypeProfileArgsLimit); 282 assert(count > 0, "room for args type but none found?"); 283 check_number_of_arguments(count); 284 #endif 285 _args.post_initialize(inv.signature(), inv.has_receiver(), false); 286 } 287 288 if (has_return()) { 289 assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?"); 290 _ret.post_initialize(); 291 } 292 } 293 294 void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 295 assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); 296 Bytecode_invoke inv(stream->method(), stream->bci()); 297 298 if (has_arguments()) { 299 #ifdef ASSERT 300 ResourceMark rm; 301 ReferenceArgumentCount rac(inv.signature()); 302 int count = MIN2(rac.count(), (int)TypeProfileArgsLimit); 303 assert(count > 0, "room for args type but none found?"); 304 check_number_of_arguments(count); 305 #endif 306 _args.post_initialize(inv.signature(), inv.has_receiver(), false); 307 } 308 309 if (has_return()) { 310 assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?"); 311 _ret.post_initialize(); 312 } 313 } 314 315 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) { 316 for (int i = 0; i < _number_of_entries; i++) { 317 intptr_t p = type(i); 318 Klass* k = (Klass*)klass_part(p); 319 if (k != NULL && (always_clean || !k->is_loader_alive())) { 320 set_type(i, with_status((Klass*)NULL, p)); 321 } 322 } 323 } 324 325 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) { 326 intptr_t p = type(); 327 Klass* k = (Klass*)klass_part(p); 328 if (k != NULL && (always_clean || !k->is_loader_alive())) { 329 set_type(with_status((Klass*)NULL, p)); 330 } 331 } 332 333 bool TypeEntriesAtCall::return_profiling_enabled() { 334 return MethodData::profile_return(); 335 } 336 337 bool TypeEntriesAtCall::arguments_profiling_enabled() { 338 return MethodData::profile_arguments(); 339 } 340 341 void TypeEntries::print_klass(outputStream* st, intptr_t k) { 342 if (is_type_none(k)) { 343 st->print("none"); 344 } else if (is_type_unknown(k)) { 345 st->print("unknown"); 346 } else { 347 valid_klass(k)->print_value_on(st); 348 } 349 if (was_null_seen(k)) { 350 st->print(" (null seen)"); 351 } 352 } 353 354 void TypeStackSlotEntries::print_data_on(outputStream* st) const { 355 for (int i = 0; i < _number_of_entries; i++) { 356 _pd->tab(st); 357 st->print("%d: stack(%u) ", i, stack_slot(i)); 358 print_klass(st, type(i)); 359 st->cr(); 360 } 361 } 362 363 void ReturnTypeEntry::print_data_on(outputStream* st) const { 364 _pd->tab(st); 365 print_klass(st, type()); 366 st->cr(); 367 } 368 369 void CallTypeData::print_data_on(outputStream* st, const char* extra) const { 370 CounterData::print_data_on(st, extra); 371 if (has_arguments()) { 372 tab(st, true); 373 st->print("argument types"); 374 _args.print_data_on(st); 375 } 376 if (has_return()) { 377 tab(st, true); 378 st->print("return type"); 379 _ret.print_data_on(st); 380 } 381 } 382 383 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const { 384 VirtualCallData::print_data_on(st, extra); 385 if (has_arguments()) { 386 tab(st, true); 387 st->print("argument types"); 388 _args.print_data_on(st); 389 } 390 if (has_return()) { 391 tab(st, true); 392 st->print("return type"); 393 _ret.print_data_on(st); 394 } 395 } 396 397 // ================================================================== 398 // ReceiverTypeData 399 // 400 // A ReceiverTypeData is used to access profiling information about a 401 // dynamic type check. It consists of a counter which counts the total times 402 // that the check is reached, and a series of (Klass*, count) pairs 403 // which are used to store a type profile for the receiver of the check. 404 405 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) { 406 for (uint row = 0; row < row_limit(); row++) { 407 Klass* p = receiver(row); 408 if (p != NULL && (always_clean || !p->is_loader_alive())) { 409 clear_row(row); 410 } 411 } 412 } 413 414 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const { 415 uint row; 416 int entries = 0; 417 for (row = 0; row < row_limit(); row++) { 418 if (receiver(row) != NULL) entries++; 419 } 420 #if INCLUDE_JVMCI 421 st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries); 422 #else 423 st->print_cr("count(%u) entries(%u)", count(), entries); 424 #endif 425 int total = count(); 426 for (row = 0; row < row_limit(); row++) { 427 if (receiver(row) != NULL) { 428 total += receiver_count(row); 429 } 430 } 431 for (row = 0; row < row_limit(); row++) { 432 if (receiver(row) != NULL) { 433 tab(st); 434 receiver(row)->print_value_on(st); 435 st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total); 436 } 437 } 438 } 439 void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const { 440 print_shared(st, "ReceiverTypeData", extra); 441 print_receiver_data_on(st); 442 } 443 444 void VirtualCallData::print_data_on(outputStream* st, const char* extra) const { 445 print_shared(st, "VirtualCallData", extra); 446 print_receiver_data_on(st); 447 } 448 449 // ================================================================== 450 // RetData 451 // 452 // A RetData is used to access profiling information for a ret bytecode. 453 // It is composed of a count of the number of times that the ret has 454 // been executed, followed by a series of triples of the form 455 // (bci, count, di) which count the number of times that some bci was the 456 // target of the ret and cache a corresponding displacement. 457 458 void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 459 for (uint row = 0; row < row_limit(); row++) { 460 set_bci_displacement(row, -1); 461 set_bci(row, no_bci); 462 } 463 // release so other threads see a consistent state. bci is used as 464 // a valid flag for bci_displacement. 465 OrderAccess::release(); 466 } 467 468 // This routine needs to atomically update the RetData structure, so the 469 // caller needs to hold the RetData_lock before it gets here. Since taking 470 // the lock can block (and allow GC) and since RetData is a ProfileData is a 471 // wrapper around a derived oop, taking the lock in _this_ method will 472 // basically cause the 'this' pointer's _data field to contain junk after the 473 // lock. We require the caller to take the lock before making the ProfileData 474 // structure. Currently the only caller is InterpreterRuntime::update_mdp_for_ret 475 address RetData::fixup_ret(int return_bci, MethodData* h_mdo) { 476 // First find the mdp which corresponds to the return bci. 477 address mdp = h_mdo->bci_to_dp(return_bci); 478 479 // Now check to see if any of the cache slots are open. 480 for (uint row = 0; row < row_limit(); row++) { 481 if (bci(row) == no_bci) { 482 set_bci_displacement(row, mdp - dp()); 483 set_bci_count(row, DataLayout::counter_increment); 484 // Barrier to ensure displacement is written before the bci; allows 485 // the interpreter to read displacement without fear of race condition. 486 release_set_bci(row, return_bci); 487 break; 488 } 489 } 490 return mdp; 491 } 492 493 void RetData::print_data_on(outputStream* st, const char* extra) const { 494 print_shared(st, "RetData", extra); 495 uint row; 496 int entries = 0; 497 for (row = 0; row < row_limit(); row++) { 498 if (bci(row) != no_bci) entries++; 499 } 500 st->print_cr("count(%u) entries(%u)", count(), entries); 501 for (row = 0; row < row_limit(); row++) { 502 if (bci(row) != no_bci) { 503 tab(st); 504 st->print_cr("bci(%d: count(%u) displacement(%d))", 505 bci(row), bci_count(row), bci_displacement(row)); 506 } 507 } 508 } 509 510 // ================================================================== 511 // BranchData 512 // 513 // A BranchData is used to access profiling data for a two-way branch. 514 // It consists of taken and not_taken counts as well as a data displacement 515 // for the taken case. 516 517 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 518 assert(stream->bci() == bci(), "wrong pos"); 519 int target = stream->dest(); 520 int my_di = mdo->dp_to_di(dp()); 521 int target_di = mdo->bci_to_di(target); 522 int offset = target_di - my_di; 523 set_displacement(offset); 524 } 525 526 void BranchData::print_data_on(outputStream* st, const char* extra) const { 527 print_shared(st, "BranchData", extra); 528 st->print_cr("taken(%u) displacement(%d)", 529 taken(), displacement()); 530 tab(st); 531 st->print_cr("not taken(%u)", not_taken()); 532 } 533 534 // ================================================================== 535 // MultiBranchData 536 // 537 // A MultiBranchData is used to access profiling information for 538 // a multi-way branch (*switch bytecodes). It consists of a series 539 // of (count, displacement) pairs, which count the number of times each 540 // case was taken and specify the data displacment for each branch target. 541 542 int MultiBranchData::compute_cell_count(BytecodeStream* stream) { 543 int cell_count = 0; 544 if (stream->code() == Bytecodes::_tableswitch) { 545 Bytecode_tableswitch sw(stream->method()(), stream->bcp()); 546 cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default 547 } else { 548 Bytecode_lookupswitch sw(stream->method()(), stream->bcp()); 549 cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default 550 } 551 return cell_count; 552 } 553 554 void MultiBranchData::post_initialize(BytecodeStream* stream, 555 MethodData* mdo) { 556 assert(stream->bci() == bci(), "wrong pos"); 557 int target; 558 int my_di; 559 int target_di; 560 int offset; 561 if (stream->code() == Bytecodes::_tableswitch) { 562 Bytecode_tableswitch sw(stream->method()(), stream->bcp()); 563 int len = sw.length(); 564 assert(array_len() == per_case_cell_count * (len + 1), "wrong len"); 565 for (int count = 0; count < len; count++) { 566 target = sw.dest_offset_at(count) + bci(); 567 my_di = mdo->dp_to_di(dp()); 568 target_di = mdo->bci_to_di(target); 569 offset = target_di - my_di; 570 set_displacement_at(count, offset); 571 } 572 target = sw.default_offset() + bci(); 573 my_di = mdo->dp_to_di(dp()); 574 target_di = mdo->bci_to_di(target); 575 offset = target_di - my_di; 576 set_default_displacement(offset); 577 578 } else { 579 Bytecode_lookupswitch sw(stream->method()(), stream->bcp()); 580 int npairs = sw.number_of_pairs(); 581 assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len"); 582 for (int count = 0; count < npairs; count++) { 583 LookupswitchPair pair = sw.pair_at(count); 584 target = pair.offset() + bci(); 585 my_di = mdo->dp_to_di(dp()); 586 target_di = mdo->bci_to_di(target); 587 offset = target_di - my_di; 588 set_displacement_at(count, offset); 589 } 590 target = sw.default_offset() + bci(); 591 my_di = mdo->dp_to_di(dp()); 592 target_di = mdo->bci_to_di(target); 593 offset = target_di - my_di; 594 set_default_displacement(offset); 595 } 596 } 597 598 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const { 599 print_shared(st, "MultiBranchData", extra); 600 st->print_cr("default_count(%u) displacement(%d)", 601 default_count(), default_displacement()); 602 int cases = number_of_cases(); 603 for (int i = 0; i < cases; i++) { 604 tab(st); 605 st->print_cr("count(%u) displacement(%d)", 606 count_at(i), displacement_at(i)); 607 } 608 } 609 610 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const { 611 print_shared(st, "ArgInfoData", extra); 612 int nargs = number_of_args(); 613 for (int i = 0; i < nargs; i++) { 614 st->print(" 0x%x", arg_modified(i)); 615 } 616 st->cr(); 617 } 618 619 int ParametersTypeData::compute_cell_count(Method* m) { 620 if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) { 621 return 0; 622 } 623 int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit; 624 int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max); 625 if (obj_args > 0) { 626 return obj_args + 1; // 1 cell for array len 627 } 628 return 0; 629 } 630 631 void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 632 _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true); 633 } 634 635 bool ParametersTypeData::profiling_enabled() { 636 return MethodData::profile_parameters(); 637 } 638 639 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const { 640 print_shared(st, "ParametersTypeData", extra); 641 tab(st); 642 _parameters.print_data_on(st); 643 st->cr(); 644 } 645 646 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const { 647 print_shared(st, "SpeculativeTrapData", extra); 648 tab(st); 649 method()->print_short_name(st); 650 st->cr(); 651 } 652 653 // ================================================================== 654 // MethodData* 655 // 656 // A MethodData* holds information which has been collected about 657 // a method. 658 659 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) { 660 int size = MethodData::compute_allocation_size_in_words(method); 661 662 return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD) 663 MethodData(method); 664 } 665 666 int MethodData::bytecode_cell_count(Bytecodes::Code code) { 667 if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) { 668 return no_profile_data; 669 } 670 switch (code) { 671 case Bytecodes::_checkcast: 672 case Bytecodes::_instanceof: 673 case Bytecodes::_aastore: 674 if (TypeProfileCasts) { 675 return ReceiverTypeData::static_cell_count(); 676 } else { 677 return BitData::static_cell_count(); 678 } 679 case Bytecodes::_invokespecial: 680 case Bytecodes::_invokestatic: 681 if (MethodData::profile_arguments() || MethodData::profile_return()) { 682 return variable_cell_count; 683 } else { 684 return CounterData::static_cell_count(); 685 } 686 case Bytecodes::_goto: 687 case Bytecodes::_goto_w: 688 case Bytecodes::_jsr: 689 case Bytecodes::_jsr_w: 690 return JumpData::static_cell_count(); 691 case Bytecodes::_invokevirtual: 692 case Bytecodes::_invokeinterface: 693 if (MethodData::profile_arguments() || MethodData::profile_return()) { 694 return variable_cell_count; 695 } else { 696 return VirtualCallData::static_cell_count(); 697 } 698 case Bytecodes::_invokedynamic: 699 if (MethodData::profile_arguments() || MethodData::profile_return()) { 700 return variable_cell_count; 701 } else { 702 return CounterData::static_cell_count(); 703 } 704 case Bytecodes::_ret: 705 return RetData::static_cell_count(); 706 case Bytecodes::_ifeq: 707 case Bytecodes::_ifne: 708 case Bytecodes::_iflt: 709 case Bytecodes::_ifge: 710 case Bytecodes::_ifgt: 711 case Bytecodes::_ifle: 712 case Bytecodes::_if_icmpeq: 713 case Bytecodes::_if_icmpne: 714 case Bytecodes::_if_icmplt: 715 case Bytecodes::_if_icmpge: 716 case Bytecodes::_if_icmpgt: 717 case Bytecodes::_if_icmple: 718 case Bytecodes::_if_acmpeq: 719 case Bytecodes::_if_acmpne: 720 case Bytecodes::_ifnull: 721 case Bytecodes::_ifnonnull: 722 return BranchData::static_cell_count(); 723 case Bytecodes::_lookupswitch: 724 case Bytecodes::_tableswitch: 725 return variable_cell_count; 726 default: 727 return no_profile_data; 728 } 729 } 730 731 // Compute the size of the profiling information corresponding to 732 // the current bytecode. 733 int MethodData::compute_data_size(BytecodeStream* stream) { 734 int cell_count = bytecode_cell_count(stream->code()); 735 if (cell_count == no_profile_data) { 736 return 0; 737 } 738 if (cell_count == variable_cell_count) { 739 switch (stream->code()) { 740 case Bytecodes::_lookupswitch: 741 case Bytecodes::_tableswitch: 742 cell_count = MultiBranchData::compute_cell_count(stream); 743 break; 744 case Bytecodes::_invokespecial: 745 case Bytecodes::_invokestatic: 746 case Bytecodes::_invokedynamic: 747 assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile"); 748 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 749 profile_return_for_invoke(stream->method(), stream->bci())) { 750 cell_count = CallTypeData::compute_cell_count(stream); 751 } else { 752 cell_count = CounterData::static_cell_count(); 753 } 754 break; 755 case Bytecodes::_invokevirtual: 756 case Bytecodes::_invokeinterface: { 757 assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile"); 758 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 759 profile_return_for_invoke(stream->method(), stream->bci())) { 760 cell_count = VirtualCallTypeData::compute_cell_count(stream); 761 } else { 762 cell_count = VirtualCallData::static_cell_count(); 763 } 764 break; 765 } 766 default: 767 fatal("unexpected bytecode for var length profile data"); 768 } 769 } 770 // Note: cell_count might be zero, meaning that there is just 771 // a DataLayout header, with no extra cells. 772 assert(cell_count >= 0, "sanity"); 773 return DataLayout::compute_size_in_bytes(cell_count); 774 } 775 776 bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) { 777 // Bytecodes for which we may use speculation 778 switch (code) { 779 case Bytecodes::_checkcast: 780 case Bytecodes::_instanceof: 781 case Bytecodes::_aastore: 782 case Bytecodes::_invokevirtual: 783 case Bytecodes::_invokeinterface: 784 case Bytecodes::_if_acmpeq: 785 case Bytecodes::_if_acmpne: 786 case Bytecodes::_ifnull: 787 case Bytecodes::_ifnonnull: 788 case Bytecodes::_invokestatic: 789 #ifdef COMPILER2 790 if (CompilerConfig::is_c2_enabled()) { 791 return UseTypeSpeculation; 792 } 793 #endif 794 default: 795 return false; 796 } 797 return false; 798 } 799 800 #if INCLUDE_JVMCI 801 802 void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() { 803 return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow); 804 } 805 806 FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) { 807 memcpy(data(), speculation, speculation_len); 808 } 809 810 // A heuristic check to detect nmethods that outlive a failed speculations list. 811 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) { 812 jlong head = (jlong)(address) *failed_speculations_address; 813 if ((head & 0x1) == 0x1) { 814 stringStream st; 815 if (nm != NULL) { 816 st.print("%d", nm->compile_id()); 817 Method* method = nm->method(); 818 st.print_raw("{"); 819 if (method != NULL) { 820 method->print_name(&st); 821 } else { 822 const char* jvmci_name = nm->jvmci_name(); 823 if (jvmci_name != NULL) { 824 st.print_raw(jvmci_name); 825 } 826 } 827 st.print_raw("}"); 828 } else { 829 st.print("<unknown>"); 830 } 831 fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string()); 832 } 833 } 834 835 bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) { 836 assert(failed_speculations_address != NULL, "must be"); 837 size_t fs_size = sizeof(FailedSpeculation) + speculation_len; 838 FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len); 839 if (fs == NULL) { 840 // no memory -> ignore failed speculation 841 return false; 842 } 843 844 guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned"); 845 guarantee_failed_speculations_alive(nm, failed_speculations_address); 846 847 FailedSpeculation** cursor = failed_speculations_address; 848 do { 849 if (*cursor == NULL) { 850 FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs); 851 if (old_fs == NULL) { 852 // Successfully appended fs to end of the list 853 return true; 854 } 855 cursor = old_fs->next_adr(); 856 } else { 857 cursor = (*cursor)->next_adr(); 858 } 859 } while (true); 860 } 861 862 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) { 863 assert(failed_speculations_address != NULL, "must be"); 864 FailedSpeculation* fs = *failed_speculations_address; 865 while (fs != NULL) { 866 FailedSpeculation* next = fs->next(); 867 delete fs; 868 fs = next; 869 } 870 871 // Write an unaligned value to failed_speculations_address to denote 872 // that it is no longer a valid pointer. This is allows for the check 873 // in add_failed_speculation against adding to a freed failed 874 // speculations list. 875 long* head = (long*) failed_speculations_address; 876 (*head) = (*head) | 0x1; 877 } 878 #endif // INCLUDE_JVMCI 879 880 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) { 881 #if INCLUDE_JVMCI 882 if (ProfileTraps) { 883 // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one. 884 int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100)); 885 886 // Make sure we have a minimum number of extra data slots to 887 // allocate SpeculativeTrapData entries. We would want to have one 888 // entry per compilation that inlines this method and for which 889 // some type speculation assumption fails. So the room we need for 890 // the SpeculativeTrapData entries doesn't directly depend on the 891 // size of the method. Because it's hard to estimate, we reserve 892 // space for an arbitrary number of entries. 893 int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) * 894 (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells()); 895 896 return MAX2(extra_data_count, spec_data_count); 897 } else { 898 return 0; 899 } 900 #else // INCLUDE_JVMCI 901 if (ProfileTraps) { 902 // Assume that up to 3% of BCIs with no MDP will need to allocate one. 903 int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1; 904 // If the method is large, let the extra BCIs grow numerous (to ~1%). 905 int one_percent_of_data 906 = (uint)data_size / (DataLayout::header_size_in_bytes()*128); 907 if (extra_data_count < one_percent_of_data) 908 extra_data_count = one_percent_of_data; 909 if (extra_data_count > empty_bc_count) 910 extra_data_count = empty_bc_count; // no need for more 911 912 // Make sure we have a minimum number of extra data slots to 913 // allocate SpeculativeTrapData entries. We would want to have one 914 // entry per compilation that inlines this method and for which 915 // some type speculation assumption fails. So the room we need for 916 // the SpeculativeTrapData entries doesn't directly depend on the 917 // size of the method. Because it's hard to estimate, we reserve 918 // space for an arbitrary number of entries. 919 int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) * 920 (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells()); 921 922 return MAX2(extra_data_count, spec_data_count); 923 } else { 924 return 0; 925 } 926 #endif // INCLUDE_JVMCI 927 } 928 929 // Compute the size of the MethodData* necessary to store 930 // profiling information about a given method. Size is in bytes. 931 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) { 932 int data_size = 0; 933 BytecodeStream stream(method); 934 Bytecodes::Code c; 935 int empty_bc_count = 0; // number of bytecodes lacking data 936 bool needs_speculative_traps = false; 937 while ((c = stream.next()) >= 0) { 938 int size_in_bytes = compute_data_size(&stream); 939 data_size += size_in_bytes; 940 if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1; 941 needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c); 942 } 943 int object_size = in_bytes(data_offset()) + data_size; 944 945 // Add some extra DataLayout cells (at least one) to track stray traps. 946 int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps); 947 object_size += extra_data_count * DataLayout::compute_size_in_bytes(0); 948 949 // Add a cell to record information about modified arguments. 950 int arg_size = method->size_of_parameters(); 951 object_size += DataLayout::compute_size_in_bytes(arg_size+1); 952 953 // Reserve room for an area of the MDO dedicated to profiling of 954 // parameters 955 int args_cell = ParametersTypeData::compute_cell_count(method()); 956 if (args_cell > 0) { 957 object_size += DataLayout::compute_size_in_bytes(args_cell); 958 } 959 return object_size; 960 } 961 962 // Compute the size of the MethodData* necessary to store 963 // profiling information about a given method. Size is in words 964 int MethodData::compute_allocation_size_in_words(const methodHandle& method) { 965 int byte_size = compute_allocation_size_in_bytes(method); 966 int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord; 967 return align_metadata_size(word_size); 968 } 969 970 // Initialize an individual data segment. Returns the size of 971 // the segment in bytes. 972 int MethodData::initialize_data(BytecodeStream* stream, 973 int data_index) { 974 if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) { 975 return 0; 976 } 977 int cell_count = -1; 978 int tag = DataLayout::no_tag; 979 DataLayout* data_layout = data_layout_at(data_index); 980 Bytecodes::Code c = stream->code(); 981 switch (c) { 982 case Bytecodes::_checkcast: 983 case Bytecodes::_instanceof: 984 case Bytecodes::_aastore: 985 if (TypeProfileCasts) { 986 cell_count = ReceiverTypeData::static_cell_count(); 987 tag = DataLayout::receiver_type_data_tag; 988 } else { 989 cell_count = BitData::static_cell_count(); 990 tag = DataLayout::bit_data_tag; 991 } 992 break; 993 case Bytecodes::_invokespecial: 994 case Bytecodes::_invokestatic: { 995 int counter_data_cell_count = CounterData::static_cell_count(); 996 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 997 profile_return_for_invoke(stream->method(), stream->bci())) { 998 cell_count = CallTypeData::compute_cell_count(stream); 999 } else { 1000 cell_count = counter_data_cell_count; 1001 } 1002 if (cell_count > counter_data_cell_count) { 1003 tag = DataLayout::call_type_data_tag; 1004 } else { 1005 tag = DataLayout::counter_data_tag; 1006 } 1007 break; 1008 } 1009 case Bytecodes::_goto: 1010 case Bytecodes::_goto_w: 1011 case Bytecodes::_jsr: 1012 case Bytecodes::_jsr_w: 1013 cell_count = JumpData::static_cell_count(); 1014 tag = DataLayout::jump_data_tag; 1015 break; 1016 case Bytecodes::_invokevirtual: 1017 case Bytecodes::_invokeinterface: { 1018 int virtual_call_data_cell_count = VirtualCallData::static_cell_count(); 1019 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 1020 profile_return_for_invoke(stream->method(), stream->bci())) { 1021 cell_count = VirtualCallTypeData::compute_cell_count(stream); 1022 } else { 1023 cell_count = virtual_call_data_cell_count; 1024 } 1025 if (cell_count > virtual_call_data_cell_count) { 1026 tag = DataLayout::virtual_call_type_data_tag; 1027 } else { 1028 tag = DataLayout::virtual_call_data_tag; 1029 } 1030 break; 1031 } 1032 case Bytecodes::_invokedynamic: { 1033 // %%% should make a type profile for any invokedynamic that takes a ref argument 1034 int counter_data_cell_count = CounterData::static_cell_count(); 1035 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 1036 profile_return_for_invoke(stream->method(), stream->bci())) { 1037 cell_count = CallTypeData::compute_cell_count(stream); 1038 } else { 1039 cell_count = counter_data_cell_count; 1040 } 1041 if (cell_count > counter_data_cell_count) { 1042 tag = DataLayout::call_type_data_tag; 1043 } else { 1044 tag = DataLayout::counter_data_tag; 1045 } 1046 break; 1047 } 1048 case Bytecodes::_ret: 1049 cell_count = RetData::static_cell_count(); 1050 tag = DataLayout::ret_data_tag; 1051 break; 1052 case Bytecodes::_ifeq: 1053 case Bytecodes::_ifne: 1054 case Bytecodes::_iflt: 1055 case Bytecodes::_ifge: 1056 case Bytecodes::_ifgt: 1057 case Bytecodes::_ifle: 1058 case Bytecodes::_if_icmpeq: 1059 case Bytecodes::_if_icmpne: 1060 case Bytecodes::_if_icmplt: 1061 case Bytecodes::_if_icmpge: 1062 case Bytecodes::_if_icmpgt: 1063 case Bytecodes::_if_icmple: 1064 case Bytecodes::_if_acmpeq: 1065 case Bytecodes::_if_acmpne: 1066 case Bytecodes::_ifnull: 1067 case Bytecodes::_ifnonnull: 1068 cell_count = BranchData::static_cell_count(); 1069 tag = DataLayout::branch_data_tag; 1070 break; 1071 case Bytecodes::_lookupswitch: 1072 case Bytecodes::_tableswitch: 1073 cell_count = MultiBranchData::compute_cell_count(stream); 1074 tag = DataLayout::multi_branch_data_tag; 1075 break; 1076 default: 1077 break; 1078 } 1079 assert(tag == DataLayout::multi_branch_data_tag || 1080 ((MethodData::profile_arguments() || MethodData::profile_return()) && 1081 (tag == DataLayout::call_type_data_tag || 1082 tag == DataLayout::counter_data_tag || 1083 tag == DataLayout::virtual_call_type_data_tag || 1084 tag == DataLayout::virtual_call_data_tag)) || 1085 cell_count == bytecode_cell_count(c), "cell counts must agree"); 1086 if (cell_count >= 0) { 1087 assert(tag != DataLayout::no_tag, "bad tag"); 1088 assert(bytecode_has_profile(c), "agree w/ BHP"); 1089 data_layout->initialize(tag, stream->bci(), cell_count); 1090 return DataLayout::compute_size_in_bytes(cell_count); 1091 } else { 1092 assert(!bytecode_has_profile(c), "agree w/ !BHP"); 1093 return 0; 1094 } 1095 } 1096 1097 // Get the data at an arbitrary (sort of) data index. 1098 ProfileData* MethodData::data_at(int data_index) const { 1099 if (out_of_bounds(data_index)) { 1100 return NULL; 1101 } 1102 DataLayout* data_layout = data_layout_at(data_index); 1103 return data_layout->data_in(); 1104 } 1105 1106 int DataLayout::cell_count() { 1107 switch (tag()) { 1108 case DataLayout::no_tag: 1109 default: 1110 ShouldNotReachHere(); 1111 return 0; 1112 case DataLayout::bit_data_tag: 1113 return BitData::static_cell_count(); 1114 case DataLayout::counter_data_tag: 1115 return CounterData::static_cell_count(); 1116 case DataLayout::jump_data_tag: 1117 return JumpData::static_cell_count(); 1118 case DataLayout::receiver_type_data_tag: 1119 return ReceiverTypeData::static_cell_count(); 1120 case DataLayout::virtual_call_data_tag: 1121 return VirtualCallData::static_cell_count(); 1122 case DataLayout::ret_data_tag: 1123 return RetData::static_cell_count(); 1124 case DataLayout::branch_data_tag: 1125 return BranchData::static_cell_count(); 1126 case DataLayout::multi_branch_data_tag: 1127 return ((new MultiBranchData(this))->cell_count()); 1128 case DataLayout::arg_info_data_tag: 1129 return ((new ArgInfoData(this))->cell_count()); 1130 case DataLayout::call_type_data_tag: 1131 return ((new CallTypeData(this))->cell_count()); 1132 case DataLayout::virtual_call_type_data_tag: 1133 return ((new VirtualCallTypeData(this))->cell_count()); 1134 case DataLayout::parameters_type_data_tag: 1135 return ((new ParametersTypeData(this))->cell_count()); 1136 case DataLayout::speculative_trap_data_tag: 1137 return SpeculativeTrapData::static_cell_count(); 1138 } 1139 } 1140 ProfileData* DataLayout::data_in() { 1141 switch (tag()) { 1142 case DataLayout::no_tag: 1143 default: 1144 ShouldNotReachHere(); 1145 return NULL; 1146 case DataLayout::bit_data_tag: 1147 return new BitData(this); 1148 case DataLayout::counter_data_tag: 1149 return new CounterData(this); 1150 case DataLayout::jump_data_tag: 1151 return new JumpData(this); 1152 case DataLayout::receiver_type_data_tag: 1153 return new ReceiverTypeData(this); 1154 case DataLayout::virtual_call_data_tag: 1155 return new VirtualCallData(this); 1156 case DataLayout::ret_data_tag: 1157 return new RetData(this); 1158 case DataLayout::branch_data_tag: 1159 return new BranchData(this); 1160 case DataLayout::multi_branch_data_tag: 1161 return new MultiBranchData(this); 1162 case DataLayout::arg_info_data_tag: 1163 return new ArgInfoData(this); 1164 case DataLayout::call_type_data_tag: 1165 return new CallTypeData(this); 1166 case DataLayout::virtual_call_type_data_tag: 1167 return new VirtualCallTypeData(this); 1168 case DataLayout::parameters_type_data_tag: 1169 return new ParametersTypeData(this); 1170 case DataLayout::speculative_trap_data_tag: 1171 return new SpeculativeTrapData(this); 1172 } 1173 } 1174 1175 // Iteration over data. 1176 ProfileData* MethodData::next_data(ProfileData* current) const { 1177 int current_index = dp_to_di(current->dp()); 1178 int next_index = current_index + current->size_in_bytes(); 1179 ProfileData* next = data_at(next_index); 1180 return next; 1181 } 1182 1183 DataLayout* MethodData::next_data_layout(DataLayout* current) const { 1184 int current_index = dp_to_di((address)current); 1185 int next_index = current_index + current->size_in_bytes(); 1186 if (out_of_bounds(next_index)) { 1187 return NULL; 1188 } 1189 DataLayout* next = data_layout_at(next_index); 1190 return next; 1191 } 1192 1193 // Give each of the data entries a chance to perform specific 1194 // data initialization. 1195 void MethodData::post_initialize(BytecodeStream* stream) { 1196 ResourceMark rm; 1197 ProfileData* data; 1198 for (data = first_data(); is_valid(data); data = next_data(data)) { 1199 stream->set_start(data->bci()); 1200 stream->next(); 1201 data->post_initialize(stream, this); 1202 } 1203 if (_parameters_type_data_di != no_parameters) { 1204 parameters_type_data()->post_initialize(NULL, this); 1205 } 1206 } 1207 1208 // Initialize the MethodData* corresponding to a given method. 1209 MethodData::MethodData(const methodHandle& method) 1210 : _method(method()), 1211 // Holds Compile_lock 1212 _extra_data_lock(Mutex::safepoint-2, "MDOExtraData_lock"), 1213 _compiler_counters(), 1214 _parameters_type_data_di(parameters_uninitialized) { 1215 initialize(); 1216 } 1217 1218 void MethodData::initialize() { 1219 Thread* thread = Thread::current(); 1220 NoSafepointVerifier no_safepoint; // init function atomic wrt GC 1221 ResourceMark rm(thread); 1222 1223 init(); 1224 set_creation_mileage(mileage_of(method())); 1225 1226 // Go through the bytecodes and allocate and initialize the 1227 // corresponding data cells. 1228 int data_size = 0; 1229 int empty_bc_count = 0; // number of bytecodes lacking data 1230 _data[0] = 0; // apparently not set below. 1231 BytecodeStream stream(methodHandle(thread, method())); 1232 Bytecodes::Code c; 1233 bool needs_speculative_traps = false; 1234 while ((c = stream.next()) >= 0) { 1235 int size_in_bytes = initialize_data(&stream, data_size); 1236 data_size += size_in_bytes; 1237 if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1; 1238 needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c); 1239 } 1240 _data_size = data_size; 1241 int object_size = in_bytes(data_offset()) + data_size; 1242 1243 // Add some extra DataLayout cells (at least one) to track stray traps. 1244 int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps); 1245 int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0); 1246 1247 // Let's zero the space for the extra data 1248 Copy::zero_to_bytes(((address)_data) + data_size, extra_size); 1249 1250 // Add a cell to record information about modified arguments. 1251 // Set up _args_modified array after traps cells so that 1252 // the code for traps cells works. 1253 DataLayout *dp = data_layout_at(data_size + extra_size); 1254 1255 int arg_size = method()->size_of_parameters(); 1256 dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1); 1257 1258 int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1); 1259 object_size += extra_size + arg_data_size; 1260 1261 int parms_cell = ParametersTypeData::compute_cell_count(method()); 1262 // If we are profiling parameters, we reserved an area near the end 1263 // of the MDO after the slots for bytecodes (because there's no bci 1264 // for method entry so they don't fit with the framework for the 1265 // profiling of bytecodes). We store the offset within the MDO of 1266 // this area (or -1 if no parameter is profiled) 1267 if (parms_cell > 0) { 1268 object_size += DataLayout::compute_size_in_bytes(parms_cell); 1269 _parameters_type_data_di = data_size + extra_size + arg_data_size; 1270 DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size); 1271 dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell); 1272 } else { 1273 _parameters_type_data_di = no_parameters; 1274 } 1275 1276 // Set an initial hint. Don't use set_hint_di() because 1277 // first_di() may be out of bounds if data_size is 0. 1278 // In that situation, _hint_di is never used, but at 1279 // least well-defined. 1280 _hint_di = first_di(); 1281 1282 post_initialize(&stream); 1283 1284 assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size"); 1285 set_size(object_size); 1286 } 1287 1288 void MethodData::init() { 1289 _compiler_counters = CompilerCounters(); // reset compiler counters 1290 _invocation_counter.init(); 1291 _backedge_counter.init(); 1292 _invocation_counter_start = 0; 1293 _backedge_counter_start = 0; 1294 1295 // Set per-method invoke- and backedge mask. 1296 double scale = 1.0; 1297 methodHandle mh(Thread::current(), _method); 1298 CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale); 1299 _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift; 1300 _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift; 1301 1302 _tenure_traps = 0; 1303 _num_loops = 0; 1304 _num_blocks = 0; 1305 _would_profile = unknown; 1306 1307 #if INCLUDE_JVMCI 1308 _jvmci_ir_size = 0; 1309 _failed_speculations = NULL; 1310 #endif 1311 1312 #if INCLUDE_RTM_OPT 1313 _rtm_state = NoRTM; // No RTM lock eliding by default 1314 if (UseRTMLocking && 1315 !CompilerOracle::has_option(mh, CompileCommand::NoRTMLockEliding)) { 1316 if (CompilerOracle::has_option(mh, CompileCommand::UseRTMLockEliding) || !UseRTMDeopt) { 1317 // Generate RTM lock eliding code without abort ratio calculation code. 1318 _rtm_state = UseRTM; 1319 } else if (UseRTMDeopt) { 1320 // Generate RTM lock eliding code and include abort ratio calculation 1321 // code if UseRTMDeopt is on. 1322 _rtm_state = ProfileRTM; 1323 } 1324 } 1325 #endif 1326 1327 // Initialize escape flags. 1328 clear_escape_info(); 1329 } 1330 1331 // Get a measure of how much mileage the method has on it. 1332 int MethodData::mileage_of(Method* method) { 1333 return MAX2(method->invocation_count(), method->backedge_count()); 1334 } 1335 1336 bool MethodData::is_mature() const { 1337 return CompilationPolicy::is_mature(_method); 1338 } 1339 1340 // Translate a bci to its corresponding data index (di). 1341 address MethodData::bci_to_dp(int bci) { 1342 ResourceMark rm; 1343 DataLayout* data = data_layout_before(bci); 1344 DataLayout* prev = NULL; 1345 for ( ; is_valid(data); data = next_data_layout(data)) { 1346 if (data->bci() >= bci) { 1347 if (data->bci() == bci) set_hint_di(dp_to_di((address)data)); 1348 else if (prev != NULL) set_hint_di(dp_to_di((address)prev)); 1349 return (address)data; 1350 } 1351 prev = data; 1352 } 1353 return (address)limit_data_position(); 1354 } 1355 1356 // Translate a bci to its corresponding data, or NULL. 1357 ProfileData* MethodData::bci_to_data(int bci) { 1358 DataLayout* data = data_layout_before(bci); 1359 for ( ; is_valid(data); data = next_data_layout(data)) { 1360 if (data->bci() == bci) { 1361 set_hint_di(dp_to_di((address)data)); 1362 return data->data_in(); 1363 } else if (data->bci() > bci) { 1364 break; 1365 } 1366 } 1367 return bci_to_extra_data(bci, NULL, false); 1368 } 1369 1370 DataLayout* MethodData::next_extra(DataLayout* dp) { 1371 int nb_cells = 0; 1372 switch(dp->tag()) { 1373 case DataLayout::bit_data_tag: 1374 case DataLayout::no_tag: 1375 nb_cells = BitData::static_cell_count(); 1376 break; 1377 case DataLayout::speculative_trap_data_tag: 1378 nb_cells = SpeculativeTrapData::static_cell_count(); 1379 break; 1380 default: 1381 fatal("unexpected tag %d", dp->tag()); 1382 } 1383 return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells)); 1384 } 1385 1386 ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) { 1387 DataLayout* end = args_data_limit(); 1388 1389 for (;; dp = next_extra(dp)) { 1390 assert(dp < end, "moved past end of extra data"); 1391 // No need for "Atomic::load_acquire" ops, 1392 // since the data structure is monotonic. 1393 switch(dp->tag()) { 1394 case DataLayout::no_tag: 1395 return NULL; 1396 case DataLayout::arg_info_data_tag: 1397 dp = end; 1398 return NULL; // ArgInfoData is at the end of extra data section. 1399 case DataLayout::bit_data_tag: 1400 if (m == NULL && dp->bci() == bci) { 1401 return new BitData(dp); 1402 } 1403 break; 1404 case DataLayout::speculative_trap_data_tag: 1405 if (m != NULL) { 1406 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 1407 // data->method() may be null in case of a concurrent 1408 // allocation. Maybe it's for the same method. Try to use that 1409 // entry in that case. 1410 if (dp->bci() == bci) { 1411 if (data->method() == NULL) { 1412 assert(concurrent, "impossible because no concurrent allocation"); 1413 return NULL; 1414 } else if (data->method() == m) { 1415 return data; 1416 } 1417 } 1418 } 1419 break; 1420 default: 1421 fatal("unexpected tag %d", dp->tag()); 1422 } 1423 } 1424 return NULL; 1425 } 1426 1427 1428 // Translate a bci to its corresponding extra data, or NULL. 1429 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) { 1430 // This code assumes an entry for a SpeculativeTrapData is 2 cells 1431 assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) == 1432 DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()), 1433 "code needs to be adjusted"); 1434 1435 // Do not create one of these if method has been redefined. 1436 if (m != NULL && m->is_old()) { 1437 return NULL; 1438 } 1439 1440 DataLayout* dp = extra_data_base(); 1441 DataLayout* end = args_data_limit(); 1442 1443 // Allocation in the extra data space has to be atomic because not 1444 // all entries have the same size and non atomic concurrent 1445 // allocation would result in a corrupted extra data space. 1446 ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true); 1447 if (result != NULL) { 1448 return result; 1449 } 1450 1451 if (create_if_missing && dp < end) { 1452 MutexLocker ml(&_extra_data_lock); 1453 // Check again now that we have the lock. Another thread may 1454 // have added extra data entries. 1455 ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false); 1456 if (result != NULL || dp >= end) { 1457 return result; 1458 } 1459 1460 assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free"); 1461 assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info"); 1462 u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag; 1463 // SpeculativeTrapData is 2 slots. Make sure we have room. 1464 if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) { 1465 return NULL; 1466 } 1467 DataLayout temp; 1468 temp.initialize(tag, bci, 0); 1469 1470 dp->set_header(temp.header()); 1471 assert(dp->tag() == tag, "sane"); 1472 assert(dp->bci() == bci, "no concurrent allocation"); 1473 if (tag == DataLayout::bit_data_tag) { 1474 return new BitData(dp); 1475 } else { 1476 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 1477 data->set_method(m); 1478 return data; 1479 } 1480 } 1481 return NULL; 1482 } 1483 1484 ArgInfoData *MethodData::arg_info() { 1485 DataLayout* dp = extra_data_base(); 1486 DataLayout* end = args_data_limit(); 1487 for (; dp < end; dp = next_extra(dp)) { 1488 if (dp->tag() == DataLayout::arg_info_data_tag) 1489 return new ArgInfoData(dp); 1490 } 1491 return NULL; 1492 } 1493 1494 // Printing 1495 1496 void MethodData::print_on(outputStream* st) const { 1497 assert(is_methodData(), "should be method data"); 1498 st->print("method data for "); 1499 method()->print_value_on(st); 1500 st->cr(); 1501 print_data_on(st); 1502 } 1503 1504 void MethodData::print_value_on(outputStream* st) const { 1505 assert(is_methodData(), "should be method data"); 1506 st->print("method data for "); 1507 method()->print_value_on(st); 1508 } 1509 1510 void MethodData::print_data_on(outputStream* st) const { 1511 ResourceMark rm; 1512 ProfileData* data = first_data(); 1513 if (_parameters_type_data_di != no_parameters) { 1514 parameters_type_data()->print_data_on(st); 1515 } 1516 for ( ; is_valid(data); data = next_data(data)) { 1517 st->print("%d", dp_to_di(data->dp())); 1518 st->fill_to(6); 1519 data->print_data_on(st, this); 1520 } 1521 st->print_cr("--- Extra data:"); 1522 DataLayout* dp = extra_data_base(); 1523 DataLayout* end = args_data_limit(); 1524 for (;; dp = next_extra(dp)) { 1525 assert(dp < end, "moved past end of extra data"); 1526 // No need for "Atomic::load_acquire" ops, 1527 // since the data structure is monotonic. 1528 switch(dp->tag()) { 1529 case DataLayout::no_tag: 1530 continue; 1531 case DataLayout::bit_data_tag: 1532 data = new BitData(dp); 1533 break; 1534 case DataLayout::speculative_trap_data_tag: 1535 data = new SpeculativeTrapData(dp); 1536 break; 1537 case DataLayout::arg_info_data_tag: 1538 data = new ArgInfoData(dp); 1539 dp = end; // ArgInfoData is at the end of extra data section. 1540 break; 1541 default: 1542 fatal("unexpected tag %d", dp->tag()); 1543 } 1544 st->print("%d", dp_to_di(data->dp())); 1545 st->fill_to(6); 1546 data->print_data_on(st); 1547 if (dp >= end) return; 1548 } 1549 } 1550 1551 // Verification 1552 1553 void MethodData::verify_on(outputStream* st) { 1554 guarantee(is_methodData(), "object must be method data"); 1555 // guarantee(m->is_perm(), "should be in permspace"); 1556 this->verify_data_on(st); 1557 } 1558 1559 void MethodData::verify_data_on(outputStream* st) { 1560 NEEDS_CLEANUP; 1561 // not yet implemented. 1562 } 1563 1564 bool MethodData::profile_jsr292(const methodHandle& m, int bci) { 1565 if (m->is_compiled_lambda_form()) { 1566 return true; 1567 } 1568 1569 Bytecode_invoke inv(m , bci); 1570 return inv.is_invokedynamic() || inv.is_invokehandle(); 1571 } 1572 1573 bool MethodData::profile_unsafe(const methodHandle& m, int bci) { 1574 Bytecode_invoke inv(m , bci); 1575 if (inv.is_invokevirtual()) { 1576 Symbol* klass = inv.klass(); 1577 if (klass == vmSymbols::jdk_internal_misc_Unsafe() || 1578 klass == vmSymbols::sun_misc_Unsafe() || 1579 klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) { 1580 Symbol* name = inv.name(); 1581 if (name->starts_with("get") || name->starts_with("put")) { 1582 return true; 1583 } 1584 } 1585 } 1586 return false; 1587 } 1588 1589 int MethodData::profile_arguments_flag() { 1590 return TypeProfileLevel % 10; 1591 } 1592 1593 bool MethodData::profile_arguments() { 1594 return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all; 1595 } 1596 1597 bool MethodData::profile_arguments_jsr292_only() { 1598 return profile_arguments_flag() == type_profile_jsr292; 1599 } 1600 1601 bool MethodData::profile_all_arguments() { 1602 return profile_arguments_flag() == type_profile_all; 1603 } 1604 1605 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) { 1606 if (!profile_arguments()) { 1607 return false; 1608 } 1609 1610 if (profile_all_arguments()) { 1611 return true; 1612 } 1613 1614 if (profile_unsafe(m, bci)) { 1615 return true; 1616 } 1617 1618 assert(profile_arguments_jsr292_only(), "inconsistent"); 1619 return profile_jsr292(m, bci); 1620 } 1621 1622 int MethodData::profile_return_flag() { 1623 return (TypeProfileLevel % 100) / 10; 1624 } 1625 1626 bool MethodData::profile_return() { 1627 return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all; 1628 } 1629 1630 bool MethodData::profile_return_jsr292_only() { 1631 return profile_return_flag() == type_profile_jsr292; 1632 } 1633 1634 bool MethodData::profile_all_return() { 1635 return profile_return_flag() == type_profile_all; 1636 } 1637 1638 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) { 1639 if (!profile_return()) { 1640 return false; 1641 } 1642 1643 if (profile_all_return()) { 1644 return true; 1645 } 1646 1647 assert(profile_return_jsr292_only(), "inconsistent"); 1648 return profile_jsr292(m, bci); 1649 } 1650 1651 int MethodData::profile_parameters_flag() { 1652 return TypeProfileLevel / 100; 1653 } 1654 1655 bool MethodData::profile_parameters() { 1656 return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all; 1657 } 1658 1659 bool MethodData::profile_parameters_jsr292_only() { 1660 return profile_parameters_flag() == type_profile_jsr292; 1661 } 1662 1663 bool MethodData::profile_all_parameters() { 1664 return profile_parameters_flag() == type_profile_all; 1665 } 1666 1667 bool MethodData::profile_parameters_for_method(const methodHandle& m) { 1668 if (!profile_parameters()) { 1669 return false; 1670 } 1671 1672 if (profile_all_parameters()) { 1673 return true; 1674 } 1675 1676 assert(profile_parameters_jsr292_only(), "inconsistent"); 1677 return m->is_compiled_lambda_form(); 1678 } 1679 1680 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) { 1681 log_trace(cds)("Iter(MethodData): %p", this); 1682 it->push(&_method); 1683 } 1684 1685 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) { 1686 if (shift == 0) { 1687 return; 1688 } 1689 if (!reset) { 1690 // Move all cells of trap entry at dp left by "shift" cells 1691 intptr_t* start = (intptr_t*)dp; 1692 intptr_t* end = (intptr_t*)next_extra(dp); 1693 for (intptr_t* ptr = start; ptr < end; ptr++) { 1694 *(ptr-shift) = *ptr; 1695 } 1696 } else { 1697 // Reset "shift" cells stopping at dp 1698 intptr_t* start = ((intptr_t*)dp) - shift; 1699 intptr_t* end = (intptr_t*)dp; 1700 for (intptr_t* ptr = start; ptr < end; ptr++) { 1701 *ptr = 0; 1702 } 1703 } 1704 } 1705 1706 // Check for entries that reference an unloaded method 1707 class CleanExtraDataKlassClosure : public CleanExtraDataClosure { 1708 bool _always_clean; 1709 public: 1710 CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {} 1711 bool is_live(Method* m) { 1712 return !(_always_clean) && m->method_holder()->is_loader_alive(); 1713 } 1714 }; 1715 1716 // Check for entries that reference a redefined method 1717 class CleanExtraDataMethodClosure : public CleanExtraDataClosure { 1718 public: 1719 CleanExtraDataMethodClosure() {} 1720 bool is_live(Method* m) { return !m->is_old(); } 1721 }; 1722 1723 1724 // Remove SpeculativeTrapData entries that reference an unloaded or 1725 // redefined method 1726 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) { 1727 DataLayout* dp = extra_data_base(); 1728 DataLayout* end = args_data_limit(); 1729 1730 int shift = 0; 1731 for (; dp < end; dp = next_extra(dp)) { 1732 switch(dp->tag()) { 1733 case DataLayout::speculative_trap_data_tag: { 1734 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 1735 Method* m = data->method(); 1736 assert(m != NULL, "should have a method"); 1737 if (!cl->is_live(m)) { 1738 // "shift" accumulates the number of cells for dead 1739 // SpeculativeTrapData entries that have been seen so 1740 // far. Following entries must be shifted left by that many 1741 // cells to remove the dead SpeculativeTrapData entries. 1742 shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp); 1743 } else { 1744 // Shift this entry left if it follows dead 1745 // SpeculativeTrapData entries 1746 clean_extra_data_helper(dp, shift); 1747 } 1748 break; 1749 } 1750 case DataLayout::bit_data_tag: 1751 // Shift this entry left if it follows dead SpeculativeTrapData 1752 // entries 1753 clean_extra_data_helper(dp, shift); 1754 continue; 1755 case DataLayout::no_tag: 1756 case DataLayout::arg_info_data_tag: 1757 // We are at end of the live trap entries. The previous "shift" 1758 // cells contain entries that are either dead or were shifted 1759 // left. They need to be reset to no_tag 1760 clean_extra_data_helper(dp, shift, true); 1761 return; 1762 default: 1763 fatal("unexpected tag %d", dp->tag()); 1764 } 1765 } 1766 } 1767 1768 // Verify there's no unloaded or redefined method referenced by a 1769 // SpeculativeTrapData entry 1770 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) { 1771 #ifdef ASSERT 1772 DataLayout* dp = extra_data_base(); 1773 DataLayout* end = args_data_limit(); 1774 1775 for (; dp < end; dp = next_extra(dp)) { 1776 switch(dp->tag()) { 1777 case DataLayout::speculative_trap_data_tag: { 1778 SpeculativeTrapData* data = new SpeculativeTrapData(dp); 1779 Method* m = data->method(); 1780 assert(m != NULL && cl->is_live(m), "Method should exist"); 1781 break; 1782 } 1783 case DataLayout::bit_data_tag: 1784 continue; 1785 case DataLayout::no_tag: 1786 case DataLayout::arg_info_data_tag: 1787 return; 1788 default: 1789 fatal("unexpected tag %d", dp->tag()); 1790 } 1791 } 1792 #endif 1793 } 1794 1795 void MethodData::clean_method_data(bool always_clean) { 1796 ResourceMark rm; 1797 for (ProfileData* data = first_data(); 1798 is_valid(data); 1799 data = next_data(data)) { 1800 data->clean_weak_klass_links(always_clean); 1801 } 1802 ParametersTypeData* parameters = parameters_type_data(); 1803 if (parameters != NULL) { 1804 parameters->clean_weak_klass_links(always_clean); 1805 } 1806 1807 CleanExtraDataKlassClosure cl(always_clean); 1808 clean_extra_data(&cl); 1809 verify_extra_data_clean(&cl); 1810 } 1811 1812 // This is called during redefinition to clean all "old" redefined 1813 // methods out of MethodData for all methods. 1814 void MethodData::clean_weak_method_links() { 1815 ResourceMark rm; 1816 CleanExtraDataMethodClosure cl; 1817 clean_extra_data(&cl); 1818 verify_extra_data_clean(&cl); 1819 }