1 /* 2 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"

// ==================================================================
// DataLayout
//
// Overlay for generic profiling data.

// Some types of data layouts need a length field: those whose payload is a
// variable-length array of cells.
bool DataLayout::needs_array_len(u1 tag) {
  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
}

// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
// Zeroes the header and all cells, then fills in tag, bci, and (for
// array-shaped layouts) the embedded length cell.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  _header._bits = (intptr_t)0;
  _header._struct._tag = tag;
  _header._struct._bci = bci;
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Call-type layouts carry extra per-argument/return structure that has its
  // own initialization protocol.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}

// Delegate to the typed ProfileData view of this layout.
void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}


// ==================================================================
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.

// Constructor for invalid ProfileData.
ProfileData::ProfileData() {
  _data = nullptr;
}

// Scan the MDO's extra-data section for SpeculativeTrapData entries that
// match this data's bci, and render them into a resource-allocated string
// (returned when the scan reaches the end sentinel).  Used to annotate
// print_data_on output with speculative-trap information.
char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the extra data proper; return what we accumulated.
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}

void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}

// Print the parts common to all ProfileData kinds: bci, type name, any
// recorded trap state, the caller-supplied extra annotation, and flags.
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) %p/%d", flags, data(), in_bytes(DataLayout::flags_offset()));
  }
}

// Advance output to the first or second tab column.
void ProfileData::tab(outputStream* st, bool first) const {
  st->fill_to(first ? tab_width_one : tab_width_two);
}

// ==================================================================
// BitData
//
// A BitData corresponds to a one-bit flag.  This is used to indicate
// whether a checkcast bytecode has seen a null value.


void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}

// ==================================================================
// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.
181 182 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 183 assert(stream->bci() == bci(), "wrong pos"); 184 int target; 185 Bytecodes::Code c = stream->code(); 186 if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) { 187 target = stream->dest_w(); 188 } else { 189 target = stream->dest(); 190 } 191 int my_di = mdo->dp_to_di(dp()); 192 int target_di = mdo->bci_to_di(target); 193 int offset = target_di - my_di; 194 set_displacement(offset); 195 } 196 197 void JumpData::print_data_on(outputStream* st, const char* extra) const { 198 print_shared(st, "JumpData", extra); 199 st->print_cr("taken(%u) displacement(%d)", taken(), displacement()); 200 } 201 202 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) { 203 // Parameter profiling include the receiver 204 int args_count = include_receiver ? 1 : 0; 205 ResourceMark rm; 206 ReferenceArgumentCount rac(signature); 207 args_count += rac.count(); 208 args_count = MIN2(args_count, max); 209 return args_count * per_arg_cell_count; 210 } 211 212 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) { 213 assert(Bytecodes::is_invoke(stream->code()), "should be invoke"); 214 assert(TypeStackSlotEntries::per_arg_count() > SingleTypeEntry::static_cell_count(), "code to test for arguments/results broken"); 215 const methodHandle m = stream->method(); 216 int bci = stream->bci(); 217 Bytecode_invoke inv(m, bci); 218 int args_cell = 0; 219 if (MethodData::profile_arguments_for_invoke(m, bci)) { 220 args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit); 221 } 222 int ret_cell = 0; 223 if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) { 224 ret_cell = SingleTypeEntry::static_cell_count(); 225 } 226 int header_cell = 0; 227 if (args_cell + ret_cell > 0) { 228 header_cell = header_cell_count(); 229 } 230 231 return header_cell + args_cell + ret_cell; 232 } 233 234 
// Walks a method signature and records, for each reference parameter (up to
// max of them), its word offset within the argument list.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                      // max number of reference args to record
  int _offset;                   // running word offset in the argument list
  GrowableArray<int> _offsets;   // recorded offsets of reference args

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  int off_at(int i) const { return _offsets.at(i); }
};

// Fill in the stack slot for each profiled entry and reset its type to
// type_none.  When profiling parameters (include_receiver), entry 0 is the
// receiver at slot 0.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling includes the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    // Shift by one slot when a receiver occupies slot 0.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

// Drop recorded Klass pointers whose class loader is no longer alive (or all
// of them when always_clean), keeping the status bits of each entry.
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr && (always_clean || !k->is_loader_alive())) {
      set_type(i, with_status((Klass*)nullptr, p));
    }
  }
}

void SingleTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr && (always_clean || !k->is_loader_alive())) {
    set_type(with_status((Klass*)nullptr, p));
  }
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

// Print a recorded type entry: none/unknown sentinel or the Klass itself,
// plus the null-seen bit.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void SingleTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

// Clear rows whose receiver Klass has a dead class loader (or all rows when
// always_clean).
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr && (always_clean || !p->is_loader_alive())) {
      clear_row(row);
    }
  }
}

// Print the occupied receiver rows with their counts and each row's share of
// the total (base count plus all per-row counts).
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.
455 456 void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 457 for (uint row = 0; row < row_limit(); row++) { 458 set_bci_displacement(row, -1); 459 set_bci(row, no_bci); 460 } 461 // release so other threads see a consistent state. bci is used as 462 // a valid flag for bci_displacement. 463 OrderAccess::release(); 464 } 465 466 // This routine needs to atomically update the RetData structure, so the 467 // caller needs to hold the RetData_lock before it gets here. Since taking 468 // the lock can block (and allow GC) and since RetData is a ProfileData is a 469 // wrapper around a derived oop, taking the lock in _this_ method will 470 // basically cause the 'this' pointer's _data field to contain junk after the 471 // lock. We require the caller to take the lock before making the ProfileData 472 // structure. Currently the only caller is InterpreterRuntime::update_mdp_for_ret 473 address RetData::fixup_ret(int return_bci, MethodData* h_mdo) { 474 // First find the mdp which corresponds to the return bci. 475 address mdp = h_mdo->bci_to_dp(return_bci); 476 477 // Now check to see if any of the cache slots are open. 478 for (uint row = 0; row < row_limit(); row++) { 479 if (bci(row) == no_bci) { 480 set_bci_displacement(row, checked_cast<int>(mdp - dp())); 481 set_bci_count(row, DataLayout::counter_increment); 482 // Barrier to ensure displacement is written before the bci; allows 483 // the interpreter to read displacement without fear of race condition. 
484 release_set_bci(row, return_bci); 485 break; 486 } 487 } 488 return mdp; 489 } 490 491 void RetData::print_data_on(outputStream* st, const char* extra) const { 492 print_shared(st, "RetData", extra); 493 uint row; 494 int entries = 0; 495 for (row = 0; row < row_limit(); row++) { 496 if (bci(row) != no_bci) entries++; 497 } 498 st->print_cr("count(%u) entries(%u)", count(), entries); 499 for (row = 0; row < row_limit(); row++) { 500 if (bci(row) != no_bci) { 501 tab(st); 502 st->print_cr("bci(%d: count(%u) displacement(%d))", 503 bci(row), bci_count(row), bci_displacement(row)); 504 } 505 } 506 } 507 508 // ================================================================== 509 // BranchData 510 // 511 // A BranchData is used to access profiling data for a two-way branch. 512 // It consists of taken and not_taken counts as well as a data displacement 513 // for the taken case. 514 515 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) { 516 assert(stream->bci() == bci(), "wrong pos"); 517 int target = stream->dest(); 518 int my_di = mdo->dp_to_di(dp()); 519 int target_di = mdo->bci_to_di(target); 520 int offset = target_di - my_di; 521 set_displacement(offset); 522 } 523 524 void BranchData::print_data_on(outputStream* st, const char* extra) const { 525 print_shared(st, "BranchData", extra); 526 if (data()->flags()) { 527 tty->cr(); 528 tab(st); 529 } 530 st->print_cr("taken(%u) displacement(%d)", 531 taken(), displacement()); 532 tab(st); 533 st->print_cr("not taken(%u)", not_taken()); 534 } 535 536 // ================================================================== 537 // MultiBranchData 538 // 539 // A MultiBranchData is used to access profiling information for 540 // a multi-way branch (*switch bytecodes). It consists of a series 541 // of (count, displacement) pairs, which count the number of times each 542 // case was taken and specify the data displacement for each branch target. 
// One cell for the default count plus a (count, displacement) pair per
// switch case and one more pair for the default target.
int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

// Fill in the displacement (profile-data index delta) for every case target
// and for the default target of the switch.
void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print(" 0x%x", arg_modified(i));
  }
  st->cr();
}

// Cells needed to profile this method's parameter types: one per-arg group
// per profiled reference parameter (receiver included for non-static
// methods) plus one cell for the array length; zero when parameter
// profiling is off for this method or there is nothing to profile.
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means no limit.
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

void ArrayLoadStoreData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArrayLoadStore", extra);
  st->cr();
  tab(st, true);
  st->print("array");
  _array.print_data_on(st);
  tab(st, true);
  st->print("element");
  _element.print_data_on(st);
}

void ACmpData::print_data_on(outputStream* st, const char* extra) const {
  BranchData::print_data_on(st, extra);
  tab(st, true);
  st->print("left");
  _left.print_data_on(st);
  tab(st, true);
  st->print("right");
  _right.print_data_on(st);
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

// Allocate a MethodData in the loader's metaspace, sized for the method's
// bytecodes.  Must not hold locks: metaspace allocation can block.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}

// Number of profiling cells a bytecode needs: a fixed count per kind,
// variable_cell_count when it depends on the site (switches, profiled
// invokes), or no_profile_data for unprofiled bytecodes.
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
    return no_profile_data;
  }
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
    return ArrayLoadStoreData::static_cell_count();
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_if_acmpne:
  case Bytecodes::_if_acmpeq:
    return ACmpData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
// Size in bytes of this bytecode's profiling entry, resolving
// variable_cell_count kinds from the concrete call/switch site.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note: cell_count might be zero, meaning that there is just
  //       a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    // Only C2 performs type speculation; without it, fall through to false.
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false;
}

#if INCLUDE_JVMCI

// Placement new sized for the trailing speculation payload (fs_size >= size).
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}

FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}

// A heuristic check to detect nmethods that outlive a failed speculations list.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  // free_failed_speculations tags a freed list head by setting its low bit.
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != nullptr) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != nullptr) {
        method->print_name(&st);
      } else {
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != nullptr) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}

// Append a speculation to the lock-free singly-linked list, unless an equal
// entry is already present.  Returns true if a new entry was appended.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // Try to install fs at the end of the list; a competing writer may win.
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}

void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
  assert(failed_speculations_address != nullptr, "must be");
  FailedSpeculation* fs = *failed_speculations_address;
  while (fs != nullptr) {
    FailedSpeculation* next = fs->next();
    delete fs;
    fs = next;
  }

  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This allows for the check
  // in add_failed_speculation against adding to a freed failed
  // speculations list.
  long* head = (long*) failed_speculations_address;
  (*head) = (*head) | 0x1;
}
#endif // INCLUDE_JVMCI

// Number of spare extra-data DataLayout slots to reserve for traps that
// occur at bytecodes with no regular profile entry, plus room for
// SpeculativeTrapData entries when type speculation is in use.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }
  return object_size;
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in words
int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_metadata_size(word_size);
}

// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
    return 0;
  }
  int cell_count = -1;
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
    cell_count = ArrayLoadStoreData::static_cell_count();
    tag = DataLayout::array_load_store_data_tag;
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
profile_return_for_invoke(stream->method(), stream->bci())) { 1040 cell_count = CallTypeData::compute_cell_count(stream); 1041 } else { 1042 cell_count = counter_data_cell_count; 1043 } 1044 if (cell_count > counter_data_cell_count) { 1045 tag = DataLayout::call_type_data_tag; 1046 } else { 1047 tag = DataLayout::counter_data_tag; 1048 } 1049 break; 1050 } 1051 case Bytecodes::_goto: 1052 case Bytecodes::_goto_w: 1053 case Bytecodes::_jsr: 1054 case Bytecodes::_jsr_w: 1055 cell_count = JumpData::static_cell_count(); 1056 tag = DataLayout::jump_data_tag; 1057 break; 1058 case Bytecodes::_invokevirtual: 1059 case Bytecodes::_invokeinterface: { 1060 int virtual_call_data_cell_count = VirtualCallData::static_cell_count(); 1061 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 1062 profile_return_for_invoke(stream->method(), stream->bci())) { 1063 cell_count = VirtualCallTypeData::compute_cell_count(stream); 1064 } else { 1065 cell_count = virtual_call_data_cell_count; 1066 } 1067 if (cell_count > virtual_call_data_cell_count) { 1068 tag = DataLayout::virtual_call_type_data_tag; 1069 } else { 1070 tag = DataLayout::virtual_call_data_tag; 1071 } 1072 break; 1073 } 1074 case Bytecodes::_invokedynamic: { 1075 // %%% should make a type profile for any invokedynamic that takes a ref argument 1076 int counter_data_cell_count = CounterData::static_cell_count(); 1077 if (profile_arguments_for_invoke(stream->method(), stream->bci()) || 1078 profile_return_for_invoke(stream->method(), stream->bci())) { 1079 cell_count = CallTypeData::compute_cell_count(stream); 1080 } else { 1081 cell_count = counter_data_cell_count; 1082 } 1083 if (cell_count > counter_data_cell_count) { 1084 tag = DataLayout::call_type_data_tag; 1085 } else { 1086 tag = DataLayout::counter_data_tag; 1087 } 1088 break; 1089 } 1090 case Bytecodes::_ret: 1091 cell_count = RetData::static_cell_count(); 1092 tag = DataLayout::ret_data_tag; 1093 break; 1094 case Bytecodes::_ifeq: 1095 case 
Bytecodes::_ifne: 1096 case Bytecodes::_iflt: 1097 case Bytecodes::_ifge: 1098 case Bytecodes::_ifgt: 1099 case Bytecodes::_ifle: 1100 case Bytecodes::_if_icmpeq: 1101 case Bytecodes::_if_icmpne: 1102 case Bytecodes::_if_icmplt: 1103 case Bytecodes::_if_icmpge: 1104 case Bytecodes::_if_icmpgt: 1105 case Bytecodes::_if_icmple: 1106 case Bytecodes::_ifnull: 1107 case Bytecodes::_ifnonnull: 1108 cell_count = BranchData::static_cell_count(); 1109 tag = DataLayout::branch_data_tag; 1110 break; 1111 case Bytecodes::_if_acmpeq: 1112 case Bytecodes::_if_acmpne: 1113 cell_count = ACmpData::static_cell_count(); 1114 tag = DataLayout::acmp_data_tag; 1115 break; 1116 case Bytecodes::_lookupswitch: 1117 case Bytecodes::_tableswitch: 1118 cell_count = MultiBranchData::compute_cell_count(stream); 1119 tag = DataLayout::multi_branch_data_tag; 1120 break; 1121 default: 1122 break; 1123 } 1124 assert(tag == DataLayout::multi_branch_data_tag || 1125 ((MethodData::profile_arguments() || MethodData::profile_return()) && 1126 (tag == DataLayout::call_type_data_tag || 1127 tag == DataLayout::counter_data_tag || 1128 tag == DataLayout::virtual_call_type_data_tag || 1129 tag == DataLayout::virtual_call_data_tag)) || 1130 cell_count == bytecode_cell_count(c), "cell counts must agree"); 1131 if (cell_count >= 0) { 1132 assert(tag != DataLayout::no_tag, "bad tag"); 1133 assert(bytecode_has_profile(c), "agree w/ BHP"); 1134 data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count); 1135 return DataLayout::compute_size_in_bytes(cell_count); 1136 } else { 1137 assert(!bytecode_has_profile(c), "agree w/ !BHP"); 1138 return 0; 1139 } 1140 } 1141 1142 // Get the data at an arbitrary (sort of) data index. 
ProfileData* MethodData::data_at(int data_index) const {
  // Out-of-range indices yield null rather than asserting; callers use this
  // as the iteration-termination signal (see next_data()).
  if (out_of_bounds(data_index)) {
    return nullptr;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

// Number of payload cells in this layout, derived from its tag.
// Fixed-size layouts answer via static_cell_count(); variable-sized ones
// (switches, arg info, call/parameter types, ...) consult the layout itself
// through a freshly resource-allocated wrapper.
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  case DataLayout::array_load_store_data_tag:
    return ((new ArrayLoadStoreData(this))->cell_count());
  case DataLayout::acmp_data_tag:
    return ((new ACmpData(this))->cell_count());
  }
}

// Wrap this raw layout in the ProfileData subclass matching its tag.
// The wrappers are resource-allocated; callers are expected to hold a
// ResourceMark.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  case DataLayout::array_load_store_data_tag:
    return new ArrayLoadStoreData(this);
  case DataLayout::acmp_data_tag:
    return new ACmpData(this);
  }
}

// Iteration over data.
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  // data_at() returns null when next_index runs past the data section.
  ProfileData* next = data_at(next_index);
  return next;
}

DataLayout* MethodData::next_data_layout(DataLayout* current) const {
  int current_index = dp_to_di((address)current);
  int next_index = current_index + current->size_in_bytes();
  if (out_of_bounds(next_index)) {
    return nullptr;
  }
  DataLayout* next = data_layout_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  // Re-walk the bytecodes, positioning the stream at each profiled bci so
  // the per-kind post_initialize can inspect the bytecode (e.g. branch
  // targets).
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    // Parameter profiling has no bci, hence the null stream.
    parameters_type_data()->post_initialize(nullptr, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _extra_data_lock(Mutex::safepoint-2, "MDOExtraData_lock"),
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  initialize();
}

// Lay out and initialize every section of the MDO: per-bytecode data,
// extra (trap) data, the arg-info cell and, optionally, the parameter-type
// profiling area.  Must mirror compute_allocation_size_in_bytes() exactly
// (checked by the assert at the end).
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  if (parms_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(parms_cell);
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}

// Reset counters, per-method notification masks and feature-specific state
// (JVMCI speculation list, RTM mode, escape info).
void MethodData::init() {
  _compiler_counters = CompilerCounters();  // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM;  // No RTM lock eliding by default
  if (UseRTMLocking &&
      !CompilerOracle::has_option(mh, CompileCommand::NoRTMLockEliding)) {
    if (CompilerOracle::has_option(mh, CompileCommand::UseRTMLockEliding) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize escape flags.
  clear_escape_info();
}

// Get a measure of how much mileage the method has on it.
int MethodData::mileage_of(Method* method) {
  return MAX2(method->invocation_count(), method->backedge_count());
}

bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(_method);
}

// Translate a bci to its corresponding data index (di).
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Cache the exact hit, or the closest predecessor, to speed up the
      // next lookup.
      if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr)  set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}

// Translate a bci to its corresponding data, or null.
ProfileData* MethodData::bci_to_data(int bci) {
  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      break;
    }
  }
  // Not in the regular section; fall back to the extra data section
  // (lookup only -- does not allocate).
  return bci_to_extra_data(bci, nullptr, false);
}

// Advance to the next entry in the extra data section.  Only the entry
// kinds that can legally appear there are handled.
DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}

// Scan the extra data section for an entry matching bci (and method m, for
// speculative traps).  On return, dp points at the first free slot (or the
// end) so the caller can allocate there.  Returns null if no match.
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr;  // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        // data->method() may be null in case of a concurrent
        // allocation. Maybe it's for the same method. Try to use that
        // entry in that case.
        if (dp->bci() == bci) {
          if (data->method() == nullptr) {
            assert(concurrent, "impossible because no concurrent allocation");
            return nullptr;
          } else if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}


// Translate a bci to its corresponding extra data, or null.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non atomic concurrent
  // allocation would result in a corrupted extra data space.
  // First try a lock-free lookup; dp is advanced to the first free slot.
  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
  if (result != nullptr) {
    return result;
  }

  if (create_if_missing && dp < end) {
    MutexLocker ml(&_extra_data_lock);
    // Check again now that we have the lock. Another thread may
    // have added extra data entries.
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
    if (result != nullptr || dp >= end) {
      return result;
    }

    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Build the header in a temporary, then publish it with a single
    // header store so concurrent lock-free readers never see a
    // half-initialized entry.
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}

// Locate the ArgInfoData entry (sits at the end of the extra data section).
ArgInfoData *MethodData::arg_info() {
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (; dp < end; dp = next_extra(dp)) {
    if (dp->tag() == DataLayout::arg_info_data_tag)
      return new ArgInfoData(dp);
  }
  return nullptr;
}

// Printing

void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}

void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}

void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end;  // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    if (dp >= end) return;
  }
}

// Verification

void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}

void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}

// True for call sites that should be profiled under the jsr292-only policy:
// lambda forms and invokedynamic/invokehandle call sites.
bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m , bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

// True for invokevirtual of Unsafe/ScopedMemoryAccess get*/put* methods,
// which benefit from argument type profiling.
bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m , bci);
  if (inv.is_invokevirtual()) {
    Symbol* klass = inv.klass();
    if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
        klass == vmSymbols::sun_misc_Unsafe() ||
        klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
      Symbol* name = inv.name();
      if (name->starts_with("get") || name->starts_with("put")) {
        return true;
      }
    }
  }
  return false;
}

// TypeProfileLevel is a three-digit flag: ones digit = arguments,
// tens digit = return, hundreds digit = parameters.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}

// Compact the extra data section: either slide the entry at dp left by
// "shift" cells (reset == false), or zero the "shift" cells ending at dp
// (reset == true), which clears the now-unused tail after compaction.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
  bool _always_clean;
public:
  CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
  bool is_live(Method* m) {
    return !(_always_clean) && m->method_holder()->is_loader_alive();
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};


// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

// Drop weak klass links throughout the regular and parameter data, then
// purge extra-data entries whose methods' holders are no longer alive.
void MethodData::clean_method_data(bool always_clean) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(always_clean);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != nullptr) {
    parameters->clean_weak_klass_links(always_clean);
  }

  CleanExtraDataKlassClosure cl(always_clean);
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

// This is called during redefinition to clean all "old" redefined
// methods out of MethodData for all methods.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  CleanExtraDataMethodClosure cl;
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}

void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}