/*
 * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_METHODDATA_HPP
#define SHARE_OOPS_METHODDATA_HPP

#include "interpreter/bytecodes.hpp"
#include "interpreter/invocationCounter.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutex.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

class BytecodeStream;

// The MethodData object collects counts and other profile information
// during zeroth-tier (interpreter) and third-tier (C1 with full profiling)
// execution.
//
// The profile is used later by compilation heuristics.  Some heuristics
// enable use of aggressive (or "heroic") optimizations.
// An aggressive
// optimization often has a down-side, a corner case that it handles
// poorly, but which is thought to be rare.  The profile provides
// evidence of this rarity for a given method or even BCI.  It allows
// the compiler to back out of the optimization at places where it
// has historically been a poor choice.  Other heuristics try to use
// specific information gathered about types observed at a given site.
//
// All data in the profile is approximate.  It is expected to be accurate
// on the whole, but the system expects occasional inaccuracies, due to
// counter overflow, multiprocessor races during data collection, space
// limitations, missing MDO blocks, etc.  Bad or missing data will degrade
// optimization quality but will not affect correctness.  Also, each MDO
// is marked with its birth-date ("creation_mileage") which can be used
// to assess the quality ("maturity") of its data.
//
// Short (<32-bit) counters are designed to overflow to a known "saturated"
// state.  Also, certain recorded per-BCI events are given one-bit counters
// which overflow to a saturated state which applied to all counters at
// that BCI.  In other words, there is a small lattice which approximates
// the ideal of an infinite-precision counter for each event at each BCI,
// and the lattice quickly "bottoms out" in a state where all counters
// are taken to be indefinitely large.
//
// The reader will find many data races in profile gathering code, starting
// with invocation counter incrementation.  None of these races harm correct
// execution of the compiled code.

// forward decl
class ProfileData;

// DataLayout
//
// Overlay for generic profiling data.
class DataLayout {
  friend class VMStructs;
  friend class JVMCIVMStructs;

private:
  // Every data layout begins with a header.  This header
  // contains a tag, which is used to indicate the size/layout
  // of the data, 8 bits of flags, which can be used in any way,
  // 32 bits of trap history (none/one reason/many reasons),
  // and a bci, which is used to tie this piece of data to a
  // specific bci in the bytecodes.
  union {
    u8 _bits;
    struct {
      u1 _tag;
      u1 _flags;
      u2 _bci;
      u4 _traps;
    } _struct;
  } _header;

  // The data layout has an arbitrary number of cells, each sized
  // to accommodate a pointer or an integer.
  intptr_t _cells[1];

  // Some types of data layouts need a length field.
  static bool needs_array_len(u1 tag);

public:
  enum {
    counter_increment = 1
  };

  enum {
    cell_size = sizeof(intptr_t)
  };

  // Tag values
  enum : u1 {
    no_tag,
    bit_data_tag,
    counter_data_tag,
    jump_data_tag,
    receiver_type_data_tag,
    virtual_call_data_tag,
    ret_data_tag,
    branch_data_tag,
    multi_branch_data_tag,
    arg_info_data_tag,
    call_type_data_tag,
    virtual_call_type_data_tag,
    parameters_type_data_tag,
    speculative_trap_data_tag,
    array_load_store_data_tag,
    acmp_data_tag
  };

  enum {
    // The trap state breaks down as [recompile:1 | reason:31].
    // This further breakdown is defined in deoptimization.cpp.
    // See Deoptimization::trap_state_reason for an assert that
    // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
    //
    // The trap_state is collected only if ProfileTraps is true.
    trap_bits = 1+31,  // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
    trap_mask = -1,    // -1: the trap state uses all 32 bits of the _traps field.
    first_flag = 0
  };

  // Size computation
  static int header_size_in_bytes() {
    return header_size_in_cells() * cell_size;
  }
  static int header_size_in_cells() {
    // The 8-byte header occupies one cell on 64-bit, two on 32-bit.
    return LP64_ONLY(1) NOT_LP64(2);
  }

  static int compute_size_in_bytes(int cell_count) {
    return header_size_in_bytes() + cell_count * cell_size;
  }

  // Initialization
  void initialize(u1 tag, u2 bci, int cell_count);

  // Accessors
  u1 tag() {
    return _header._struct._tag;
  }

  // Return 32 bits of trap state.
  // The state tells if traps with zero, one, or many reasons have occurred.
  // It also tells whether zero or many recompilations have occurred.
  // The associated trap histogram in the MDO itself tells whether
  // traps are common or not.  If a BCI shows that a trap X has
  // occurred, and the MDO shows N occurrences of X, we make the
  // simplifying assumption that all N occurrences can be blamed
  // on that BCI.
  uint trap_state() const {
    return _header._struct._traps;
  }

  // Note: the new state is OR-ed into the existing state, so trap
  // history only accumulates (it is never cleared by this path).
  void set_trap_state(uint new_state) {
    assert(ProfileTraps, "used only under +ProfileTraps");
    uint old_flags = _header._struct._traps;
    _header._struct._traps = new_state | old_flags;
  }

  u1 flags() const {
    return _header._struct._flags;
  }

  u2 bci() const {
    return _header._struct._bci;
  }

  void set_header(u8 value) {
    _header._bits = value;
  }
  u8 header() {
    return _header._bits;
  }
  void set_cell_at(int index, intptr_t value) {
    _cells[index] = value;
  }
  void release_set_cell_at(int index, intptr_t value);
  intptr_t cell_at(int index) const {
    return _cells[index];
  }

  void set_flag_at(u1 flag_number) {
    _header._struct._flags |= (u1)(0x1 << flag_number);
  }
  bool flag_at(u1 flag_number) const {
    return (_header._struct._flags & (0x1 << flag_number)) != 0;
  }

  // Low-level support for code generation.
  static ByteSize header_offset() {
    return byte_offset_of(DataLayout, _header);
  }
  static ByteSize tag_offset() {
    return byte_offset_of(DataLayout, _header._struct._tag);
  }
  static ByteSize flags_offset() {
    return byte_offset_of(DataLayout, _header._struct._flags);
  }
  static ByteSize bci_offset() {
    return byte_offset_of(DataLayout, _header._struct._bci);
  }
  static ByteSize cell_offset(int index) {
    return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
  }
  // Return a value which, when or-ed as a byte into _flags, sets the flag.
  static u1 flag_number_to_constant(u1 flag_number) {
    DataLayout temp; temp.set_header(0);
    temp.set_flag_at(flag_number);
    return temp._header._struct._flags;
  }
  // Return a value which, when or-ed as a word into _header, sets the flag.
  static u8 flag_mask_to_header_mask(u1 byte_constant) {
    DataLayout temp; temp.set_header(0);
    temp._header._struct._flags = byte_constant;
    return temp._header._bits;
  }

  ProfileData* data_in();

  int size_in_bytes() {
    int cells = cell_count();
    assert(cells >= 0, "invalid number of cells");
    return DataLayout::compute_size_in_bytes(cells);
  }
  int cell_count();

  // GC support
  void clean_weak_klass_links(bool always_clean);
};


// ProfileData class hierarchy
class ProfileData;
class BitData;
class CounterData;
class ReceiverTypeData;
class VirtualCallData;
class VirtualCallTypeData;
class RetData;
class CallTypeData;
class JumpData;
class BranchData;
class ACmpData;
class ArrayData;
class MultiBranchData;
class ArgInfoData;
class ParametersTypeData;
class SpeculativeTrapData;
class ArrayLoadStoreData;

// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.
class ProfileData : public ResourceObj {
  friend class TypeEntries;
  friend class SingleTypeEntry;
  friend class TypeStackSlotEntries;
private:
  enum {
    tab_width_one = 16,
    tab_width_two = 36
  };

  // This is a pointer to a section of profiling data.
  DataLayout* _data;

  char* print_data_on_helper(const MethodData* md) const;

protected:
  DataLayout* data() { return _data; }
  const DataLayout* data() const { return _data; }

  enum {
    cell_size = DataLayout::cell_size
  };

public:
  // How many cells are in this?
  virtual int cell_count() const {
    ShouldNotReachHere();
    return -1;
  }

  // Return the size of this data.
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }

protected:
  // Low-level accessors for underlying data
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  void release_set_intptr_at(int index, intptr_t value);
  intptr_t intptr_at(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_uint_at(int index, uint value);
  uint uint_at(int index) const {
    return (uint)intptr_at(index);
  }
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_int_at(int index, int value);
  int int_at(int index) const {
    return (int)intptr_at(index);
  }
  // Like int_at(), but skips the bounds assert (usable before the
  // subclass's cell_count() is meaningful, e.g. during construction).
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, cast_from_oop<intptr_t>(value));
  }
  oop oop_at(int index) const {
    return cast_to_oop(intptr_at(index));
  }

  void set_flag_at(u1 flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(u1 flag_number) const {
    return data()->flag_at(flag_number);
  }

  // two convenient imports for use by subclasses:
  static ByteSize cell_offset(int index) {
    return DataLayout::cell_offset(index);
  }
  static u1 flag_number_to_constant(u1 flag_number) {
    return DataLayout::flag_number_to_constant(flag_number);
  }

  ProfileData(DataLayout* data) {
    _data = data;
  }

public:
  // Constructor for invalid ProfileData.
  ProfileData();

  u2 bci() const {
    return data()->bci();
  }

  address dp() {
    return (address)_data;
  }

  int trap_state() const {
    return data()->trap_state();
  }
  void set_trap_state(int new_state) {
    data()->set_trap_state(new_state);
  }

  // Type checking
  virtual bool is_BitData()         const { return false; }
  virtual bool is_CounterData()     const { return false; }
  virtual bool is_JumpData()        const { return false; }
  virtual bool is_ReceiverTypeData()const { return false; }
  virtual bool is_VirtualCallData() const { return false; }
  virtual bool is_RetData()         const { return false; }
  virtual bool is_BranchData()      const { return false; }
  virtual bool is_ArrayData()       const { return false; }
  virtual bool is_MultiBranchData() const { return false; }
  virtual bool is_ArgInfoData()     const { return false; }
  virtual bool is_CallTypeData()    const { return false; }
  virtual bool is_VirtualCallTypeData()const { return false; }
  virtual bool is_ParametersTypeData() const { return false; }
  virtual bool is_SpeculativeTrapData()const { return false; }
  virtual bool is_ArrayLoadStoreData() const { return false; }
  virtual bool is_ACmpData()           const { return false; }


  // Checked downcasts: assert in debug builds, return nullptr on
  // mismatch in product builds.
  BitData* as_BitData() const {
    assert(is_BitData(), "wrong type");
    return is_BitData()        ? (BitData*)        this : nullptr;
  }
  CounterData* as_CounterData() const {
    assert(is_CounterData(), "wrong type");
    return is_CounterData()    ? (CounterData*)    this : nullptr;
  }
  JumpData* as_JumpData() const {
    assert(is_JumpData(), "wrong type");
    return is_JumpData()       ? (JumpData*)       this : nullptr;
  }
  ReceiverTypeData* as_ReceiverTypeData() const {
    assert(is_ReceiverTypeData(), "wrong type");
    return is_ReceiverTypeData() ? (ReceiverTypeData*)this : nullptr;
  }
  VirtualCallData* as_VirtualCallData() const {
    assert(is_VirtualCallData(), "wrong type");
    return is_VirtualCallData() ? (VirtualCallData*)this : nullptr;
  }
  RetData* as_RetData() const {
    assert(is_RetData(), "wrong type");
    return is_RetData()        ? (RetData*)        this : nullptr;
  }
  BranchData* as_BranchData() const {
    assert(is_BranchData(), "wrong type");
    return is_BranchData()     ? (BranchData*)     this : nullptr;
  }
  ArrayData* as_ArrayData() const {
    assert(is_ArrayData(), "wrong type");
    return is_ArrayData()      ? (ArrayData*)      this : nullptr;
  }
  MultiBranchData* as_MultiBranchData() const {
    assert(is_MultiBranchData(), "wrong type");
    return is_MultiBranchData() ? (MultiBranchData*)this : nullptr;
  }
  ArgInfoData* as_ArgInfoData() const {
    assert(is_ArgInfoData(), "wrong type");
    return is_ArgInfoData() ? (ArgInfoData*)this : nullptr;
  }
  CallTypeData* as_CallTypeData() const {
    assert(is_CallTypeData(), "wrong type");
    return is_CallTypeData() ? (CallTypeData*)this : nullptr;
  }
  VirtualCallTypeData* as_VirtualCallTypeData() const {
    assert(is_VirtualCallTypeData(), "wrong type");
    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
  }
  ParametersTypeData* as_ParametersTypeData() const {
    assert(is_ParametersTypeData(), "wrong type");
    return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
  }
  SpeculativeTrapData* as_SpeculativeTrapData() const {
    assert(is_SpeculativeTrapData(), "wrong type");
    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
  }
  ArrayLoadStoreData* as_ArrayLoadStoreData() const {
    assert(is_ArrayLoadStoreData(), "wrong type");
    return is_ArrayLoadStoreData() ? (ArrayLoadStoreData*)this : nullptr;
  }
  ACmpData* as_ACmpData() const {
    assert(is_ACmpData(), "wrong type");
    return is_ACmpData() ? (ACmpData*)this : nullptr;
  }


  // Subclass specific initialization
  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {}

  // CI translation: ProfileData can represent both MethodDataOop data
  // as well as CIMethodData data. This function is provided for translating
  // an oop in a ProfileData to the ci equivalent. Generally speaking,
  // most ProfileData don't require any translation, so we provide the null
  // translation here, and the required translators are in the ci subclasses.
  virtual void translate_from(const ProfileData* data) {}

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
    ShouldNotReachHere();
  }

  void print_data_on(outputStream* st, const MethodData* md) const;

  void print_shared(outputStream* st, const char* name, const char* extra) const;
  void tab(outputStream* st, bool first = false) const;
};

// BitData
//
// A BitData holds a flag or two in its header.
class BitData : public ProfileData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  enum : u1 {
    // null_seen:
    //  saw a null operand (cast/aastore/instanceof)
    null_seen_flag = DataLayout::first_flag + 0
#if INCLUDE_JVMCI
    // bytecode threw any exception
    , exception_seen_flag = null_seen_flag + 1
#endif
  };
  enum { bit_cell_count = 0 };  // no additional data fields needed.
public:
  BitData(DataLayout* layout) : ProfileData(layout) {
  }

  virtual bool is_BitData() const { return true; }

  static int static_cell_count() {
    return bit_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Accessor

  // The null_seen flag bit is specially known to the interpreter.
  // Consulting it allows the compiler to avoid setting up null_check traps.
  bool null_seen()     { return flag_at(null_seen_flag); }
  void set_null_seen()    { set_flag_at(null_seen_flag); }

#if INCLUDE_JVMCI
  // true if an exception was thrown at the specific BCI
  bool exception_seen() { return flag_at(exception_seen_flag); }
  void set_exception_seen() { set_flag_at(exception_seen_flag); }
#endif

  // Code generation support
  static u1 null_seen_byte_constant() {
    return flag_number_to_constant(null_seen_flag);
  }

  static ByteSize bit_data_size() {
    return cell_offset(bit_cell_count);
  }

  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// CounterData
//
// A CounterData corresponds to a simple counter.
class CounterData : public BitData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  enum {
    count_off,
    counter_cell_count
  };
public:
  CounterData(DataLayout* layout) : BitData(layout) {}

  virtual bool is_CounterData() const { return true; }

  static int static_cell_count() {
    return counter_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessor.
  // The raw cell is a full intptr_t; clamp it into jint range so the
  // returned value saturates rather than truncates on 64-bit.
  int count() const {
    intptr_t raw_data = intptr_at(count_off);
    if (raw_data > max_jint) {
      raw_data = max_jint;
    } else if (raw_data < min_jint) {
      raw_data = min_jint;
    }
    return int(raw_data);
  }

  // Code generation support
  static ByteSize count_offset() {
    return cell_offset(count_off);
  }
  static ByteSize counter_data_size() {
    return cell_offset(counter_cell_count);
  }

  void set_count(int count) {
    set_int_at(count_off, count);
  }

  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.
class JumpData : public ProfileData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  enum {
    taken_off_set,
    displacement_off_set,
    jump_cell_count
  };

  void set_displacement(int displacement) {
    set_int_at(displacement_off_set, displacement);
  }

public:
  JumpData(DataLayout* layout) : ProfileData(layout) {
    assert(layout->tag() == DataLayout::jump_data_tag ||
      layout->tag() == DataLayout::branch_data_tag ||
      layout->tag() == DataLayout::acmp_data_tag, "wrong type");
  }

  virtual bool is_JumpData() const { return true; }

  static int static_cell_count() {
    return jump_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessor
  uint taken() const {
    return uint_at(taken_off_set);
  }

  void set_taken(uint cnt) {
    set_uint_at(taken_off_set, cnt);
  }

  // Saturating counter
  uint inc_taken() {
    uint cnt = taken() + 1;
    // Did we wrap? Will compiler screw us??
    if (cnt == 0) cnt--;
    set_uint_at(taken_off_set, cnt);
    return cnt;
  }

  int displacement() const {
    return int_at(displacement_off_set);
  }

  // Code generation support
  static ByteSize taken_offset() {
    return cell_offset(taken_off_set);
  }

  static ByteSize displacement_offset() {
    return cell_offset(displacement_off_set);
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// Entries in a ProfileData object to record types: it can either be
// none (no profile), unknown (conflicting profile data) or a klass if
// a single one is seen. Whether a null reference was seen is also
// recorded. No counter is associated with the type and a single type
// is tracked (unlike VirtualCallData).
class TypeEntries {

public:

  // A single cell is used to record information for a type:
  // - the cell is initialized to 0
  // - when a type is discovered it is stored in the cell
  // - bit zero of the cell is used to record whether a null reference
  // was encountered or not
  // - bit 1 is set to record a conflict in the type information

  enum {
    null_seen = 1,
    type_mask = ~null_seen,
    type_unknown = 2,
    status_bits = null_seen | type_unknown,
    type_klass_mask = ~status_bits
  };

  // what to initialize a cell to
  static intptr_t type_none() {
    return 0;
  }

  // null seen = bit 0 set?
  static bool was_null_seen(intptr_t v) {
    return (v & null_seen) != 0;
  }

  // conflicting type information = bit 1 set?
  static bool is_type_unknown(intptr_t v) {
    return (v & type_unknown) != 0;
  }

  // not type information yet = all bits cleared, ignoring bit 0?
  static bool is_type_none(intptr_t v) {
    return (v & type_mask) == 0;
  }

  // recorded type: cell without bit 0 and 1
  static intptr_t klass_part(intptr_t v) {
    intptr_t r = v & type_klass_mask;
    return r;
  }

  // type recorded
  static Klass* valid_klass(intptr_t k) {
    if (!is_type_none(k) &&
        !is_type_unknown(k)) {
      Klass* res = (Klass*)klass_part(k);
      assert(res != nullptr, "invalid");
      return res;
    } else {
      return nullptr;
    }
  }

  // Combine a klass pointer with the status bits (bit 0/1) of an
  // existing cell value.
  static intptr_t with_status(intptr_t k, intptr_t in) {
    return k | (in & status_bits);
  }

  static intptr_t with_status(Klass* k, intptr_t in) {
    return with_status((intptr_t)k, in);
  }

  static void print_klass(outputStream* st, intptr_t k);

protected:
  // ProfileData object these entries are part of
  ProfileData* _pd;
  // offset within the ProfileData object where the entries start
  const int _base_off;

  TypeEntries(int base_off)
    : _pd(nullptr), _base_off(base_off) {}

  void set_intptr_at(int index, intptr_t value) {
    _pd->set_intptr_at(index, value);
  }

  intptr_t intptr_at(int index) const {
    return _pd->intptr_at(index);
  }

public:
  void set_profile_data(ProfileData* pd) {
    _pd = pd;
  }
};

// Type entries used for arguments passed at a call and parameters on
// method entry. 2 cells per entry: one for the type encoded as in
// TypeEntries and one initialized with the stack slot where the
// profiled object is to be found so that the interpreter can locate
// it quickly.
class TypeStackSlotEntries : public TypeEntries {

private:
  enum {
    stack_slot_entry,
    type_entry,
    per_arg_cell_count
  };

  // offset of cell for stack slot for entry i within ProfileData object
  int stack_slot_offset(int i) const {
    return _base_off + stack_slot_local_offset(i);
  }

  const int _number_of_entries;

  // offset of cell for type for entry i within ProfileData object
  int type_offset_in_cells(int i) const {
    return _base_off + type_local_offset(i);
  }

public:

  TypeStackSlotEntries(int base_off, int nb_entries)
    : TypeEntries(base_off), _number_of_entries(nb_entries) {}

  static int compute_cell_count(Symbol* signature, bool include_receiver, int max);

  void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);

  int number_of_entries() const { return _number_of_entries; }

  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
  static int stack_slot_local_offset(int i) {
    return i * per_arg_cell_count + stack_slot_entry;
  }

  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
  static int type_local_offset(int i) {
    return i * per_arg_cell_count + type_entry;
  }

  // stack slot for entry i
  uint stack_slot(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->uint_at(stack_slot_offset(i));
  }

  // set stack slot for entry i
  void set_stack_slot(int i, uint num) {
    assert(i >= 0 && i < _number_of_entries, "oob");
    _pd->set_uint_at(stack_slot_offset(i), num);
  }

  // type for entry i
  intptr_t type(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->intptr_at(type_offset_in_cells(i));
  }

  // set type for entry i
  void set_type(int i, intptr_t k) {
    assert(i >= 0 && i < _number_of_entries, "oob");
    _pd->set_intptr_at(type_offset_in_cells(i), k);
  }

  static ByteSize per_arg_size() {
    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
  }

  static int per_arg_count() {
    return per_arg_cell_count;
  }

  ByteSize type_offset(int i) const {
    return DataLayout::cell_offset(type_offset_in_cells(i));
  }

  // GC support
  void clean_weak_klass_links(bool always_clean);

  void print_data_on(outputStream* st) const;
};

// Type entry used for return from a call. A single cell to record the
// type.
class SingleTypeEntry : public TypeEntries {

private:
  enum {
    cell_count = 1
  };

public:
  SingleTypeEntry(int base_off)
    : TypeEntries(base_off) {}

  void post_initialize() {
    set_type(type_none());
  }

  intptr_t type() const {
    return _pd->intptr_at(_base_off);
  }

  void set_type(intptr_t k) {
    _pd->set_intptr_at(_base_off, k);
  }

  static int static_cell_count() {
    return cell_count;
  }

  static ByteSize size() {
    return in_ByteSize(cell_count * DataLayout::cell_size);
  }

  ByteSize type_offset() {
    return DataLayout::cell_offset(_base_off);
  }

  // GC support
  void clean_weak_klass_links(bool always_clean);

  void print_data_on(outputStream* st) const;
};

// Entries to collect type information at a call: contains arguments
// (TypeStackSlotEntries), a return type (SingleTypeEntry) and a
// number of cells. Because the number of cells for the return type is
// smaller than the number of cells for the type of an arguments, the
// number of cells is used to tell how many arguments are profiled and
// whether a return value is profiled. See has_arguments() and
// has_return().
912 class TypeEntriesAtCall { 913 private: 914 static int stack_slot_local_offset(int i) { 915 return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i); 916 } 917 918 static int argument_type_local_offset(int i) { 919 return header_cell_count() + TypeStackSlotEntries::type_local_offset(i); 920 } 921 922 public: 923 924 static int header_cell_count() { 925 return 1; 926 } 927 928 static int cell_count_local_offset() { 929 return 0; 930 } 931 932 static int compute_cell_count(BytecodeStream* stream); 933 934 static void initialize(DataLayout* dl, int base, int cell_count) { 935 int off = base + cell_count_local_offset(); 936 dl->set_cell_at(off, cell_count - base - header_cell_count()); 937 } 938 939 static bool arguments_profiling_enabled(); 940 static bool return_profiling_enabled(); 941 942 // Code generation support 943 static ByteSize cell_count_offset() { 944 return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size); 945 } 946 947 static ByteSize args_data_offset() { 948 return in_ByteSize(header_cell_count() * DataLayout::cell_size); 949 } 950 951 static ByteSize stack_slot_offset(int i) { 952 return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size); 953 } 954 955 static ByteSize argument_type_offset(int i) { 956 return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size); 957 } 958 959 static ByteSize return_only_size() { 960 return SingleTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size); 961 } 962 963 }; 964 965 // CallTypeData 966 // 967 // A CallTypeData is used to access profiling information about a non 968 // virtual call for which we collect type information about arguments 969 // and return value. 
class CallTypeData : public CounterData {
private:
  // entries for arguments if any
  TypeStackSlotEntries _args;
  // entry for return type if any
  SingleTypeEntry _ret;

  // Cell index (within the whole ProfileData) of the cell-count header.
  int cell_count_global_offset() const {
    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  }

  // number of cells not counting the header
  int cell_count_no_header() const {
    return uint_at(cell_count_global_offset());
  }

  void check_number_of_arguments(int total) {
    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  }

public:
  CallTypeData(DataLayout* layout) :
    CounterData(layout),
    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
    _ret(cell_count() - SingleTypeEntry::static_cell_count())
  {
    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
    // Some compilers (VC++) don't want this passed in member initialization list
    _args.set_profile_data(this);
    _ret.set_profile_data(this);
  }

  const TypeStackSlotEntries* args() const {
    assert(has_arguments(), "no profiling of arguments");
    return &_args;
  }

  const SingleTypeEntry* ret() const {
    assert(has_return(), "no profiling of return value");
    return &_ret;
  }

  virtual bool is_CallTypeData() const { return true; }

  // This data is variable-sized; -1 signals that there is no static
  // cell count and callers must use compute_cell_count() instead.
  static int static_cell_count() {
    return -1;
  }

  static int compute_cell_count(BytecodeStream* stream) {
    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  }

  static void initialize(DataLayout* dl, int cell_count) {
    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
  }

  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);

  // Total cells = counter cells + header cell + recorded payload cells.
  // int_at_unchecked() is required here: the bounds-checked accessor
  // would itself call cell_count().
  virtual int cell_count() const {
    return CounterData::static_cell_count() +
      TypeEntriesAtCall::header_cell_count() +
      int_at_unchecked(cell_count_global_offset());
  }

  int number_of_arguments() const {
    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  }

  void set_argument_type(int i, Klass* k) {
    assert(has_arguments(), "no arguments!");
    intptr_t current = _args.type(i);
    _args.set_type(i, TypeEntries::with_status(k, current));
  }

  void set_return_type(Klass* k) {
    assert(has_return(), "no return!");
    intptr_t current = _ret.type();
    _ret.set_type(TypeEntries::with_status(k, current));
  }

  // An entry for a return value takes less space than an entry for an
  // argument so if the number of cells exceeds the number of cells
  // needed for an argument, this object contains type information for
  // at least one argument.
  bool has_arguments() const {
    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
    return res;
  }

  // An entry for a return value takes less space than an entry for an
  // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is not null, a return value
  // is profiled in this object.
  bool has_return() const {
    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
    return res;
  }

  // Code generation support
  static ByteSize args_data_offset() {
    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  }

  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a series of (Klass*, count)
// pairs which are used to store a type profile for the receiver of
// the check, the associated count is incremented every time the type
// is seen.  A per ReceiverTypeData counter is incremented on type
// overflow (when there's no more room for a not yet profiled Klass*).
//
class ReceiverTypeData : public CounterData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  // Each profiled row is a (receiver Klass*, hit count) cell pair,
  // laid out immediately after the CounterData cells.
  enum {
    receiver0_offset = counter_cell_count,
    count0_offset,
    receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
  };

public:
  ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
    // Also serves as the base of VirtualCallData/VirtualCallTypeData,
    // hence the three accepted tags.
    assert(layout->tag() == DataLayout::receiver_type_data_tag ||
           layout->tag() == DataLayout::virtual_call_data_tag ||
           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  }

  virtual bool is_ReceiverTypeData() const { return true; }

  static int static_cell_count() {
    return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessors
  static uint row_limit() {
    return (uint) TypeProfileWidth;
  }
  static int receiver_cell_index(uint row) {
    return receiver0_offset + row * receiver_type_row_cell_count;
  }
  static int receiver_count_cell_index(uint row) {
    return count0_offset + row * receiver_type_row_cell_count;
  }

  // Klass* recorded in the given row, or null if the row is empty.
  Klass* receiver(uint row) const {
    assert(row < row_limit(), "oob");

    Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
    assert(recv == nullptr || recv->is_klass(), "wrong type");
    return recv;
  }

  void set_receiver(uint row, Klass* k) {
    assert((uint)row < row_limit(), "oob");
    set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
  }

  uint receiver_count(uint row) const {
    assert(row < row_limit(), "oob");
    return uint_at(receiver_count_cell_index(row));
  }

  void set_receiver_count(uint row, uint count) {
    assert(row < row_limit(), "oob");
    set_uint_at(receiver_count_cell_index(row), count);
  }

  // Empty a row, e.g. after its Klass* has been unloaded.
  void clear_row(uint row) {
    assert(row < row_limit(), "oob");
    // Clear the total count - the indicator of a polymorphic call site.
    // The site may look monomorphic after that, but this allows more
    // accurate profiling information to be gathered because there was an
    // execution phase change since klasses were unloaded.
    // If the site is still polymorphic then the MDO will be updated
    // to reflect it. But it could be the case that the site becomes
    // only bimorphic. Then keeping the total count non-zero would be wrong.
    // Even if we use monomorphic (when it is not) for compilation
    // we will only have trap, deoptimization and recompile again
    // with updated MDO after executing the method in the Interpreter.
    // An additional receiver will be recorded in the cleaned row
    // during the next call execution.
    //
    // Note: our profiling logic works with empty rows in any slot.
    // We do sort the profiling info (ciCallProfile) for compilation.
    //
    set_count(0);
    set_receiver(row, nullptr);
    set_receiver_count(row, 0);
  }

  // Code generation support
  static ByteSize receiver_offset(uint row) {
    return cell_offset(receiver_cell_index(row));
  }
  static ByteSize receiver_count_offset(uint row) {
    return cell_offset(receiver_count_cell_index(row));
  }
  static ByteSize receiver_type_data_size() {
    return cell_offset(static_cell_count());
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean);

  void print_receiver_data_on(outputStream* st) const;
  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// VirtualCallData
//
// A VirtualCallData is used to access profiling information about a
// virtual call. For now, it has nothing more than a ReceiverTypeData.
class VirtualCallData : public ReceiverTypeData {
public:
  VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
    // Also serves as the base of VirtualCallTypeData, hence two tags.
    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  }

  virtual bool is_VirtualCallData() const { return true; }

  static int static_cell_count() {
    // At this point we could add more profile state, e.g., for arguments.
    // But for now it's the same size as the base record type.
    return ReceiverTypeData::static_cell_count();
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessors
  static ByteSize virtual_call_data_size() {
    return cell_offset(static_cell_count());
  }

  void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// VirtualCallTypeData
//
// A VirtualCallTypeData is used to access profiling information about
// a virtual call for which we collect type information about
// arguments and return value.
// NOTE(review): this class intentionally mirrors CallTypeData, with
// VirtualCallData as the fixed-size prefix instead of CounterData.
class VirtualCallTypeData : public VirtualCallData {
private:
  // entries for arguments if any
  TypeStackSlotEntries _args;
  // entry for return type if any
  SingleTypeEntry _ret;

  // Offset, in cells from the start of this DataLayout, of the cell
  // that records how many type-profiling cells this record carries.
  int cell_count_global_offset() const {
    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  }

  // number of cells not counting the header
  int cell_count_no_header() const {
    return uint_at(cell_count_global_offset());
  }

  // Debug helper: verify the recorded cell count matches expectation.
  void check_number_of_arguments(int total) {
    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  }

public:
  VirtualCallTypeData(DataLayout* layout) :
    VirtualCallData(layout),
    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
    _ret(cell_count() - SingleTypeEntry::static_cell_count())
  {
    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
    // Some compilers (VC++) don't want this passed in member initialization list
    _args.set_profile_data(this);
    _ret.set_profile_data(this);
  }

  const TypeStackSlotEntries* args() const {
    assert(has_arguments(), "no profiling of arguments");
    return &_args;
  }

  const SingleTypeEntry* ret() const {
    assert(has_return(), "no profiling of return value");
    return &_ret;
  }

  virtual bool is_VirtualCallTypeData() const { return true; }

  // Variable-size record: -1 signals that the size must be computed
  // per call site (see compute_cell_count).
  static int static_cell_count() {
    return -1;
  }

  static int compute_cell_count(BytecodeStream* stream) {
    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  }

  static void initialize(DataLayout* dl, int cell_count) {
    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
  }

  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);

  // Total size: fixed VirtualCallData cells + type-entries header + the
  // variable number of argument/return cells recorded at initialization.
  virtual int cell_count() const {
    return VirtualCallData::static_cell_count() +
      TypeEntriesAtCall::header_cell_count() +
      int_at_unchecked(cell_count_global_offset());
  }

  int number_of_arguments() const {
    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  }

  void set_argument_type(int i, Klass* k) {
    assert(has_arguments(), "no arguments!");
    intptr_t current = _args.type(i);
    _args.set_type(i, TypeEntries::with_status(k, current));
  }

  void set_return_type(Klass* k) {
    assert(has_return(), "no return!");
    intptr_t current = _ret.type();
    _ret.set_type(TypeEntries::with_status(k, current));
  }

  // An entry for a return value takes less space than an entry for an
  // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is not null, a return value
  // is profiled in this object.
  bool has_return() const {
    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
    return res;
  }

  // An entry for a return value takes less space than an entry for an
  // argument so if the number of cells exceeds the number of cells
  // needed for an argument, this object contains type information for
  // at least one argument.
  bool has_arguments() const {
    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
    return res;
  }

  // Code generation support
  static ByteSize args_data_offset() {
    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  }

  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    // Clean the receiver rows first, then the argument/return type entries.
    ReceiverTypeData::clean_weak_klass_links(always_clean);
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding data displacement.
1378 class RetData : public CounterData { 1379 protected: 1380 enum { 1381 bci0_offset = counter_cell_count, 1382 count0_offset, 1383 displacement0_offset, 1384 ret_row_cell_count = (displacement0_offset + 1) - bci0_offset 1385 }; 1386 1387 void set_bci(uint row, int bci) { 1388 assert((uint)row < row_limit(), "oob"); 1389 set_int_at(bci0_offset + row * ret_row_cell_count, bci); 1390 } 1391 void release_set_bci(uint row, int bci); 1392 void set_bci_count(uint row, uint count) { 1393 assert((uint)row < row_limit(), "oob"); 1394 set_uint_at(count0_offset + row * ret_row_cell_count, count); 1395 } 1396 void set_bci_displacement(uint row, int disp) { 1397 set_int_at(displacement0_offset + row * ret_row_cell_count, disp); 1398 } 1399 1400 public: 1401 RetData(DataLayout* layout) : CounterData(layout) { 1402 assert(layout->tag() == DataLayout::ret_data_tag, "wrong type"); 1403 } 1404 1405 virtual bool is_RetData() const { return true; } 1406 1407 enum { 1408 no_bci = -1 // value of bci when bci1/2 are not in use. 
1409 }; 1410 1411 static int static_cell_count() { 1412 return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count; 1413 } 1414 1415 virtual int cell_count() const { 1416 return static_cell_count(); 1417 } 1418 1419 static uint row_limit() { 1420 return (uint) BciProfileWidth; 1421 } 1422 static int bci_cell_index(uint row) { 1423 return bci0_offset + row * ret_row_cell_count; 1424 } 1425 static int bci_count_cell_index(uint row) { 1426 return count0_offset + row * ret_row_cell_count; 1427 } 1428 static int bci_displacement_cell_index(uint row) { 1429 return displacement0_offset + row * ret_row_cell_count; 1430 } 1431 1432 // Direct accessors 1433 int bci(uint row) const { 1434 return int_at(bci_cell_index(row)); 1435 } 1436 uint bci_count(uint row) const { 1437 return uint_at(bci_count_cell_index(row)); 1438 } 1439 int bci_displacement(uint row) const { 1440 return int_at(bci_displacement_cell_index(row)); 1441 } 1442 1443 // Interpreter Runtime support 1444 address fixup_ret(int return_bci, MethodData* mdo); 1445 1446 // Code generation support 1447 static ByteSize bci_offset(uint row) { 1448 return cell_offset(bci_cell_index(row)); 1449 } 1450 static ByteSize bci_count_offset(uint row) { 1451 return cell_offset(bci_count_cell_index(row)); 1452 } 1453 static ByteSize bci_displacement_offset(uint row) { 1454 return cell_offset(bci_displacement_cell_index(row)); 1455 } 1456 1457 // Specific initialization. 1458 void post_initialize(BytecodeStream* stream, MethodData* mdo); 1459 1460 void print_data_on(outputStream* st, const char* extra = nullptr) const; 1461 }; 1462 1463 // BranchData 1464 // 1465 // A BranchData is used to access profiling data for a two-way branch. 1466 // It consists of taken and not_taken counts as well as a data displacement 1467 // for the taken case. 
class BranchData : public JumpData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  // The not-taken counter is appended after JumpData's cells.
  enum {
    not_taken_off_set = jump_cell_count,
    branch_cell_count
  };

  void set_displacement(int displacement) {
    set_int_at(displacement_off_set, displacement);
  }

public:
  BranchData(DataLayout* layout) : JumpData(layout) {
    // Also serves as the base of ACmpData, hence two accepted tags.
    assert(layout->tag() == DataLayout::branch_data_tag || layout->tag() == DataLayout::acmp_data_tag, "wrong type");
  }

  virtual bool is_BranchData() const { return true; }

  static int static_cell_count() {
    return branch_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessor
  uint not_taken() const {
    return uint_at(not_taken_off_set);
  }

  void set_not_taken(uint cnt) {
    set_uint_at(not_taken_off_set, cnt);
  }

  // Increment the not-taken counter, saturating at UINT_MAX rather
  // than wrapping to zero (see the file header on saturating counters).
  uint inc_not_taken() {
    uint cnt = not_taken() + 1;
    // Did we wrap? Will compiler screw us??
    if (cnt == 0) cnt--;
    set_uint_at(not_taken_off_set, cnt);
    return cnt;
  }

  // Code generation support
  static ByteSize not_taken_offset() {
    return cell_offset(not_taken_off_set);
  }
  static ByteSize branch_data_size() {
    return cell_offset(branch_cell_count);
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// ArrayData
//
// A ArrayData is a base class for accessing profiling data which does
// not have a statically known size. It consists of an array length
// and an array start.
class ArrayData : public ProfileData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  friend class DataLayout;

  // Cell 0 holds the array length; the variable-size payload follows.
  enum {
    array_len_off_set,
    array_start_off_set
  };

  // The array_* accessors below take indices relative to the start of
  // the payload (i.e. index 0 is the first cell after the length cell).
  uint array_uint_at(int index) const {
    int aindex = index + array_start_off_set;
    return uint_at(aindex);
  }
  int array_int_at(int index) const {
    int aindex = index + array_start_off_set;
    return int_at(aindex);
  }
  oop array_oop_at(int index) const {
    int aindex = index + array_start_off_set;
    return oop_at(aindex);
  }
  void array_set_int_at(int index, int value) {
    int aindex = index + array_start_off_set;
    set_int_at(aindex, value);
  }

  // Code generation support for subclasses.
  static ByteSize array_element_offset(int index) {
    return cell_offset(array_start_off_set + index);
  }

public:
  ArrayData(DataLayout* layout) : ProfileData(layout) {}

  virtual bool is_ArrayData() const { return true; }

  // Variable-size record: -1 signals that the size is not statically known.
  static int static_cell_count() {
    return -1;
  }

  int array_len() const {
    return int_at_unchecked(array_len_off_set);
  }

  virtual int cell_count() const {
    // Payload cells plus the length cell itself.
    return array_len() + 1;
  }

  // Code generation support
  static ByteSize array_len_offset() {
    return cell_offset(array_len_off_set);
  }
  static ByteSize array_start_offset() {
    return cell_offset(array_start_off_set);
  }
};

// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes). It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
class MultiBranchData : public ArrayData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  // Layout within the ArrayData payload: the default case's
  // (count, displacement) pair first, then one pair per switch case.
  // NOTE(review): "disaplacement" is a long-standing typo; the constant
  // may be referenced outside this header, so it is not renamed here.
  enum {
    default_count_off_set,
    default_disaplacement_off_set,
    case_array_start
  };
  // Offsets of the fields within one per-case pair.
  enum {
    relative_count_off_set,
    relative_displacement_off_set,
    per_case_cell_count
  };

  void set_default_displacement(int displacement) {
    array_set_int_at(default_disaplacement_off_set, displacement);
  }
  void set_displacement_at(int index, int displacement) {
    array_set_int_at(case_array_start +
                     index * per_case_cell_count +
                     relative_displacement_off_set,
                     displacement);
  }

public:
  MultiBranchData(DataLayout* layout) : ArrayData(layout) {
    assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
  }

  virtual bool is_MultiBranchData() const { return true; }

  static int compute_cell_count(BytecodeStream* stream);

  int number_of_cases() const {
    int alen = array_len() - 2; // get rid of default case here.
    assert(alen % per_case_cell_count == 0, "must be even");
    return (alen / per_case_cell_count);
  }

  uint default_count() const {
    return array_uint_at(default_count_off_set);
  }
  int default_displacement() const {
    return array_int_at(default_disaplacement_off_set);
  }

  uint count_at(int index) const {
    return array_uint_at(case_array_start +
                         index * per_case_cell_count +
                         relative_count_off_set);
  }
  int displacement_at(int index) const {
    return array_int_at(case_array_start +
                        index * per_case_cell_count +
                        relative_displacement_off_set);
  }

  // Code generation support
  static ByteSize default_count_offset() {
    return array_element_offset(default_count_off_set);
  }
  static ByteSize default_displacement_offset() {
    return array_element_offset(default_disaplacement_off_set);
  }
  static ByteSize case_count_offset(int index) {
    return case_array_offset() +
           (per_case_size() * index) +
           relative_count_offset();
  }
  static ByteSize case_array_offset() {
    return array_element_offset(case_array_start);
  }
  static ByteSize per_case_size() {
    return in_ByteSize(per_case_cell_count) * cell_size;
  }
  static ByteSize relative_count_offset() {
    return in_ByteSize(relative_count_off_set) * cell_size;
  }
  static ByteSize relative_displacement_offset() {
    return in_ByteSize(relative_displacement_off_set) * cell_size;
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// ArgInfoData
//
// One cell per method argument, recording whether the argument has
// been modified (used by the "arg info" extra-data section of the MDO).
class ArgInfoData : public ArrayData {

public:
  ArgInfoData(DataLayout* layout) : ArrayData(layout) {
    assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
  }

  virtual bool is_ArgInfoData() const { return true; }


  int number_of_args() const {
    return array_len();
  }

  uint arg_modified(int arg) const {
    return array_uint_at(arg);
  }

  void set_arg_modified(int arg, uint val) {
    array_set_int_at(arg, val);
  }

  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// ParametersTypeData
//
// A ParametersTypeData is used to access profiling information about
// types of parameters to a method
class ParametersTypeData : public ArrayData {

private:
  TypeStackSlotEntries _parameters;

  static int stack_slot_local_offset(int i) {
    assert_profiling_enabled();
    return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
  }

  static int type_local_offset(int i) {
    assert_profiling_enabled();
    return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
  }

  // Whether method-parameter type profiling is enabled (defined in
  // methodData.cpp).
  static bool profiling_enabled();
  static void assert_profiling_enabled() {
    assert(profiling_enabled(), "method parameters profiling should be on");
  }

public:
  ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
    assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
    // Some compilers (VC++) don't want this passed in member initialization list
    _parameters.set_profile_data(this);
  }

  static int compute_cell_count(Method* m);

  virtual bool is_ParametersTypeData() const { return true; }

  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);

  int number_of_parameters() const {
    return array_len() / TypeStackSlotEntries::per_arg_count();
  }

  const TypeStackSlotEntries* parameters() const { return &_parameters; }

  uint stack_slot(int i) const {
    return _parameters.stack_slot(i);
  }

  void set_type(int i, Klass* k) {
    intptr_t current = _parameters.type(i);
    _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    _parameters.clean_weak_klass_links(always_clean);
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;

  // Code generation support
  static ByteSize stack_slot_offset(int i) {
    return cell_offset(stack_slot_local_offset(i));
  }

  static ByteSize type_offset(int i) {
    return cell_offset(type_local_offset(i));
  }
};

// SpeculativeTrapData
//
// A SpeculativeTrapData is used to record traps due to type
// speculation. It records the root of the compilation: that type
// speculation is wrong in the context of one compilation (for
// method1) doesn't mean it's wrong in the context of another one (for
// method2). Type speculation could have more/different data in the
// context of the compilation of method2 and it's worthwhile to try an
// optimization that failed for compilation of method1 in the context
// of compilation of method2.
// Space for SpeculativeTrapData entries is allocated from the extra
// data space in the MDO. If we run out of space, the trap data for
// the ProfileData at that bci is updated.
class SpeculativeTrapData : public ProfileData {
protected:
  enum {
    // Cell holding the Method* that rooted the failed speculation.
    speculative_trap_method,
#ifndef _LP64
    // The size of the area for traps is a multiple of the header
    // size, 2 cells on 32 bits. Packed at the end of this area are
    // argument info entries (with tag
    // DataLayout::arg_info_data_tag). The logic in
    // MethodData::bci_to_extra_data() that guarantees traps don't
    // overflow over argument info entries assumes the size of a
    // SpeculativeTrapData is twice the header size. On 32 bits, a
    // SpeculativeTrapData must be 4 cells.
    padding,
#endif
    speculative_trap_cell_count
  };
public:
  SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
    assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
  }

  virtual bool is_SpeculativeTrapData() const { return true; }

  static int static_cell_count() {
    return speculative_trap_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessor
  Method* method() const {
    return (Method*)intptr_at(speculative_trap_method);
  }

  void set_method(Method* m) {
    assert(!m->is_old(), "cannot add old methods");
    set_intptr_at(speculative_trap_method, (intptr_t)m);
  }

  static ByteSize method_offset() {
    return cell_offset(speculative_trap_method);
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// ArrayLoadStoreData
//
// Profiles the array type and element type observed at an array
// load/store bytecode, plus flatness/null-freeness flags (Valhalla).
class ArrayLoadStoreData : public ProfileData {
private:
  enum {
    flat_array_flag = DataLayout::first_flag,
    null_free_array_flag = flat_array_flag + 1,
  };

  // Type entry for the array itself.
  SingleTypeEntry _array;
  // Type entry for the element being loaded or stored.
  SingleTypeEntry _element;

public:
  ArrayLoadStoreData(DataLayout* layout) :
    ProfileData(layout),
    _array(0),
    _element(SingleTypeEntry::static_cell_count()) {
    assert(layout->tag() == DataLayout::array_load_store_data_tag, "wrong type");
    _array.set_profile_data(this);
    _element.set_profile_data(this);
  }

  const SingleTypeEntry* array() const {
    return &_array;
  }

  const SingleTypeEntry* element() const {
    return &_element;
  }

  virtual bool is_ArrayLoadStoreData() const { return true; }

  static int static_cell_count() {
    return SingleTypeEntry::static_cell_count() * 2;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  void set_flat_array() { set_flag_at(flat_array_flag); }
  bool flat_array() const { return flag_at(flat_array_flag); }

  void set_null_free_array() { set_flag_at(null_free_array_flag); }
  bool null_free_array() const { return flag_at(null_free_array_flag); }

  // Code generation support
  static int flat_array_byte_constant() {
    return flag_number_to_constant(flat_array_flag);
  }

  static int null_free_array_byte_constant() {
    return flag_number_to_constant(null_free_array_flag);
  }

  static ByteSize array_offset() {
    return cell_offset(0);
  }

  static ByteSize element_offset() {
    return cell_offset(SingleTypeEntry::static_cell_count());
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    _array.clean_weak_klass_links(always_clean);
    _element.clean_weak_klass_links(always_clean);
  }

  static ByteSize array_load_store_data_size() {
    return cell_offset(static_cell_count());
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// ACmpData
//
// BranchData extended with type entries and inline-type flags for the
// two operands of an acmp bytecode (Valhalla).
class ACmpData : public BranchData {
private:
  enum {
    left_inline_type_flag = DataLayout::first_flag,
    right_inline_type_flag
  };

  // Type entries for the left and right acmp operands, laid out after
  // the BranchData cells.
  SingleTypeEntry _left;
  SingleTypeEntry _right;

public:
  ACmpData(DataLayout* layout) :
    BranchData(layout),
    _left(BranchData::static_cell_count()),
    _right(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count()) {
    assert(layout->tag() == DataLayout::acmp_data_tag, "wrong type");
    _left.set_profile_data(this);
    _right.set_profile_data(this);
  }

  const SingleTypeEntry* left() const {
    return &_left;
  }

  const SingleTypeEntry* right() const {
    return &_right;
  }

  virtual bool is_ACmpData() const { return true; }

  static int static_cell_count() {
    return BranchData::static_cell_count() + SingleTypeEntry::static_cell_count() * 2;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  void set_left_inline_type() { set_flag_at(left_inline_type_flag); }
  bool left_inline_type() const { return flag_at(left_inline_type_flag); }

  void set_right_inline_type() { set_flag_at(right_inline_type_flag); }
  bool right_inline_type() const { return flag_at(right_inline_type_flag); }

  // Code generation support
  static int left_inline_type_byte_constant() {
    return flag_number_to_constant(left_inline_type_flag);
  }

  static int right_inline_type_byte_constant() {
    return flag_number_to_constant(right_inline_type_flag);
  }

  static ByteSize left_offset() {
    return cell_offset(BranchData::static_cell_count());
  }

  static ByteSize right_offset() {
    return cell_offset(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count());
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    _left.clean_weak_klass_links(always_clean);
    _right.clean_weak_klass_links(always_clean);
  }

  static ByteSize acmp_data_size() {
    return cell_offset(static_cell_count());
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

// MethodData*
//
// A
MethodData* holds information which has been collected about
// a method. Its layout looks like this:
//
// -----------------------------
// | header                    |
// | klass                     |
// -----------------------------
// | method                    |
// | size of the MethodData*   |
// -----------------------------
// | Data entries...           |
// |   (variable size)         |
// |                           |
// .                           .
// .                           .
// .                           .
// |                           |
// -----------------------------
//
// The data entry area is a heterogeneous array of DataLayouts. Each
// DataLayout in the array corresponds to a specific bytecode in the
// method. The entries in the array are sorted by the corresponding
// bytecode. Access to the data is via resource-allocated ProfileData,
// which point to the underlying blocks of DataLayout structures.
//
// During interpretation, if profiling is enabled, the interpreter
// maintains a method data pointer (mdp), which points at the entry
// in the array corresponding to the current bci. In the course of
// interpretation, when a bytecode is encountered that has profile data
// associated with it, the entry pointed to by mdp is updated, then the
// mdp is adjusted to point to the next appropriate DataLayout. If mdp
// is null to begin with, the interpreter assumes that the current method
// is not (yet) being profiled.
//
// In MethodData* parlance, "dp" is a "data pointer", the actual address
// of a DataLayout element. A "di" is a "data index", the offset in bytes
// from the base of the data entry array. A "displacement" is the byte offset
// in certain ProfileData objects that indicates the amount the mdp must be
// adjusted in the event of a change in control flow.
//

// Callback used by MethodData::clean_extra_data() and
// verify_extra_data_clean(); implementations decide whether a Method*
// referenced from an extra-data entry should be treated as live.
class CleanExtraDataClosure : public StackObj {
public:
  virtual bool is_live(Method* m) = 0;
};


#if INCLUDE_JVMCI
// Encapsulates an encoded speculation reason. These are linked together in
// a list that is atomically appended to during deoptimization. Entries are
// never removed from the list.
// @see jdk.vm.ci.hotspot.HotSpotSpeculationLog.HotSpotSpeculationEncoding
class FailedSpeculation: public CHeapObj<mtCompiler> {
 private:
  // The length of HotSpotSpeculationEncoding.toByteArray(). The data itself
  // is an array embedded at the end of this object.
  int   _data_len;

  // Next entry in a linked list.
  FailedSpeculation* _next;

  FailedSpeculation(address data, int data_len);

  FailedSpeculation** next_adr() { return &_next; }

  // Placement new operator for inlining the speculation data into
  // the FailedSpeculation object.
  void* operator new(size_t size, size_t fs_size) throw();

 public:
  // The speculation bytes are stored immediately after the object header.
  char* data()         { return (char*)(((address) this) + sizeof(FailedSpeculation)); }
  int data_len() const { return _data_len; }
  FailedSpeculation* next() const { return _next; }

  // Atomically appends a speculation from nm to the list whose head is at (*failed_speculations_address).
  // Returns false if the FailedSpeculation object could not be allocated.
  static bool add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len);

  // Frees all entries in the linked list whose head is at (*failed_speculations_address).
  static void free_failed_speculations(FailedSpeculation** failed_speculations_address);
};
#endif

class ciMethodData;

class MethodData : public Metadata {
  friend class VMStructs;
  friend class JVMCIVMStructs;
private:
  friend class ProfileData;
  friend class TypeEntriesAtCall;
  friend class ciMethodData;

  // If you add a new field that points to any metaspace object, you
  // must add this field to MethodData::metaspace_pointers_do().

  // Back pointer to the Method*
  Method* _method;

  // Size of this oop in bytes
  int _size;

  // Cached hint for bci_to_dp and bci_to_data
  int _hint_di;

  // Guards access to the extra-data area (see extra_data_lock()).
  Mutex _extra_data_lock;

  MethodData(const methodHandle& method);
public:
  static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);

  virtual bool is_methodData() const { return true; }
  void initialize();

  // Whole-method sticky bits and flags
  enum {
    _trap_hist_limit    = Deoptimization::Reason_TRAP_HISTORY_LENGTH,
    _trap_hist_mask     = max_jubyte,
    _extra_data_count   = 4     // extra DataLayout headers, for trap history
  }; // Public flag values

  // Compiler-related counters.
  class CompilerCounters {
    friend class VMStructs;
    friend class JVMCIVMStructs;

    uint _nof_decompiles;             // count of all nmethod removals
    uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
    uint _nof_overflow_traps;         // trap count, excluding _trap_hist
    union {
      // _align forces the byte array to pointer-word alignment.
      intptr_t _align;
      // JVMCI separates trap history for OSR compilations from normal compilations
      u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
    } _trap_hist;

  public:
    CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0) {
#ifndef ZERO
      // Some Zero platforms do not have expected alignment, and do not use
      // this code. static_assert would still fire and fail for them.
      static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
#endif
      uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
      Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
    }

    // Return (uint)-1 for overflow.
    // A saturated one-byte counter (value == _trap_hist_mask) makes
    // ((value+1) & mask) == 0, so the expression yields -1 == (uint)-1.
    uint trap_count(int reason) const {
      assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
      return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
    }

    uint inc_trap_count(int reason) {
      // Count another trap, anywhere in this method.
      assert(reason >= 0, "must be single trap");
      assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
      uint cnt1 = 1 + _trap_hist._array[reason];
      if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow...
        _trap_hist._array[reason] = (u1)cnt1;
        return cnt1;
      } else {
        // Per-reason byte counter saturated; fall back to the shared
        // overflow counter and report a value beyond the mask.
        return _trap_hist_mask + (++_nof_overflow_traps);
      }
    }

    uint overflow_trap_count() const {
      return _nof_overflow_traps;
    }
    uint overflow_recompile_count() const {
      return _nof_overflow_recompiles;
    }
    uint inc_overflow_recompile_count() {
      return ++_nof_overflow_recompiles;
    }
    uint decompile_count() const {
      return _nof_decompiles;
    }
    uint inc_decompile_count() {
      return ++_nof_decompiles;
    }

    // Support for code generation
    static ByteSize trap_history_offset() {
      return byte_offset_of(CompilerCounters, _trap_hist._array);
    }
  };

private:
  CompilerCounters _compiler_counters;

  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  intx              _eflags;          // flags on escape information
  intx              _arg_local;       // bit set of non-escaping arguments
  intx              _arg_stack;       // bit set of stack-allocatable arguments
  intx              _arg_returned;    // bit set of returned arguments

  int               _creation_mileage; // method mileage at MDO creation

  // How many invocations has this MDO seen?
  // These counters are used to determine the exact age of MDO.
  // We need those because in tiered a method can be concurrently
  // executed at different levels.
  InvocationCounter _invocation_counter;
  // Same for backedges.
  InvocationCounter _backedge_counter;
  // Counter values at the time profiling started.
  int               _invocation_counter_start;
  int               _backedge_counter_start;
  uint              _tenure_traps;
  int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
  int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog

#if INCLUDE_RTM_OPT
  // State of RTM code generation during compilation of the method
  int               _rtm_state;
#endif

  // Number of loops and blocks is computed when compiling the first
  // time with C1. It is used to determine if method is trivial.
  short             _num_loops;
  short             _num_blocks;
  // Does this method contain anything worth profiling?
  enum WouldProfile {unknown, no_profile, profile};
  WouldProfile      _would_profile;

#if INCLUDE_JVMCI
  // Support for HotSpotMethodData.setCompiledIRSize(int)
  int                _jvmci_ir_size;
  FailedSpeculation* _failed_speculations;
#endif

  // Size of _data array in bytes.  (Excludes header and extra_data fields.)
  int _data_size;

  // data index for the area dedicated to parameters. -1 if no
  // parameter profiling.
  // NOTE(review): the comment says -1, but per the enum below -1 means
  // "uninitialized" and -2 means "no parameters".
  enum { no_parameters = -2, parameters_uninitialized = -1 };
  int _parameters_type_data_di;

  // Beginning of the data entries
  intptr_t _data[1];

  // Helper for size computation
  static int compute_data_size(BytecodeStream* stream);
  static int bytecode_cell_count(Bytecodes::Code code);
  static bool is_speculative_trap_bytecode(Bytecodes::Code code);
  enum { no_profile_data = -1, variable_cell_count = -2 };

  // Helper for initialization
  // Maps a byte offset (di) into the _data area to its DataLayout.
  DataLayout* data_layout_at(int data_index) const {
    assert(data_index % sizeof(intptr_t) == 0, "unaligned");
    return (DataLayout*) (((address)_data) + data_index);
  }

  // Initialize an individual data segment.  Returns the size of
  // the segment in bytes.
  int initialize_data(BytecodeStream* stream, int data_index);

  // Helper for data_at
  DataLayout* limit_data_position() const {
    return data_layout_at(_data_size);
  }
  bool out_of_bounds(int data_index) const {
    return data_index >= data_size();
  }

  // Give each of the data entries a chance to perform specific
  // data initialization.
  void post_initialize(BytecodeStream* stream);

  // hint accessors
  int      hint_di() const  { return _hint_di; }
  void set_hint_di(int di)  {
    assert(!out_of_bounds(di), "hint_di out of bounds");
    _hint_di = di;
  }

  // Returns the cached hint layout if it does not lie beyond bci,
  // otherwise restarts the search from the first entry.
  DataLayout* data_layout_before(int bci) {
    // avoid SEGV on this edge case
    if (data_size() == 0)
      return nullptr;
    DataLayout* layout = data_layout_at(hint_di());
    if (layout->bci() <= bci)
      return layout;
    return data_layout_at(first_di());
  }

  // What is the index of the first data entry?
  int first_di() const { return 0; }

  ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
  // Find or create an extra ProfileData:
  ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);

  // return the argument info cell
  ArgInfoData *arg_info();

  enum {
    no_type_profile = 0,
    type_profile_jsr292 = 1,
    type_profile_all = 2
  };

  // Per-site predicates controlling which call sites get type profiling.
  static bool profile_jsr292(const methodHandle& m, int bci);
  static bool profile_unsafe(const methodHandle& m, int bci);
  static bool profile_memory_access(const methodHandle& m, int bci);
  static int profile_arguments_flag();
  static bool profile_all_arguments();
  static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
  static int profile_return_flag();
  static bool profile_all_return();
  static bool profile_return_for_invoke(const methodHandle& m, int bci);
  static int profile_parameters_flag();
static bool profile_parameters_jsr292_only(); 2302 static bool profile_all_parameters(); 2303 2304 void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false); 2305 void verify_extra_data_clean(CleanExtraDataClosure* cl); 2306 2307 public: 2308 void clean_extra_data(CleanExtraDataClosure* cl); 2309 2310 static int header_size() { 2311 return sizeof(MethodData)/wordSize; 2312 } 2313 2314 // Compute the size of a MethodData* before it is created. 2315 static int compute_allocation_size_in_bytes(const methodHandle& method); 2316 static int compute_allocation_size_in_words(const methodHandle& method); 2317 static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps); 2318 2319 // Determine if a given bytecode can have profile information. 2320 static bool bytecode_has_profile(Bytecodes::Code code) { 2321 return bytecode_cell_count(code) != no_profile_data; 2322 } 2323 2324 // reset into original state 2325 void init(); 2326 2327 // My size 2328 int size_in_bytes() const { return _size; } 2329 int size() const { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); } 2330 2331 int creation_mileage() const { return _creation_mileage; } 2332 void set_creation_mileage(int x) { _creation_mileage = x; } 2333 2334 int invocation_count() { 2335 if (invocation_counter()->carry()) { 2336 return InvocationCounter::count_limit; 2337 } 2338 return invocation_counter()->count(); 2339 } 2340 int backedge_count() { 2341 if (backedge_counter()->carry()) { 2342 return InvocationCounter::count_limit; 2343 } 2344 return backedge_counter()->count(); 2345 } 2346 2347 int invocation_count_start() { 2348 if (invocation_counter()->carry()) { 2349 return 0; 2350 } 2351 return _invocation_counter_start; 2352 } 2353 2354 int backedge_count_start() { 2355 if (backedge_counter()->carry()) { 2356 return 0; 2357 } 2358 return _backedge_counter_start; 2359 } 2360 2361 int invocation_count_delta() { return invocation_count() - 
invocation_count_start(); } 2362 int backedge_count_delta() { return backedge_count() - backedge_count_start(); } 2363 2364 void reset_start_counters() { 2365 _invocation_counter_start = invocation_count(); 2366 _backedge_counter_start = backedge_count(); 2367 } 2368 2369 InvocationCounter* invocation_counter() { return &_invocation_counter; } 2370 InvocationCounter* backedge_counter() { return &_backedge_counter; } 2371 2372 #if INCLUDE_JVMCI 2373 FailedSpeculation** get_failed_speculations_address() { 2374 return &_failed_speculations; 2375 } 2376 #endif 2377 2378 #if INCLUDE_RTM_OPT 2379 int rtm_state() const { 2380 return _rtm_state; 2381 } 2382 void set_rtm_state(RTMState rstate) { 2383 _rtm_state = (int)rstate; 2384 } 2385 void atomic_set_rtm_state(RTMState rstate) { 2386 Atomic::store(&_rtm_state, (int)rstate); 2387 } 2388 2389 static ByteSize rtm_state_offset() { 2390 return byte_offset_of(MethodData, _rtm_state); 2391 } 2392 #endif 2393 2394 void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; } 2395 bool would_profile() const { return _would_profile != no_profile; } 2396 2397 int num_loops() const { return _num_loops; } 2398 void set_num_loops(short n) { _num_loops = n; } 2399 int num_blocks() const { return _num_blocks; } 2400 void set_num_blocks(short n) { _num_blocks = n; } 2401 2402 bool is_mature() const; // consult mileage and ProfileMaturityPercentage 2403 static int mileage_of(Method* m); 2404 2405 // Support for interprocedural escape analysis, from Thomas Kotzmann. 
  enum EscapeFlag {
    estimated    = 1 << 0,
    return_local = 1 << 1,
    return_allocated = 1 << 2,
    allocated_escapes = 1 << 3,
    unknown_modified = 1 << 4
  };

  intx eflags()                                  { return _eflags; }
  intx arg_local()                               { return _arg_local; }
  intx arg_stack()                               { return _arg_stack; }
  intx arg_returned()                            { return _arg_returned; }
  uint arg_modified(int a)                       { ArgInfoData *aid = arg_info();
                                                   assert(aid != nullptr, "arg_info must be not null");
                                                   assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
                                                   return aid->arg_modified(a); }

  void set_eflags(intx v)                        { _eflags = v; }
  void set_arg_local(intx v)                     { _arg_local = v; }
  void set_arg_stack(intx v)                     { _arg_stack = v; }
  void set_arg_returned(intx v)                  { _arg_returned = v; }
  void set_arg_modified(int a, uint v)           { ArgInfoData *aid = arg_info();
                                                   assert(aid != nullptr, "arg_info must be not null");
                                                   assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
                                                   aid->set_arg_modified(a, v); }

  void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }

  // Location and size of data area
  address data_base() const {
    return (address) _data;
  }
  int data_size() const {
    return _data_size;
  }

  // Bytes occupied by the parameter-profile area, 0 if there is none.
  int parameters_size_in_bytes() const {
    ParametersTypeData* param = parameters_type_data();
    return param == nullptr ? 0 : param->size_in_bytes();
  }

  // Accessors
  Method* method() const { return _method; }

  // Get the data at an arbitrary (sort of) data index.
  ProfileData* data_at(int data_index) const;

  // Walk through the data in order.
  ProfileData* first_data() const { return data_at(first_di()); }
  ProfileData* next_data(ProfileData* current) const;
  DataLayout*  next_data_layout(DataLayout* current) const;
  bool is_valid(ProfileData* current) const { return current != nullptr; }
  bool is_valid(DataLayout* current) const { return current != nullptr; }

  // Convert a dp (data pointer) to a di (data index).
  int dp_to_di(address dp) const {
    return (int)(dp - ((address)_data));
  }

  // bci to di/dp conversion.
  address bci_to_dp(int bci);
  int bci_to_di(int bci) {
    return dp_to_di(bci_to_dp(bci));
  }

  // Get the data at an arbitrary bci, or null if there is none.
  ProfileData* bci_to_data(int bci);

  // Same, but try to create an extra_data record if one is needed:
  // Three-stage fallback: (1) with m == nullptr, a regular entry first;
  // (2) an extra-data entry (a SpeculativeTrapData when m != nullptr);
  // (3) if that allocation failed, a regular entry, then a plain extra
  // entry with no method attached.
  ProfileData* allocate_bci_to_data(int bci, Method* m) {
    ProfileData* data = nullptr;
    // If m not null, try to allocate a SpeculativeTrapData entry
    if (m == nullptr) {
      data = bci_to_data(bci);
    }
    if (data != nullptr) {
      return data;
    }
    data = bci_to_extra_data(bci, m, true);
    if (data != nullptr) {
      return data;
    }
    // If SpeculativeTrapData allocation fails try to allocate a
    // regular entry
    data = bci_to_data(bci);
    if (data != nullptr) {
      return data;
    }
    return bci_to_extra_data(bci, nullptr, true);
  }

  // Add a handful of extra data records, for trap tracking.
  // Extra-data area layout: it starts where the regular data ends and
  // runs to the end of the object, minus the trailing parameter area.
  DataLayout* extra_data_base() const  { return limit_data_position(); }
  DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
  DataLayout* args_data_limit() const  { return (DataLayout*)((address)this + size_in_bytes() -
                                                              parameters_size_in_bytes()); }
  int extra_data_size() const          { return (int)((address)extra_data_limit() - (address)extra_data_base()); }
  static DataLayout* next_extra(DataLayout* dp);

  // Return (uint)-1 for overflow.
  uint trap_count(int reason) const {
    return _compiler_counters.trap_count(reason);
  }
  // For loops:
  static uint trap_reason_limit() { return _trap_hist_limit; }
  static uint trap_count_limit()  { return _trap_hist_mask; }
  uint inc_trap_count(int reason) {
    return _compiler_counters.inc_trap_count(reason);
  }

  uint overflow_trap_count() const {
    return _compiler_counters.overflow_trap_count();
  }
  uint overflow_recompile_count() const {
    return _compiler_counters.overflow_recompile_count();
  }
  uint inc_overflow_recompile_count() {
    return _compiler_counters.inc_overflow_recompile_count();
  }
  uint decompile_count() const {
    return _compiler_counters.decompile_count();
  }
  // Bumps the decompile count and, past PerMethodRecompilationCutoff,
  // bars the method from further top-tier compilation.
  uint inc_decompile_count() {
    uint dec_count = _compiler_counters.inc_decompile_count();
    if (dec_count > (uint)PerMethodRecompilationCutoff) {
      method()->set_not_compilable("decompile_count > PerMethodRecompilationCutoff", CompLevel_full_optimization);
    }
    return dec_count;
  }
  uint tenure_traps() const {
    return _tenure_traps;
  }
  void inc_tenure_traps() {
    _tenure_traps += 1;
  }

  // Return pointer to area dedicated to parameters in MDO
  ParametersTypeData* parameters_type_data() const {
    assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
    return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : nullptr;
  }

  int parameters_type_data_di() const {
    assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
    return _parameters_type_data_di;
  }

  // Support for code generation
  static ByteSize data_offset() {
    return byte_offset_of(MethodData, _data[0]);
  }

  static ByteSize trap_history_offset() {
    return byte_offset_of(MethodData, _compiler_counters) + CompilerCounters::trap_history_offset();
  }

  static ByteSize invocation_counter_offset() {
    return byte_offset_of(MethodData, _invocation_counter);
  }

  static ByteSize backedge_counter_offset() {
    return byte_offset_of(MethodData, _backedge_counter);
  }

  static ByteSize invoke_mask_offset() {
    return byte_offset_of(MethodData, _invoke_mask);
  }

  static ByteSize backedge_mask_offset() {
    return byte_offset_of(MethodData, _backedge_mask);
  }

  static ByteSize parameters_type_data_di_offset() {
    return byte_offset_of(MethodData, _parameters_type_data_di);
  }

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return MethodDataType; }

  // Deallocation support
  void deallocate_contents(ClassLoaderData* loader_data);
  void release_C_heap_structures();

  // GC support
  void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }

  // Printing
  void print_on      (outputStream* st) const;
  void print_value_on(outputStream* st) const;

  // printing support for method data
  void print_data_on(outputStream* st) const;

  const char* internal_name() const { return "{method data}"; }

  // verification
  void verify_on(outputStream* st);
  void verify_data_on(outputStream* st);

  // Global switches (derived from command-line flags) controlling
  // which kinds of type profiling are collected.
  static bool profile_parameters_for_method(const methodHandle& m);
  static bool profile_arguments();
  static bool profile_arguments_jsr292_only();
  static bool profile_return();
  static bool profile_parameters();
  static bool profile_return_jsr292_only();

  void clean_method_data(bool always_clean);
  void clean_weak_method_links();
  Mutex* extra_data_lock() { return &_extra_data_lock; }
};

#endif // SHARE_OOPS_METHODDATA_HPP