1 /*
   2  * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_METHODDATA_HPP
  26 #define SHARE_OOPS_METHODDATA_HPP
  27 
  28 #include "interpreter/bytecodes.hpp"
  29 #include "oops/metadata.hpp"
  30 #include "oops/method.hpp"
  31 #include "oops/oop.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/deoptimization.hpp"
  34 #include "runtime/mutex.hpp"
  35 #include "utilities/align.hpp"
  36 #include "utilities/copy.hpp"
  37 
  38 class BytecodeStream;
  39 
  40 // The MethodData object collects counts and other profile information
  41 // during zeroth-tier (interpreter) and third-tier (C1 with full profiling)
  42 // execution.
  43 //
  44 // The profile is used later by compilation heuristics.  Some heuristics
  45 // enable use of aggressive (or "heroic") optimizations.  An aggressive
  46 // optimization often has a down-side, a corner case that it handles
  47 // poorly, but which is thought to be rare.  The profile provides
  48 // evidence of this rarity for a given method or even BCI.  It allows
  49 // the compiler to back out of the optimization at places where it
  50 // has historically been a poor choice.  Other heuristics try to use
  51 // specific information gathered about types observed at a given site.
  52 //
  53 // All data in the profile is approximate.  It is expected to be accurate
  54  * on the whole, but the system expects occasional inaccuracies, due to
  55 // counter overflow, multiprocessor races during data collection, space
  56 // limitations, missing MDO blocks, etc.  Bad or missing data will degrade
  57 // optimization quality but will not affect correctness.  Also, each MDO
  58 // is marked with its birth-date ("creation_mileage") which can be used
  59 // to assess the quality ("maturity") of its data.
  60 //
  61 // Short (<32-bit) counters are designed to overflow to a known "saturated"
  62 // state.  Also, certain recorded per-BCI events are given one-bit counters
  63  * which overflow to a saturated state that applies to all counters at
  64 // that BCI.  In other words, there is a small lattice which approximates
  65 // the ideal of an infinite-precision counter for each event at each BCI,
  66 // and the lattice quickly "bottoms out" in a state where all counters
  67 // are taken to be indefinitely large.
  68 //
  69 // The reader will find many data races in profile gathering code, starting
  70 // with invocation counter incrementation.  None of these races harm correct
  71 // execution of the compiled code.
  72 
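// To make the saturation behavior described above concrete, here is a
// minimal sketch of the idiom used by the per-site counters in this file
// (compare JumpData::inc_taken() and BranchData::inc_not_taken() below):
//
//   uint cnt = taken() + 1;
//   if (cnt == 0) cnt--;          // wrapped: pin at the maximum value
//   set_uint_at(taken_off_set, cnt);
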
  73 // forward decl
  74 class ProfileData;
  75 
  76 // DataLayout
  77 //
  78 // Overlay for generic profiling data.
  79 class DataLayout {
  80   friend class VMStructs;
  81   friend class JVMCIVMStructs;
  82 
  83 private:
  84   // Every data layout begins with a header.  This header
  85   // contains a tag, which is used to indicate the size/layout
  86   // of the data, 8 bits of flags, which can be used in any way,
  87   // 32 bits of trap history (none/one reason/many reasons),
  88   // and a bci, which is used to tie this piece of data to a
  89   // specific bci in the bytecodes.
  90   union {
  91     u8 _bits;
  92     struct {
  93       u1 _tag;
  94       u1 _flags;
  95       u2 _bci;
  96       u4 _traps;
  97     } _struct;
  98   } _header;
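
  // For illustration only: on a little-endian LP64 platform the header
  // packs into the single u8 _bits word roughly as
  //
  //   _bits == _tag | (_flags << 8) | (_bci << 16) | ((u8)_traps << 32)
  //
  // The exact placement is platform-dependent; code should go through the
  // accessors below rather than assume this layout.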
  99 
 100   // The data layout has an arbitrary number of cells, each sized
 101   // to accommodate a pointer or an integer.
 102   intptr_t _cells[1];
 103 
 104   // Some types of data layouts need a length field.
 105   static bool needs_array_len(u1 tag);
 106 
 107 public:
 108   enum {
 109     counter_increment = 1
 110   };
 111 
 112   enum {
 113     cell_size = sizeof(intptr_t)
 114   };
 115 
 116   // Tag values
 117   enum : u1 {
 118     no_tag,
 119     bit_data_tag,
 120     counter_data_tag,
 121     jump_data_tag,
 122     receiver_type_data_tag,
 123     virtual_call_data_tag,
 124     ret_data_tag,
 125     branch_data_tag,
 126     multi_branch_data_tag,
 127     arg_info_data_tag,
 128     call_type_data_tag,
 129     virtual_call_type_data_tag,
 130     parameters_type_data_tag,
 131     speculative_trap_data_tag
 132   };
 133 
 134   enum {
 135     // The trap state breaks down as [recompile:1 | reason:31].
 136     // This further breakdown is defined in deoptimization.cpp.
 137     // See Deoptimization::trap_state_reason for an assert that
 138     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 139     //
 140     // The trap_state is collected only if ProfileTraps is true.
 141     trap_bits = 1+31,  // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
 142     trap_mask = -1,
 143     first_flag = 0
 144   };
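
  // Illustrative sketch of the [recompile:1 | reason:31] encoding above
  // (the authoritative decoding lives in deoptimization.cpp):
  //
  //   uint reason    = trap_state & 0x7fffffff;   // low 31 bits: trap reason
  //   bool recompile = (trap_state >> 31) != 0;   // top bit: recompile flag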
 145 
 146   // Size computation
 147   static int header_size_in_bytes() {
 148     return header_size_in_cells() * cell_size;
 149   }
 150   static int header_size_in_cells() {
 151     return LP64_ONLY(1) NOT_LP64(2);
 152   }
 153 
 154   static int compute_size_in_bytes(int cell_count) {
 155     return header_size_in_bytes() + cell_count * cell_size;
 156   }
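
  // Worked example, assuming an LP64 build (one header cell, cell_size == 8):
  // a layout with two data cells occupies
  //   compute_size_in_bytes(2) == 1*8 + 2*8 == 24 bytes.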
 157 
 158   // Initialization
 159   void initialize(u1 tag, u2 bci, int cell_count);
 160 
 161   // Accessors
 162   u1 tag() {
 163     return _header._struct._tag;
 164   }
 165 
 166   // Return 32 bits of trap state.
 167   // The state tells if traps with zero, one, or many reasons have occurred.
 168   // It also tells whether zero or many recompilations have occurred.
 169   // The associated trap histogram in the MDO itself tells whether
 170   // traps are common or not.  If a BCI shows that a trap X has
 171   // occurred, and the MDO shows N occurrences of X, we make the
 172   // simplifying assumption that all N occurrences can be blamed
 173   // on that BCI.
 174   uint trap_state() const {
 175     return _header._struct._traps;
 176   }
 177 
 178   void set_trap_state(uint new_state) {
 179     assert(ProfileTraps, "used only under +ProfileTraps");
 180     uint old_traps = _header._struct._traps;
 181     _header._struct._traps = new_state | old_traps;
 182   }
 183 
 184   u1 flags() const {
 185     return _header._struct._flags;
 186   }
 187 
 188   u2 bci() const {
 189     return _header._struct._bci;
 190   }
 191 
 192   void set_header(u8 value) {
 193     _header._bits = value;
 194   }
 195   u8 header() {
 196     return _header._bits;
 197   }
 198   void set_cell_at(int index, intptr_t value) {
 199     _cells[index] = value;
 200   }
 201   void release_set_cell_at(int index, intptr_t value);
 202   intptr_t cell_at(int index) const {
 203     return _cells[index];
 204   }
 205 
 206   void set_flag_at(u1 flag_number) {
 207     _header._struct._flags |= (u1)(0x1 << flag_number);
 208   }
 209   bool flag_at(u1 flag_number) const {
 210     return (_header._struct._flags & (0x1 << flag_number)) != 0;
 211   }
 212 
 213   // Low-level support for code generation.
 214   static ByteSize header_offset() {
 215     return byte_offset_of(DataLayout, _header);
 216   }
 217   static ByteSize tag_offset() {
 218     return byte_offset_of(DataLayout, _header._struct._tag);
 219   }
 220   static ByteSize flags_offset() {
 221     return byte_offset_of(DataLayout, _header._struct._flags);
 222   }
 223   static ByteSize bci_offset() {
 224     return byte_offset_of(DataLayout, _header._struct._bci);
 225   }
 226   static ByteSize cell_offset(int index) {
 227     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 228   }
 229   // Return a value which, when or-ed as a byte into _flags, sets the flag.
 230   static u1 flag_number_to_constant(u1 flag_number) {
 231     DataLayout temp; temp.set_header(0);
 232     temp.set_flag_at(flag_number);
 233     return temp._header._struct._flags;
 234   }
 235   // Return a value which, when or-ed as a word into _header, sets the flag.
 236   static u8 flag_mask_to_header_mask(u1 byte_constant) {
 237     DataLayout temp; temp.set_header(0);
 238     temp._header._struct._flags = byte_constant;
 239     return temp._header._bits;
 240   }
 241 
 242   ProfileData* data_in();
 243 
 244   int size_in_bytes() {
 245     int cells = cell_count();
 246     assert(cells >= 0, "invalid number of cells");
 247     return DataLayout::compute_size_in_bytes(cells);
 248   }
 249   int cell_count();
 250 
 251   // GC support
 252   void clean_weak_klass_links(bool always_clean);
 253 };
 254 
 255 
 256 // ProfileData class hierarchy
 257 class ProfileData;
 258 class   BitData;
 259 class     CounterData;
 260 class       ReceiverTypeData;
 261 class         VirtualCallData;
 262 class           VirtualCallTypeData;
 263 class       RetData;
 264 class       CallTypeData;
 265 class   JumpData;
 266 class     BranchData;
 267 class   ArrayData;
 268 class     MultiBranchData;
 269 class     ArgInfoData;
 270 class     ParametersTypeData;
 271 class   SpeculativeTrapData;
 272 
 273 // ProfileData
 274 //
 275 // A ProfileData object is created to refer to a section of profiling
 276 // data in a structured way.
 277 class ProfileData : public ResourceObj {
 278   friend class TypeEntries;
 279   friend class ReturnTypeEntry;
 280   friend class TypeStackSlotEntries;
 281 private:
 282   enum {
 283     tab_width_one = 16,
 284     tab_width_two = 36
 285   };
 286 
 287   // This is a pointer to a section of profiling data.
 288   DataLayout* _data;
 289 
 290   char* print_data_on_helper(const MethodData* md) const;
 291 
 292 protected:
 293   DataLayout* data() { return _data; }
 294   const DataLayout* data() const { return _data; }
 295 
 296   enum {
 297     cell_size = DataLayout::cell_size
 298   };
 299 
 300 public:
 301   // How many cells are in this?
 302   virtual int cell_count() const {
 303     ShouldNotReachHere();
 304     return -1;
 305   }
 306 
 307   // Return the size of this data.
 308   int size_in_bytes() {
 309     return DataLayout::compute_size_in_bytes(cell_count());
 310   }
 311 
 312 protected:
 313   // Low-level accessors for underlying data
 314   void set_intptr_at(int index, intptr_t value) {
 315     assert(0 <= index && index < cell_count(), "oob");
 316     data()->set_cell_at(index, value);
 317   }
 318   void release_set_intptr_at(int index, intptr_t value);
 319   intptr_t intptr_at(int index) const {
 320     assert(0 <= index && index < cell_count(), "oob");
 321     return data()->cell_at(index);
 322   }
 323   void set_uint_at(int index, uint value) {
 324     set_intptr_at(index, (intptr_t) value);
 325   }
 326   void release_set_uint_at(int index, uint value);
 327   uint uint_at(int index) const {
 328     return (uint)intptr_at(index);
 329   }
 330   void set_int_at(int index, int value) {
 331     set_intptr_at(index, (intptr_t) value);
 332   }
 333   void release_set_int_at(int index, int value);
 334   int int_at(int index) const {
 335     return (int)intptr_at(index);
 336   }
 337   int int_at_unchecked(int index) const {
 338     return (int)data()->cell_at(index);
 339   }
 340   void set_oop_at(int index, oop value) {
 341     set_intptr_at(index, cast_from_oop<intptr_t>(value));
 342   }
 343   oop oop_at(int index) const {
 344     return cast_to_oop(intptr_at(index));
 345   }
 346 
 347   void set_flag_at(u1 flag_number) {
 348     data()->set_flag_at(flag_number);
 349   }
 350   bool flag_at(u1 flag_number) const {
 351     return data()->flag_at(flag_number);
 352   }
 353 
 354   // two convenient imports for use by subclasses:
 355   static ByteSize cell_offset(int index) {
 356     return DataLayout::cell_offset(index);
 357   }
 358   static u1 flag_number_to_constant(u1 flag_number) {
 359     return DataLayout::flag_number_to_constant(flag_number);
 360   }
 361 
 362   ProfileData(DataLayout* data) {
 363     _data = data;
 364   }
 365 
 366 public:
 367   // Constructor for invalid ProfileData.
 368   ProfileData();
 369 
 370   u2 bci() const {
 371     return data()->bci();
 372   }
 373 
 374   address dp() {
 375     return (address)_data;
 376   }
 377 
 378   int trap_state() const {
 379     return data()->trap_state();
 380   }
 381   void set_trap_state(int new_state) {
 382     data()->set_trap_state(new_state);
 383   }
 384 
 385   // Type checking
 386   virtual bool is_BitData()         const { return false; }
 387   virtual bool is_CounterData()     const { return false; }
 388   virtual bool is_JumpData()        const { return false; }
 389   virtual bool is_ReceiverTypeData()const { return false; }
 390   virtual bool is_VirtualCallData() const { return false; }
 391   virtual bool is_RetData()         const { return false; }
 392   virtual bool is_BranchData()      const { return false; }
 393   virtual bool is_ArrayData()       const { return false; }
 394   virtual bool is_MultiBranchData() const { return false; }
 395   virtual bool is_ArgInfoData()     const { return false; }
 396   virtual bool is_CallTypeData()    const { return false; }
 397   virtual bool is_VirtualCallTypeData()const { return false; }
 398   virtual bool is_ParametersTypeData() const { return false; }
 399   virtual bool is_SpeculativeTrapData()const { return false; }
 400 
 401 
 402   BitData* as_BitData() const {
 403     assert(is_BitData(), "wrong type");
 404     return is_BitData()         ? (BitData*)        this : nullptr;
 405   }
 406   CounterData* as_CounterData() const {
 407     assert(is_CounterData(), "wrong type");
 408     return is_CounterData()     ? (CounterData*)    this : nullptr;
 409   }
 410   JumpData* as_JumpData() const {
 411     assert(is_JumpData(), "wrong type");
 412     return is_JumpData()        ? (JumpData*)       this : nullptr;
 413   }
 414   ReceiverTypeData* as_ReceiverTypeData() const {
 415     assert(is_ReceiverTypeData(), "wrong type");
 416     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : nullptr;
 417   }
 418   VirtualCallData* as_VirtualCallData() const {
 419     assert(is_VirtualCallData(), "wrong type");
 420     return is_VirtualCallData() ? (VirtualCallData*)this : nullptr;
 421   }
 422   RetData* as_RetData() const {
 423     assert(is_RetData(), "wrong type");
 424     return is_RetData()         ? (RetData*)        this : nullptr;
 425   }
 426   BranchData* as_BranchData() const {
 427     assert(is_BranchData(), "wrong type");
 428     return is_BranchData()      ? (BranchData*)     this : nullptr;
 429   }
 430   ArrayData* as_ArrayData() const {
 431     assert(is_ArrayData(), "wrong type");
 432     return is_ArrayData()       ? (ArrayData*)      this : nullptr;
 433   }
 434   MultiBranchData* as_MultiBranchData() const {
 435     assert(is_MultiBranchData(), "wrong type");
 436     return is_MultiBranchData() ? (MultiBranchData*)this : nullptr;
 437   }
 438   ArgInfoData* as_ArgInfoData() const {
 439     assert(is_ArgInfoData(), "wrong type");
 440     return is_ArgInfoData() ? (ArgInfoData*)this : nullptr;
 441   }
 442   CallTypeData* as_CallTypeData() const {
 443     assert(is_CallTypeData(), "wrong type");
 444     return is_CallTypeData() ? (CallTypeData*)this : nullptr;
 445   }
 446   VirtualCallTypeData* as_VirtualCallTypeData() const {
 447     assert(is_VirtualCallTypeData(), "wrong type");
 448     return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
 449   }
 450   ParametersTypeData* as_ParametersTypeData() const {
 451     assert(is_ParametersTypeData(), "wrong type");
 452     return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
 453   }
 454   SpeculativeTrapData* as_SpeculativeTrapData() const {
 455     assert(is_SpeculativeTrapData(), "wrong type");
 456     return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
 457   }
 458 
 459 
 460   // Subclass specific initialization
 461   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
 462 
 463   // GC support
 464   virtual void clean_weak_klass_links(bool always_clean) {}
 465 
 466   // CI translation: ProfileData can represent both MethodData data
 467   // and ciMethodData data. This function is provided for translating
 468   // an oop in a ProfileData to the ci equivalent. Generally speaking,
 469   // most ProfileData don't require any translation, so we provide the null
 470   // translation here, and the required translators are in the ci subclasses.
 471   virtual void translate_from(const ProfileData* data) {}
 472 
 473   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
 474     ShouldNotReachHere();
 475   }
 476 
 477   void print_data_on(outputStream* st, const MethodData* md) const;
 478 
 479   void print_shared(outputStream* st, const char* name, const char* extra) const;
 480   void tab(outputStream* st, bool first = false) const;
 481 };
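
// A minimal sketch of how the is_*/as_* pairs above are typically used when
// walking a method's profile (the walk itself is driven by MethodData):
//
//   ProfileData* data = ...;            // current entry of a MethodData walk
//   if (data->is_BranchData()) {
//     BranchData* branch = data->as_BranchData();
//     // branch->taken() and branch->not_taken() are now safe to query
//   }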
 482 
 483 // BitData
 484 //
 485 // A BitData holds a flag or two in its header.
 486 class BitData : public ProfileData {
 487   friend class VMStructs;
 488   friend class JVMCIVMStructs;
 489 protected:
 490   enum : u1 {
 491     // null_seen:
 492     //  saw a null operand (cast/aastore/instanceof)
 493       null_seen_flag              = DataLayout::first_flag + 0
 494 #if INCLUDE_JVMCI
 495     // bytecode threw any exception
 496     , exception_seen_flag         = null_seen_flag + 1
 497 #endif
 498   };
 499   enum { bit_cell_count = 0 };  // no additional data fields needed.
 500 public:
 501   BitData(DataLayout* layout) : ProfileData(layout) {
 502   }
 503 
 504   virtual bool is_BitData() const { return true; }
 505 
 506   static int static_cell_count() {
 507     return bit_cell_count;
 508   }
 509 
 510   virtual int cell_count() const {
 511     return static_cell_count();
 512   }
 513 
 514   // Accessor
 515 
 516   // The null_seen flag bit is specially known to the interpreter.
 517   // Consulting it allows the compiler to avoid setting up null_check traps.
 518   bool null_seen()     { return flag_at(null_seen_flag); }
 519   void set_null_seen()    { set_flag_at(null_seen_flag); }
 520 
 521 #if INCLUDE_JVMCI
 522   // true if an exception was thrown at the specific BCI
 523   bool exception_seen() { return flag_at(exception_seen_flag); }
 524   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 525 #endif
 526 
 527   // Code generation support
 528   static u1 null_seen_byte_constant() {
 529     return flag_number_to_constant(null_seen_flag);
 530   }
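
  // For illustration: null_seen_flag is flag number 0, so
  // null_seen_byte_constant() evaluates to 0x01, the byte value that
  // generated code can OR into the flags byte to record null_seen.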
 531 
 532   static ByteSize bit_data_size() {
 533     return cell_offset(bit_cell_count);
 534   }
 535 
 536   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 537 };
 538 
 539 // CounterData
 540 //
 541 // A CounterData corresponds to a simple counter.
 542 class CounterData : public BitData {
 543   friend class VMStructs;
 544   friend class JVMCIVMStructs;
 545 protected:
 546   enum {
 547     count_off,
 548     counter_cell_count
 549   };
 550 public:
 551   CounterData(DataLayout* layout) : BitData(layout) {}
 552 
 553   virtual bool is_CounterData() const { return true; }
 554 
 555   static int static_cell_count() {
 556     return counter_cell_count;
 557   }
 558 
 559   virtual int cell_count() const {
 560     return static_cell_count();
 561   }
 562 
 563   // Direct accessor
 564   int count() const {
 565     intptr_t raw_data = intptr_at(count_off);
 566     if (raw_data > max_jint) {
 567       raw_data = max_jint;
 568     } else if (raw_data < min_jint) {
 569       raw_data = min_jint;
 570     }
 571     return int(raw_data);
 572   }
 573 
 574   // Code generation support
 575   static ByteSize count_offset() {
 576     return cell_offset(count_off);
 577   }
 578   static ByteSize counter_data_size() {
 579     return cell_offset(counter_cell_count);
 580   }
 581 
 582   void set_count(int count) {
 583     set_int_at(count_off, count);
 584   }
 585 
 586   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 587 };
 588 
 589 // JumpData
 590 //
 591 // A JumpData is used to access profiling information for a direct
 592 // branch.  It is a counter, used for counting the number of branches,
 593 // plus a data displacement, used for realigning the data pointer to
 594 // the corresponding target bci.
 595 class JumpData : public ProfileData {
 596   friend class VMStructs;
 597   friend class JVMCIVMStructs;
 598 protected:
 599   enum {
 600     taken_off_set,
 601     displacement_off_set,
 602     jump_cell_count
 603   };
 604 
 605   void set_displacement(int displacement) {
 606     set_int_at(displacement_off_set, displacement);
 607   }
 608 
 609 public:
 610   JumpData(DataLayout* layout) : ProfileData(layout) {
 611     assert(layout->tag() == DataLayout::jump_data_tag ||
 612       layout->tag() == DataLayout::branch_data_tag, "wrong type");
 613   }
 614 
 615   virtual bool is_JumpData() const { return true; }
 616 
 617   static int static_cell_count() {
 618     return jump_cell_count;
 619   }
 620 
 621   virtual int cell_count() const {
 622     return static_cell_count();
 623   }
 624 
 625   // Direct accessor
 626   uint taken() const {
 627     return uint_at(taken_off_set);
 628   }
 629 
 630   void set_taken(uint cnt) {
 631     set_uint_at(taken_off_set, cnt);
 632   }
 633 
 634   // Saturating counter
 635   uint inc_taken() {
 636     uint cnt = taken() + 1;
 637     // Did we wrap? If so, saturate at the maximum instead of rolling over to zero.
 638     if (cnt == 0) cnt--;
 639     set_uint_at(taken_off_set, cnt);
 640     return cnt;
 641   }
 642 
 643   int displacement() const {
 644     return int_at(displacement_off_set);
 645   }
 646 
 647   // Code generation support
 648   static ByteSize taken_offset() {
 649     return cell_offset(taken_off_set);
 650   }
 651 
 652   static ByteSize displacement_offset() {
 653     return cell_offset(displacement_off_set);
 654   }
 655 
 656   // Specific initialization.
 657   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 658 
 659   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 660 };
 661 
 662 // Entries in a ProfileData object to record types: it can either be
 663 // none (no profile), unknown (conflicting profile data) or a klass if
 664 // a single one is seen. Whether a null reference was seen is also
 665 // recorded. No counter is associated with the type and a single type
 666 // is tracked (unlike VirtualCallData).
 667 class TypeEntries {
 668 
 669 public:
 670 
 671   // A single cell is used to record information for a type:
 672   // - the cell is initialized to 0
 673   // - when a type is discovered it is stored in the cell
 674   // - bit 0 of the cell is used to record whether a null reference
 675   // was encountered or not
 676   // - bit 1 is set to record a conflict in the type information
 677 
 678   enum {
 679     null_seen = 1,
 680     type_mask = ~null_seen,
 681     type_unknown = 2,
 682     status_bits = null_seen | type_unknown,
 683     type_klass_mask = ~status_bits
 684   };
 685 
 686   // what to initialize a cell to
 687   static intptr_t type_none() {
 688     return 0;
 689   }
 690 
 691   // null seen = bit 0 set?
 692   static bool was_null_seen(intptr_t v) {
 693     return (v & null_seen) != 0;
 694   }
 695 
 696   // conflicting type information = bit 1 set?
 697   static bool is_type_unknown(intptr_t v) {
 698     return (v & type_unknown) != 0;
 699   }
 700 
 701   // no type information yet = all bits cleared, ignoring bit 0?
 702   static bool is_type_none(intptr_t v) {
 703     return (v & type_mask) == 0;
 704   }
 705 
 706   // recorded type: cell with bits 0 and 1 cleared
 707   static intptr_t klass_part(intptr_t v) {
 708     intptr_t r = v & type_klass_mask;
 709     return r;
 710   }
 711 
 712   // type recorded
 713   static Klass* valid_klass(intptr_t k) {
 714     if (!is_type_none(k) &&
 715         !is_type_unknown(k)) {
 716       Klass* res = (Klass*)klass_part(k);
 717       assert(res != nullptr, "invalid");
 718       return res;
 719     } else {
 720       return nullptr;
 721     }
 722   }
 723 
 724   static intptr_t with_status(intptr_t k, intptr_t in) {
 725     return k | (in & status_bits);
 726   }
 727 
 728   static intptr_t with_status(Klass* k, intptr_t in) {
 729     return with_status((intptr_t)k, in);
 730   }
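
  // Worked example of the encoding above: if klass K and a null reference
  // have both been seen, the cell holds
  //   with_status((Klass*)K, null_seen) == (intptr_t)K | 0x1
  // so valid_klass(cell) strips the status bits and returns K, while
  // was_null_seen(cell) reports true. Klass pointers are sufficiently
  // aligned that bits 0 and 1 are free to carry this status.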
 731 
 732   static void print_klass(outputStream* st, intptr_t k);
 733 
 734 protected:
 735   // ProfileData object these entries are part of
 736   ProfileData* _pd;
 737   // offset within the ProfileData object where the entries start
 738   const int _base_off;
 739 
 740   TypeEntries(int base_off)
 741     : _pd(nullptr), _base_off(base_off) {}
 742 
 743   void set_intptr_at(int index, intptr_t value) {
 744     _pd->set_intptr_at(index, value);
 745   }
 746 
 747   intptr_t intptr_at(int index) const {
 748     return _pd->intptr_at(index);
 749   }
 750 
 751 public:
 752   void set_profile_data(ProfileData* pd) {
 753     _pd = pd;
 754   }
 755 };
 756 
 757 // Type entries used for arguments passed at a call and parameters on
 758 // method entry. 2 cells per entry: one for the type encoded as in
 759 // TypeEntries and one initialized with the stack slot where the
 760 // profiled object is to be found so that the interpreter can locate
 761 // it quickly.
 762 class TypeStackSlotEntries : public TypeEntries {
 763 
 764 private:
 765   enum {
 766     stack_slot_entry,
 767     type_entry,
 768     per_arg_cell_count
 769   };
 770 
 771   // offset of cell for stack slot for entry i within ProfileData object
 772   int stack_slot_offset(int i) const {
 773     return _base_off + stack_slot_local_offset(i);
 774   }
 775 
 776   const int _number_of_entries;
 777 
 778   // offset of cell for type for entry i within ProfileData object
 779   int type_offset_in_cells(int i) const {
 780     return _base_off + type_local_offset(i);
 781   }
 782 
 783 public:
 784 
 785   TypeStackSlotEntries(int base_off, int nb_entries)
 786     : TypeEntries(base_off), _number_of_entries(nb_entries) {}
 787 
 788   static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
 789 
 790   void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
 791 
 792   int number_of_entries() const { return _number_of_entries; }
 793 
 794   // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
 795   static int stack_slot_local_offset(int i) {
 796     return i * per_arg_cell_count + stack_slot_entry;
 797   }
 798 
 799   // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
 800   static int type_local_offset(int i) {
 801     return i * per_arg_cell_count + type_entry;
 802   }
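
  // For illustration, with per_arg_cell_count == 2 the cells for entry i
  // (relative to this block) are laid out as:
  //   cell 2*i     : stack slot where the profiled argument is found
  //   cell 2*i + 1 : its type, encoded as in TypeEntries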
 803 
 804   // stack slot for entry i
 805   uint stack_slot(int i) const {
 806     assert(i >= 0 && i < _number_of_entries, "oob");
 807     return _pd->uint_at(stack_slot_offset(i));
 808   }
 809 
 810   // set stack slot for entry i
 811   void set_stack_slot(int i, uint num) {
 812     assert(i >= 0 && i < _number_of_entries, "oob");
 813     _pd->set_uint_at(stack_slot_offset(i), num);
 814   }
 815 
 816   // type for entry i
 817   intptr_t type(int i) const {
 818     assert(i >= 0 && i < _number_of_entries, "oob");
 819     return _pd->intptr_at(type_offset_in_cells(i));
 820   }
 821 
 822   // set type for entry i
 823   void set_type(int i, intptr_t k) {
 824     assert(i >= 0 && i < _number_of_entries, "oob");
 825     _pd->set_intptr_at(type_offset_in_cells(i), k);
 826   }
 827 
 828   static ByteSize per_arg_size() {
 829     return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
 830   }
 831 
 832   static int per_arg_count() {
 833     return per_arg_cell_count;
 834   }
 835 
 836   ByteSize type_offset(int i) const {
 837     return DataLayout::cell_offset(type_offset_in_cells(i));
 838   }
 839 
 840   // GC support
 841   void clean_weak_klass_links(bool always_clean);
 842 
 843   void print_data_on(outputStream* st) const;
 844 };
 845 
 846 // Type entry used for return from a call. A single cell to record the
 847 // type.
 848 class ReturnTypeEntry : public TypeEntries {
 849 
 850 private:
 851   enum {
 852     cell_count = 1
 853   };
 854 
 855 public:
 856   ReturnTypeEntry(int base_off)
 857     : TypeEntries(base_off) {}
 858 
 859   void post_initialize() {
 860     set_type(type_none());
 861   }
 862 
 863   intptr_t type() const {
 864     return _pd->intptr_at(_base_off);
 865   }
 866 
 867   void set_type(intptr_t k) {
 868     _pd->set_intptr_at(_base_off, k);
 869   }
 870 
 871   static int static_cell_count() {
 872     return cell_count;
 873   }
 874 
 875   static ByteSize size() {
 876     return in_ByteSize(cell_count * DataLayout::cell_size);
 877   }
 878 
 879   ByteSize type_offset() {
 880     return DataLayout::cell_offset(_base_off);
 881   }
 882 
 883   // GC support
 884   void clean_weak_klass_links(bool always_clean);
 885 
 886   void print_data_on(outputStream* st) const;
 887 };
 888 
 889 // Entries to collect type information at a call: contains arguments
 890 // (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
 891 // number of cells. Because the number of cells for the return type is
 892   // smaller than the number of cells for the type of an argument, the
 893 // number of cells is used to tell how many arguments are profiled and
 894 // whether a return value is profiled. See has_arguments() and
 895 // has_return().
 896 class TypeEntriesAtCall {
 897 private:
 898   static int stack_slot_local_offset(int i) {
 899     return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
 900   }
 901 
 902   static int argument_type_local_offset(int i) {
 903     return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
 904   }
 905 
 906 public:
 907 
 908   static int header_cell_count() {
 909     return 1;
 910   }
 911 
 912   static int cell_count_local_offset() {
 913     return 0;
 914   }
 915 
 916   static int compute_cell_count(BytecodeStream* stream);
 917 
 918   static void initialize(DataLayout* dl, int base, int cell_count) {
 919     int off = base + cell_count_local_offset();
 920     dl->set_cell_at(off, cell_count - base - header_cell_count());
 921   }
 922 
 923   static bool arguments_profiling_enabled();
 924   static bool return_profiling_enabled();
 925 
 926   // Code generation support
 927   static ByteSize cell_count_offset() {
 928     return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
 929   }
 930 
 931   static ByteSize args_data_offset() {
 932     return in_ByteSize(header_cell_count() * DataLayout::cell_size);
 933   }
 934 
 935   static ByteSize stack_slot_offset(int i) {
 936     return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
 937   }
 938 
 939   static ByteSize argument_type_offset(int i) {
 940     return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
 941   }
 942 
 943   static ByteSize return_only_size() {
 944     return ReturnTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
 945   }
 946 
 947 };
 948 
 949 // CallTypeData
 950 //
 951 // A CallTypeData is used to access profiling information about a
 952 // non-virtual call for which we collect type information about arguments
 953 // and return value.
 954 class CallTypeData : public CounterData {
 955 private:
 956   // entries for arguments if any
 957   TypeStackSlotEntries _args;
 958   // entry for return type if any
 959   ReturnTypeEntry _ret;
 960 
 961   int cell_count_global_offset() const {
 962     return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
 963   }
 964 
 965   // number of cells not counting the header
 966   int cell_count_no_header() const {
 967     return uint_at(cell_count_global_offset());
 968   }
 969 
 970   void check_number_of_arguments(int total) {
 971     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
 972   }
 973 
 974 public:
 975   CallTypeData(DataLayout* layout) :
 976     CounterData(layout),
 977     _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
 978     _ret(cell_count() - ReturnTypeEntry::static_cell_count())
 979   {
 980     assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
 981     // Some compilers (VC++) don't want this passed in member initialization list
 982     _args.set_profile_data(this);
 983     _ret.set_profile_data(this);
 984   }
 985 
 986   const TypeStackSlotEntries* args() const {
 987     assert(has_arguments(), "no profiling of arguments");
 988     return &_args;
 989   }
 990 
 991   const ReturnTypeEntry* ret() const {
 992     assert(has_return(), "no profiling of return value");
 993     return &_ret;
 994   }
 995 
 996   virtual bool is_CallTypeData() const { return true; }
 997 
 998   static int static_cell_count() {
 999     return -1;
1000   }
1001 
1002   static int compute_cell_count(BytecodeStream* stream) {
1003     return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1004   }
1005 
1006   static void initialize(DataLayout* dl, int cell_count) {
1007     TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
1008   }
1009 
1010   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1011 
1012   virtual int cell_count() const {
1013     return CounterData::static_cell_count() +
1014       TypeEntriesAtCall::header_cell_count() +
1015       int_at_unchecked(cell_count_global_offset());
1016   }
1017 
1018   int number_of_arguments() const {
1019     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1020   }
1021 
1022   void set_argument_type(int i, Klass* k) {
1023     assert(has_arguments(), "no arguments!");
1024     intptr_t current = _args.type(i);
1025     _args.set_type(i, TypeEntries::with_status(k, current));
1026   }
1027 
1028   void set_return_type(Klass* k) {
1029     assert(has_return(), "no return!");
1030     intptr_t current = _ret.type();
1031     _ret.set_type(TypeEntries::with_status(k, current));
1032   }
1033 
1034   // An entry for a return value takes less space than an entry for an
1035   // argument, so if the number of cells is at least the number of cells
1036   // needed for an argument, this object contains type information for
1037   // at least one argument.
1038   bool has_arguments() const {
1039     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1040     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1041     return res;
1042   }
1043 
1044   // An entry for a return value takes less space than an entry for an
1045   // argument, so if the remainder of the number of cells divided by
1046   // the number of cells for an argument is non-zero, a return value
1047   // is profiled in this object.
1048   bool has_return() const {
1049     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1050     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1051     return res;
1052   }
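
  // Worked example, assuming both argument and return profiling are enabled
  // at this site: profiling 2 arguments plus the return value stores
  // 2 * per_arg_count() + 1 == 5 cells after the header, so
  // number_of_arguments() == 2, has_arguments() is true (5 >= 2) and
  // has_return() is true (5 % 2 != 0). Without return profiling the count
  // would be 4 and has_return() would be false.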
1053 
1054   // Code generation support
1055   static ByteSize args_data_offset() {
1056     return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1057   }
1058 
1059   ByteSize argument_type_offset(int i) {
1060     return _args.type_offset(i);
1061   }
1062 
1063   ByteSize return_type_offset() {
1064     return _ret.type_offset();
1065   }
1066 
1067   // GC support
1068   virtual void clean_weak_klass_links(bool always_clean) {
1069     if (has_arguments()) {
1070       _args.clean_weak_klass_links(always_clean);
1071     }
1072     if (has_return()) {
1073       _ret.clean_weak_klass_links(always_clean);
1074     }
1075   }
1076 
1077   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1078 };
1079 
1080 // ReceiverTypeData
1081 //
1082 // A ReceiverTypeData is used to access profiling information about a
1083 // dynamic type check.  It consists of a counter which counts the total times
1084 // that the check is reached, and a series of (Klass*, count) pairs
1085 // which are used to store a type profile for the receiver of the check.
1086 class ReceiverTypeData : public CounterData {
1087   friend class VMStructs;
1088   friend class JVMCIVMStructs;
1089 protected:
1090   enum {
1091 #if INCLUDE_JVMCI
1092     // Description of the different counters
1093     // ReceiverTypeData for instanceof/checkcast/aastore:
1094     //   count is decremented for failed type checks
1095     //   JVMCI only: nonprofiled_count is incremented on type overflow
1096     // VirtualCallData for invokevirtual/invokeinterface:
1097     //   count is incremented on type overflow
1098     //   JVMCI only: nonprofiled_count is incremented on method overflow
1099 
1100     // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
1101     nonprofiled_count_off_set = counter_cell_count,
1102     receiver0_offset,
1103 #else
1104     receiver0_offset = counter_cell_count,
1105 #endif
1106     count0_offset,
1107     receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1108   };
1109 
1110 public:
1111   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1112     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1113            layout->tag() == DataLayout::virtual_call_data_tag ||
1114            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1115   }
1116 
1117   virtual bool is_ReceiverTypeData() const { return true; }
1118 
1119   static int static_cell_count() {
1120     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
1121   }
1122 
1123   virtual int cell_count() const {
1124     return static_cell_count();
1125   }
1126 
1127   // Direct accessors
1128   static uint row_limit() {
1129     return (uint) TypeProfileWidth;
1130   }
1131   static int receiver_cell_index(uint row) {
1132     return receiver0_offset + row * receiver_type_row_cell_count;
1133   }
1134   static int receiver_count_cell_index(uint row) {
1135     return count0_offset + row * receiver_type_row_cell_count;
1136   }
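
  // For illustration: each profiled row occupies receiver_type_row_cell_count
  // (two) cells, a Klass* at receiver_cell_index(row) and its hit count at
  // receiver_count_cell_index(row), so with TypeProfileWidth == 2 the data
  // holds the shared count cell, (JVMCI only) the nonprofiled count, and two
  // (receiver, count) pairs.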
1137 
1138   Klass* receiver(uint row) const {
1139     assert(row < row_limit(), "oob");
1140 
1141     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
1142     assert(recv == nullptr || recv->is_klass(), "wrong type");
1143     return recv;
1144   }
1145 
1146   void set_receiver(uint row, Klass* k) {
1147     assert((uint)row < row_limit(), "oob");
1148     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
1149   }
1150 
1151   uint receiver_count(uint row) const {
1152     assert(row < row_limit(), "oob");
1153     return uint_at(receiver_count_cell_index(row));
1154   }
1155 
1156   void set_receiver_count(uint row, uint count) {
1157     assert(row < row_limit(), "oob");
1158     set_uint_at(receiver_count_cell_index(row), count);
1159   }
1160 
1161   void clear_row(uint row) {
1162     assert(row < row_limit(), "oob");
1163     // Clear the total count - the indicator of a polymorphic call site.
1164     // The site may look monomorphic after that, but clearing gives
1165     // more accurate profiling information because there was an
1166     // execution phase change since the klasses were unloaded.
1167     // If the site is still polymorphic then the MDO will be updated
1168     // to reflect it. But it could be the case that the site has become
1169     // only bimorphic; then keeping a non-zero total count would be wrong.
1170     // Even if we compile it as monomorphic (when it is not), we will
1171     // only take a trap, deoptimize, and recompile again with an
1172     // updated MDO after executing the method in the interpreter.
1173     // An additional receiver will be recorded in the cleaned row
1174     // during the next call execution.
1175     //
1176     // Note: our profiling logic works with empty rows in any slot.
1177     // We sort the profiling info (ciCallProfile) for compilation.
1178     //
1179     set_count(0);
1180     set_receiver(row, nullptr);
1181     set_receiver_count(row, 0);
1182 #if INCLUDE_JVMCI
1183     if (!this->is_VirtualCallData()) {
1184       // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
1185       // must also be reset (see "Description of the different counters" above)
1186       set_nonprofiled_count(0);
1187     }
1188 #endif
1189   }
1190 
1191   // Code generation support
1192   static ByteSize receiver_offset(uint row) {
1193     return cell_offset(receiver_cell_index(row));
1194   }
1195   static ByteSize receiver_count_offset(uint row) {
1196     return cell_offset(receiver_count_cell_index(row));
1197   }
1198 #if INCLUDE_JVMCI
1199   static ByteSize nonprofiled_receiver_count_offset() {
1200     return cell_offset(nonprofiled_count_off_set);
1201   }
1202   uint nonprofiled_count() const {
1203     return uint_at(nonprofiled_count_off_set);
1204   }
1205   void set_nonprofiled_count(uint count) {
1206     set_uint_at(nonprofiled_count_off_set, count);
1207   }
1208 #endif // INCLUDE_JVMCI
1209   static ByteSize receiver_type_data_size() {
1210     return cell_offset(static_cell_count());
1211   }
1212 
1213   // GC support
1214   virtual void clean_weak_klass_links(bool always_clean);
1215 
1216   void print_receiver_data_on(outputStream* st) const;
1217   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1218 };
1219 
1220 // VirtualCallData
1221 //
1222 // A VirtualCallData is used to access profiling information about a
1223 // virtual call.  For now, it has nothing more than a ReceiverTypeData.
1224 class VirtualCallData : public ReceiverTypeData {
1225 public:
1226   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
1227     assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1228            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1229   }
1230 
1231   virtual bool is_VirtualCallData() const { return true; }
1232 
1233   static int static_cell_count() {
1234     // At this point we could add more profile state, e.g., for arguments.
1235     // But for now it's the same size as the base record type.
1236     return ReceiverTypeData::static_cell_count();
1237   }
1238 
1239   virtual int cell_count() const {
1240     return static_cell_count();
1241   }
1242 
1243   // Direct accessors
1244   static ByteSize virtual_call_data_size() {
1245     return cell_offset(static_cell_count());
1246   }
1247 
1248   void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1249   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1250 };
1251 
1252 // VirtualCallTypeData
1253 //
1254 // A VirtualCallTypeData is used to access profiling information about
1255 // a virtual call for which we collect type information about
1256 // arguments and return value.
1257 class VirtualCallTypeData : public VirtualCallData {
1258 private:
1259   // entries for arguments if any
1260   TypeStackSlotEntries _args;
1261   // entry for return type if any
1262   ReturnTypeEntry _ret;
1263 
1264   int cell_count_global_offset() const {
1265     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1266   }
1267 
1268   // number of cells not counting the header
1269   int cell_count_no_header() const {
1270     return uint_at(cell_count_global_offset());
1271   }
1272 
1273   void check_number_of_arguments(int total) {
1274     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1275   }
1276 
1277 public:
1278   VirtualCallTypeData(DataLayout* layout) :
1279     VirtualCallData(layout),
1280     _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1281     _ret(cell_count() - ReturnTypeEntry::static_cell_count())
1282   {
1283     assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1284     // Some compilers (VC++) don't want this passed in member initialization list
1285     _args.set_profile_data(this);
1286     _ret.set_profile_data(this);
1287   }
1288 
1289   const TypeStackSlotEntries* args() const {
1290     assert(has_arguments(), "no profiling of arguments");
1291     return &_args;
1292   }
1293 
1294   const ReturnTypeEntry* ret() const {
1295     assert(has_return(), "no profiling of return value");
1296     return &_ret;
1297   }
1298 
1299   virtual bool is_VirtualCallTypeData() const { return true; }
1300 
1301   static int static_cell_count() {
1302     return -1;
1303   }
1304 
1305   static int compute_cell_count(BytecodeStream* stream) {
1306     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1307   }
1308 
1309   static void initialize(DataLayout* dl, int cell_count) {
1310     TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
1311   }
1312 
1313   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1314 
1315   virtual int cell_count() const {
1316     return VirtualCallData::static_cell_count() +
1317       TypeEntriesAtCall::header_cell_count() +
1318       int_at_unchecked(cell_count_global_offset());
1319   }
1320 
1321   int number_of_arguments() const {
1322     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1323   }
1324 
1325   void set_argument_type(int i, Klass* k) {
1326     assert(has_arguments(), "no arguments!");
1327     intptr_t current = _args.type(i);
1328     _args.set_type(i, TypeEntries::with_status(k, current));
1329   }
1330 
1331   void set_return_type(Klass* k) {
1332     assert(has_return(), "no return!");
1333     intptr_t current = _ret.type();
1334     _ret.set_type(TypeEntries::with_status(k, current));
1335   }
1336 
1337   // An entry for a return value takes less space than an entry for an
1338   // argument, so if the remainder of the number of cells divided by
1339   // the number of cells for an argument is non-zero, a return value
1340   // is profiled in this object.
1341   bool has_return() const {
1342     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1343     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1344     return res;
1345   }
1346 
1347   // An entry for a return value takes less space than an entry for an
1348   // argument, so if the number of cells is at least the number of cells
1349   // needed for an argument, this object contains type information for
1350   // at least one argument.
1351   bool has_arguments() const {
1352     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1353     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1354     return res;
1355   }
1356 
1357   // Code generation support
1358   static ByteSize args_data_offset() {
1359     return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1360   }
1361 
1362   ByteSize argument_type_offset(int i) {
1363     return _args.type_offset(i);
1364   }
1365 
1366   ByteSize return_type_offset() {
1367     return _ret.type_offset();
1368   }
1369 
1370   // GC support
1371   virtual void clean_weak_klass_links(bool always_clean) {
1372     ReceiverTypeData::clean_weak_klass_links(always_clean);
1373     if (has_arguments()) {
1374       _args.clean_weak_klass_links(always_clean);
1375     }
1376     if (has_return()) {
1377       _ret.clean_weak_klass_links(always_clean);
1378     }
1379   }
1380 
1381   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1382 };
1383 
1384 // RetData
1385 //
1386 // A RetData is used to access profiling information for a ret bytecode.
1387 // It is composed of a count of the number of times that the ret has
1388 // been executed, followed by a series of triples of the form
1389 // (bci, count, di) which count the number of times that some bci was the
1390 // target of the ret and cache a corresponding data displacement.
1391 class RetData : public CounterData {
1392 protected:
1393   enum {
1394     bci0_offset = counter_cell_count,
1395     count0_offset,
1396     displacement0_offset,
1397     ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1398   };
1399 
1400   void set_bci(uint row, int bci) {
1401     assert((uint)row < row_limit(), "oob");
1402     set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1403   }
1404   void release_set_bci(uint row, int bci);
1405   void set_bci_count(uint row, uint count) {
1406     assert((uint)row < row_limit(), "oob");
1407     set_uint_at(count0_offset + row * ret_row_cell_count, count);
1408   }
1409   void set_bci_displacement(uint row, int disp) {
1410     set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1411   }
1412 
1413 public:
1414   RetData(DataLayout* layout) : CounterData(layout) {
1415     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
1416   }
1417 
1418   virtual bool is_RetData() const { return true; }
1419 
1420   enum {
1421     no_bci = -1 // value of bci when bci1/2 are not in use.
1422   };
1423 
1424   static int static_cell_count() {
1425     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
1426   }
1427 
1428   virtual int cell_count() const {
1429     return static_cell_count();
1430   }
1431 
1432   static uint row_limit() {
1433     return (uint) BciProfileWidth;
1434   }
1435   static int bci_cell_index(uint row) {
1436     return bci0_offset + row * ret_row_cell_count;
1437   }
1438   static int bci_count_cell_index(uint row) {
1439     return count0_offset + row * ret_row_cell_count;
1440   }
1441   static int bci_displacement_cell_index(uint row) {
1442     return displacement0_offset + row * ret_row_cell_count;
1443   }
1444 
1445   // Direct accessors
1446   int bci(uint row) const {
1447     return int_at(bci_cell_index(row));
1448   }
1449   uint bci_count(uint row) const {
1450     return uint_at(bci_count_cell_index(row));
1451   }
1452   int bci_displacement(uint row) const {
1453     return int_at(bci_displacement_cell_index(row));
1454   }
1455 
1456   // Interpreter Runtime support
1457   address fixup_ret(int return_bci, MethodData* mdo);
1458 
1459   // Code generation support
1460   static ByteSize bci_offset(uint row) {
1461     return cell_offset(bci_cell_index(row));
1462   }
1463   static ByteSize bci_count_offset(uint row) {
1464     return cell_offset(bci_count_cell_index(row));
1465   }
1466   static ByteSize bci_displacement_offset(uint row) {
1467     return cell_offset(bci_displacement_cell_index(row));
1468   }
1469 
1470   // Specific initialization.
1471   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1472 
1473   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1474 };
1475 
1476 // BranchData
1477 //
1478 // A BranchData is used to access profiling data for a two-way branch.
1479 // It consists of taken and not_taken counts as well as a data displacement
1480 // for the taken case.
1481 class BranchData : public JumpData {
1482   friend class VMStructs;
1483   friend class JVMCIVMStructs;
1484 protected:
1485   enum {
1486     not_taken_off_set = jump_cell_count,
1487     branch_cell_count
1488   };
1489 
1490   void set_displacement(int displacement) {
1491     set_int_at(displacement_off_set, displacement);
1492   }
1493 
1494 public:
1495   BranchData(DataLayout* layout) : JumpData(layout) {
1496     assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
1497   }
1498 
1499   virtual bool is_BranchData() const { return true; }
1500 
1501   static int static_cell_count() {
1502     return branch_cell_count;
1503   }
1504 
1505   virtual int cell_count() const {
1506     return static_cell_count();
1507   }
1508 
1509   // Direct accessor
1510   uint not_taken() const {
1511     return uint_at(not_taken_off_set);
1512   }
1513 
1514   void set_not_taken(uint cnt) {
1515     set_uint_at(not_taken_off_set, cnt);
1516   }
1517 
1518   uint inc_not_taken() {
1519     uint cnt = not_taken() + 1;
1520     // Did we wrap? If so, saturate at the maximum instead of rolling over to zero.
1521     if (cnt == 0) cnt--;
1522     set_uint_at(not_taken_off_set, cnt);
1523     return cnt;
1524   }
1525 
1526   // Code generation support
1527   static ByteSize not_taken_offset() {
1528     return cell_offset(not_taken_off_set);
1529   }
1530   static ByteSize branch_data_size() {
1531     return cell_offset(branch_cell_count);
1532   }
1533 
1534   // Specific initialization.
1535   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1536 
1537   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1538 };
1539 
1540 // ArrayData
1541 //
1542 // An ArrayData is a base class for accessing profiling data which does
1543 // not have a statically known size.  It consists of an array length
1544 // and an array start.
1545 class ArrayData : public ProfileData {
1546   friend class VMStructs;
1547   friend class JVMCIVMStructs;
1548 protected:
1549   friend class DataLayout;
1550 
1551   enum {
1552     array_len_off_set,
1553     array_start_off_set
1554   };
1555 
1556   uint array_uint_at(int index) const {
1557     int aindex = index + array_start_off_set;
1558     return uint_at(aindex);
1559   }
1560   int array_int_at(int index) const {
1561     int aindex = index + array_start_off_set;
1562     return int_at(aindex);
1563   }
1564   oop array_oop_at(int index) const {
1565     int aindex = index + array_start_off_set;
1566     return oop_at(aindex);
1567   }
1568   void array_set_int_at(int index, int value) {
1569     int aindex = index + array_start_off_set;
1570     set_int_at(aindex, value);
1571   }
1572 
1573   // Code generation support for subclasses.
1574   static ByteSize array_element_offset(int index) {
1575     return cell_offset(array_start_off_set + index);
1576   }
1577 
1578 public:
1579   ArrayData(DataLayout* layout) : ProfileData(layout) {}
1580 
1581   virtual bool is_ArrayData() const { return true; }
1582 
1583   static int static_cell_count() {
1584     return -1;
1585   }
1586 
1587   int array_len() const {
1588     return int_at_unchecked(array_len_off_set);
1589   }
1590 
1591   virtual int cell_count() const {
1592     return array_len() + 1;
1593   }
1594 
1595   // Code generation support
1596   static ByteSize array_len_offset() {
1597     return cell_offset(array_len_off_set);
1598   }
1599   static ByteSize array_start_offset() {
1600     return cell_offset(array_start_off_set);
1601   }
1602 };
1603 
1604 // MultiBranchData
1605 //
1606 // A MultiBranchData is used to access profiling information for
1607 // a multi-way branch (*switch bytecodes).  It consists of a series
1608 // of (count, displacement) pairs, which count the number of times each
1609 // case was taken and specify the data displacement for each branch target.
1610 class MultiBranchData : public ArrayData {
1611   friend class VMStructs;
1612   friend class JVMCIVMStructs;
1613 protected:
1614   enum {
1615     default_count_off_set,
1616     default_disaplacement_off_set,
1617     case_array_start
1618   };
1619   enum {
1620     relative_count_off_set,
1621     relative_displacement_off_set,
1622     per_case_cell_count
1623   };
1624 
1625   void set_default_displacement(int displacement) {
1626     array_set_int_at(default_disaplacement_off_set, displacement);
1627   }
1628   void set_displacement_at(int index, int displacement) {
1629     array_set_int_at(case_array_start +
1630                      index * per_case_cell_count +
1631                      relative_displacement_off_set,
1632                      displacement);
1633   }
1634 
1635 public:
1636   MultiBranchData(DataLayout* layout) : ArrayData(layout) {
1637     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
1638   }
1639 
1640   virtual bool is_MultiBranchData() const { return true; }
1641 
1642   static int compute_cell_count(BytecodeStream* stream);
1643 
1644   int number_of_cases() const {
1645     int alen = array_len() - 2; // exclude the two cells used by the default case.
1646     assert(alen % per_case_cell_count == 0, "must be even");
1647     return (alen / per_case_cell_count);
1648   }
1649 
1650   uint default_count() const {
1651     return array_uint_at(default_count_off_set);
1652   }
1653   int default_displacement() const {
1654     return array_int_at(default_disaplacement_off_set);
1655   }
1656 
1657   uint count_at(int index) const {
1658     return array_uint_at(case_array_start +
1659                          index * per_case_cell_count +
1660                          relative_count_off_set);
1661   }
1662   int displacement_at(int index) const {
1663     return array_int_at(case_array_start +
1664                         index * per_case_cell_count +
1665                         relative_displacement_off_set);
1666   }
1667 
1668   // Code generation support
1669   static ByteSize default_count_offset() {
1670     return array_element_offset(default_count_off_set);
1671   }
1672   static ByteSize default_displacement_offset() {
1673     return array_element_offset(default_disaplacement_off_set);
1674   }
1675   static ByteSize case_count_offset(int index) {
1676     return case_array_offset() +
1677            (per_case_size() * index) +
1678            relative_count_offset();
1679   }
1680   static ByteSize case_array_offset() {
1681     return array_element_offset(case_array_start);
1682   }
1683   static ByteSize per_case_size() {
1684     return in_ByteSize(per_case_cell_count) * cell_size;
1685   }
1686   static ByteSize relative_count_offset() {
1687     return in_ByteSize(relative_count_off_set) * cell_size;
1688   }
1689   static ByteSize relative_displacement_offset() {
1690     return in_ByteSize(relative_displacement_off_set) * cell_size;
1691   }
1692 
1693   // Specific initialization.
1694   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1695 
1696   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1697 };
1698 
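     // ArgInfoData
     //
     // An ArgInfoData records one cell per argument. It is used by the
     // interprocedural escape analysis support in MethodData (see
     // arg_modified()/set_arg_modified() there) to track which arguments
     // have been modified.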
1699 class ArgInfoData : public ArrayData {
1700 
1701 public:
1702   ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1703     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1704   }
1705 
1706   virtual bool is_ArgInfoData() const { return true; }
1707 
1708 
1709   int number_of_args() const {
1710     return array_len();
1711   }
1712 
1713   uint arg_modified(int arg) const {
1714     return array_uint_at(arg);
1715   }
1716 
1717   void set_arg_modified(int arg, uint val) {
1718     array_set_int_at(arg, val);
1719   }
1720 
1721   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1722 };
1723 
1724 // ParametersTypeData
1725 //
1726 // A ParametersTypeData is used to access profiling information about
1727 // types of parameters to a method.
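     //
     // Illustrative sketch (the exact cell layout is defined by
     // TypeStackSlotEntries): each profiled parameter contributes a fixed
     // group of cells, so a reader recovers the parameter count and then
     // queries per-parameter data, e.g.
     //
     //   int n = array_len() / TypeStackSlotEntries::per_arg_count();  // number_of_parameters()
     //   uint slot = stack_slot(i);            // interpreter stack slot of parameter i
     //   intptr_t t = parameters()->type(i);   // encoded type plus status bits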
1728 class ParametersTypeData : public ArrayData {
1729 
1730 private:
1731   TypeStackSlotEntries _parameters;
1732 
1733   static int stack_slot_local_offset(int i) {
1734     assert_profiling_enabled();
1735     return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
1736   }
1737 
1738   static int type_local_offset(int i) {
1739     assert_profiling_enabled();
1740     return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
1741   }
1742 
1743   static bool profiling_enabled();
1744   static void assert_profiling_enabled() {
1745     assert(profiling_enabled(), "method parameters profiling should be on");
1746   }
1747 
1748 public:
1749   ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
1750     assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
1751     // Some compilers (VC++) don't want this passed in the member initialization list
1752     _parameters.set_profile_data(this);
1753   }
1754 
1755   static int compute_cell_count(Method* m);
1756 
1757   virtual bool is_ParametersTypeData() const { return true; }
1758 
1759   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1760 
1761   int number_of_parameters() const {
1762     return array_len() / TypeStackSlotEntries::per_arg_count();
1763   }
1764 
1765   const TypeStackSlotEntries* parameters() const { return &_parameters; }
1766 
1767   uint stack_slot(int i) const {
1768     return _parameters.stack_slot(i);
1769   }
1770 
1771   void set_type(int i, Klass* k) {
1772     intptr_t current = _parameters.type(i);
1773     _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
1774   }
1775 
1776   virtual void clean_weak_klass_links(bool always_clean) {
1777     _parameters.clean_weak_klass_links(always_clean);
1778   }
1779 
1780   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1781 
1782   static ByteSize stack_slot_offset(int i) {
1783     return cell_offset(stack_slot_local_offset(i));
1784   }
1785 
1786   static ByteSize type_offset(int i) {
1787     return cell_offset(type_local_offset(i));
1788   }
1789 };
1790 
1791 // SpeculativeTrapData
1792 //
1793 // A SpeculativeTrapData is used to record traps due to type
1794 // speculation. It records the root of the compilation: that type
1795 // speculation is wrong in the context of one compilation (for
1796 // method1) doesn't mean it's wrong in the context of another one (for
1797 // method2). Type speculation could have more/different data in the
1798 // context of the compilation of method2 and it's worthwhile to try an
1799 // optimization that failed for compilation of method1 in the context
1800 // of compilation of method2.
1801 // Space for SpeculativeTrapData entries is allocated from the extra
1802 // data space in the MDO. If we run out of space, the trap data for
1803 // the ProfileData at that bci is updated.
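     //
     // A minimal usage sketch (assuming the caller has already located or
     // allocated the entry, e.g. via bci_to_extra_data(); trap_data and
     // compilation_root are illustrative names): the entry simply records the
     // root method of the compilation in which the speculation failed:
     //
     //   trap_data->set_method(compilation_root);   // compilation_root is a Method*
     //   Method* root = trap_data->method();        // queried on a later compilation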
1804 class SpeculativeTrapData : public ProfileData {
1805 protected:
1806   enum {
1807     speculative_trap_method,
1808 #ifndef _LP64
1809     // The size of the area for traps is a multiple of the header
1810     // size, 2 cells on 32 bits. Packed at the end of this area are
1811     // argument info entries (with tag
1812     // DataLayout::arg_info_data_tag). The logic in
1813     // MethodData::bci_to_extra_data() that guarantees traps don't
1814     // overflow over argument info entries assumes the size of a
1815     // SpeculativeTrapData is twice the header size. On 32 bits, a
1816     // SpeculativeTrapData must be 4 cells.
1817     padding,
1818 #endif
1819     speculative_trap_cell_count
1820   };
1821 public:
1822   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
1823     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
1824   }
1825 
1826   virtual bool is_SpeculativeTrapData() const { return true; }
1827 
1828   static int static_cell_count() {
1829     return speculative_trap_cell_count;
1830   }
1831 
1832   virtual int cell_count() const {
1833     return static_cell_count();
1834   }
1835 
1836   // Direct accessor
1837   Method* method() const {
1838     return (Method*)intptr_at(speculative_trap_method);
1839   }
1840 
1841   void set_method(Method* m) {
1842     assert(!m->is_old(), "cannot add old methods");
1843     set_intptr_at(speculative_trap_method, (intptr_t)m);
1844   }
1845 
1846   static ByteSize method_offset() {
1847     return cell_offset(speculative_trap_method);
1848   }
1849 
1850   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1851 };
1852 
1853 // MethodData*
1854 //
1855 // A MethodData* holds information which has been collected about
1856 // a method.  Its layout looks like this:
1857 //
1858 // -----------------------------
1859 // | header                    |
1860 // | klass                     |
1861 // -----------------------------
1862 // | method                    |
1863 // | size of the MethodData*   |
1864 // -----------------------------
1865 // | Data entries...           |
1866 // |   (variable size)         |
1867 // |                           |
1868 // .                           .
1869 // .                           .
1870 // .                           .
1871 // |                           |
1872 // -----------------------------
1873 //
1874 // The data entry area is a heterogeneous array of DataLayouts. Each
1875 // DataLayout in the array corresponds to a specific bytecode in the
1876 // method.  The entries in the array are sorted by the corresponding
1877 // bytecode.  Access to the data is via resource-allocated ProfileData objects,
1878 // which point to the underlying blocks of DataLayout structures.
1879 //
1880 // During interpretation, if profiling is enabled, the interpreter
1881 // maintains a method data pointer (mdp), which points at the entry
1882 // in the array corresponding to the current bci.  In the course of
1883 // interpretation, when a bytecode is encountered that has profile data
1884 // associated with it, the entry pointed to by mdp is updated, then the
1885 // mdp is adjusted to point to the next appropriate DataLayout.  If mdp
1886 // is null to begin with, the interpreter assumes that the current method
1887 // is not (yet) being profiled.
1888 //
1889 // In MethodData* parlance, "dp" is a "data pointer", the actual address
1890 // of a DataLayout element.  A "di" is a "data index", the offset in bytes
1891 // from the base of the data entry array.  A "displacement" is the byte offset
1892 // in certain ProfileData objects that indicates the amount by which the mdp must be
1893 // adjusted in the event of a change in control flow.
1894 //
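     // For example (a sketch of the arithmetic only, not actual interpreter code):
     //
     //   int di = (int)(dp - (address)_data);   // dp_to_di(dp)
     //   address dp2 = (address)_data + di;     // the inverse mapping
     //   mdp += displacement;                   // mdp adjustment on a taken control-flow edge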
1895 
1896 class CleanExtraDataClosure : public StackObj {
1897 public:
1898   virtual bool is_live(Method* m) = 0;
1899 };
1900 
1901 
1902 #if INCLUDE_JVMCI
1903 // Encapsulates an encoded speculation reason. These are linked together in
1904 // a list that is atomically appended to during deoptimization. Entries are
1905 // never removed from the list.
1906 // @see jdk.vm.ci.hotspot.HotSpotSpeculationLog.HotSpotSpeculationEncoding
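     //
     // A read-only traversal sketch (entries are only ever appended, never
     // removed; failed_speculations_address is the list head, as used below):
     //
     //   for (FailedSpeculation* fs = *failed_speculations_address; fs != nullptr; fs = fs->next()) {
     //     // inspect fs->data() and fs->data_len()
     //   }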
1907 class FailedSpeculation: public CHeapObj<mtCompiler> {
1908  private:
1909   // The length of HotSpotSpeculationEncoding.toByteArray(). The data itself
1910   // is an array embedded at the end of this object.
1911   int   _data_len;
1912 
1913   // Next entry in a linked list.
1914   FailedSpeculation* _next;
1915 
1916   FailedSpeculation(address data, int data_len);
1917 
1918   FailedSpeculation** next_adr() { return &_next; }
1919 
1920   // Placement new operator for inlining the speculation data into
1921   // the FailedSpeculation object.
1922   void* operator new(size_t size, size_t fs_size) throw();
1923 
1924  public:
1925   char* data()         { return (char*)(((address) this) + sizeof(FailedSpeculation)); }
1926   int data_len() const { return _data_len; }
1927   FailedSpeculation* next() const { return _next; }
1928 
1929   // Atomically appends a speculation from nm to the list whose head is at (*failed_speculations_address).
1930   // Returns false if the FailedSpeculation object could not be allocated.
1931   static bool add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len);
1932 
1933   // Frees all entries in the linked list whose head is at (*failed_speculations_address).
1934   static void free_failed_speculations(FailedSpeculation** failed_speculations_address);
1935 };
1936 #endif
1937 
1938 class ciMethodData;
1939 
1940 class MethodData : public Metadata {
1941   friend class VMStructs;
1942   friend class JVMCIVMStructs;
1943 private:
1944   friend class ProfileData;
1945   friend class TypeEntriesAtCall;
1946   friend class ciMethodData;
1947 
1948   // If you add a new field that points to any metaspace object, you
1949   // must add this field to MethodData::metaspace_pointers_do().
1950 
1951   // Back pointer to the Method*
1952   Method* _method;
1953 
1954   // Size of this MethodData in bytes
1955   int _size;
1956 
1957   // Cached hint for bci_to_dp and bci_to_data
1958   int _hint_di;
1959 
1960   Mutex _extra_data_lock;
1961 
1962   MethodData(const methodHandle& method);
1963 public:
1964   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
1965 
1966   virtual bool is_methodData() const { return true; }
1967   void initialize();
1968 
1969   // Whole-method sticky bits and flags
1970   enum {
1971     _trap_hist_limit    = Deoptimization::Reason_TRAP_HISTORY_LENGTH,
1972     _trap_hist_mask     = max_jubyte,
1973     _extra_data_count   = 4     // extra DataLayout headers, for trap history
1974   }; // Public flag values
1975 
1976   // Compiler-related counters.
1977   class CompilerCounters {
1978     friend class VMStructs;
1979     friend class JVMCIVMStructs;
1980 
1981     uint _nof_decompiles;             // count of all nmethod removals
1982     uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
1983     uint _nof_overflow_traps;         // trap count, excluding _trap_hist
1984     union {
1985       intptr_t _align;
1986       // JVMCI separates trap history for OSR compilations from normal compilations
1987       u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
1988     } _trap_hist;
1989 
1990   public:
1991     CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0) {
1992 #ifndef ZERO
1993       // Some Zero platforms do not have expected alignment, and do not use
1994       // this code. static_assert would still fire and fail for them.
1995       static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
1996 #endif
1997       uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
1998       Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
1999     }
2000 
2001     // Return (uint)-1 for overflow.
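         // The u1 counters saturate: inc_trap_count() below stops storing once a
         // counter reaches _trap_hist_mask (0xFF), and for a saturated counter
         // ((0xFF + 1) & _trap_hist_mask) - 1 == -1, i.e. (uint)-1.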
2002     uint trap_count(int reason) const {
2003       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2004       return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
2005     }
2006 
2007     uint inc_trap_count(int reason) {
2008       // Count another trap, anywhere in this method.
2009       assert(reason >= 0, "must be single trap");
2010       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2011       uint cnt1 = 1 + _trap_hist._array[reason];
2012       if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
2013         _trap_hist._array[reason] = (u1)cnt1;
2014         return cnt1;
2015       } else {
2016         return _trap_hist_mask + (++_nof_overflow_traps);
2017       }
2018     }
2019 
2020     uint overflow_trap_count() const {
2021       return _nof_overflow_traps;
2022     }
2023     uint overflow_recompile_count() const {
2024       return _nof_overflow_recompiles;
2025     }
2026     uint inc_overflow_recompile_count() {
2027       return ++_nof_overflow_recompiles;
2028     }
2029     uint decompile_count() const {
2030       return _nof_decompiles;
2031     }
2032     uint inc_decompile_count() {
2033       return ++_nof_decompiles;
2034     }
2035 
2036     // Support for code generation
2037     static ByteSize trap_history_offset() {
2038       return byte_offset_of(CompilerCounters, _trap_hist._array);
2039     }
2040   };
2041 
2042 private:
2043   CompilerCounters _compiler_counters;
2044 
2045   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2046   intx              _eflags;          // flags on escape information
2047   intx              _arg_local;       // bit set of non-escaping arguments
2048   intx              _arg_stack;       // bit set of stack-allocatable arguments
2049   intx              _arg_returned;    // bit set of returned arguments
2050 
2051   int               _creation_mileage; // method mileage at MDO creation
2052 
2053   // How many invocations has this MDO seen?
2054   // These counters are used to determine the exact age of the MDO.
2055   // We need them because with tiered compilation a method can be
2056   // concurrently executed at different levels.
2057   InvocationCounter _invocation_counter;
2058   // Same for backedges.
2059   InvocationCounter _backedge_counter;
2060   // Counter values at the time profiling started.
2061   int               _invocation_counter_start;
2062   int               _backedge_counter_start;
2063   uint              _tenure_traps;
2064   int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
2065   int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
2066 
2067 #if INCLUDE_RTM_OPT
2068   // State of RTM code generation during compilation of the method
2069   int               _rtm_state;
2070 #endif
2071 
2072   // The number of loops and blocks is computed the first time the method
2073   // is compiled with C1. It is used to determine whether the method is trivial.
2074   short             _num_loops;
2075   short             _num_blocks;
2076   // Does this method contain anything worth profiling?
2077   enum WouldProfile {unknown, no_profile, profile};
2078   WouldProfile      _would_profile;
2079 
2080 #if INCLUDE_JVMCI
2081   // Support for HotSpotMethodData.setCompiledIRSize(int)
2082   int                _jvmci_ir_size;
2083   FailedSpeculation* _failed_speculations;
2084 #endif
2085 
2086   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
2087   int _data_size;
2088 
2089   // Data index for the area dedicated to parameters: no_parameters (-2) if
2090   // parameters are not profiled, parameters_uninitialized (-1) until initialized.
2091   enum { no_parameters = -2, parameters_uninitialized = -1 };
2092   int _parameters_type_data_di;
2093 
2094   // Beginning of the data entries
2095   intptr_t _data[1];
2096 
2097   // Helper for size computation
2098   static int compute_data_size(BytecodeStream* stream);
2099   static int bytecode_cell_count(Bytecodes::Code code);
2100   static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2101   enum { no_profile_data = -1, variable_cell_count = -2 };
2102 
2103   // Helper for initialization
2104   DataLayout* data_layout_at(int data_index) const {
2105     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
2106     return (DataLayout*) (((address)_data) + data_index);
2107   }
2108 
2109   // Initialize an individual data segment.  Returns the size of
2110   // the segment in bytes.
2111   int initialize_data(BytecodeStream* stream, int data_index);
2112 
2113   // Helper for data_at
2114   DataLayout* limit_data_position() const {
2115     return data_layout_at(_data_size);
2116   }
2117   bool out_of_bounds(int data_index) const {
2118     return data_index >= data_size();
2119   }
2120 
2121   // Give each of the data entries a chance to perform specific
2122   // data initialization.
2123   void post_initialize(BytecodeStream* stream);
2124 
2125   // hint accessors
2126   int      hint_di() const  { return _hint_di; }
2127   void set_hint_di(int di)  {
2128     assert(!out_of_bounds(di), "hint_di out of bounds");
2129     _hint_di = di;
2130   }
2131 
2132   DataLayout* data_layout_before(int bci) {
2133     // avoid SEGV on this edge case
2134     if (data_size() == 0)
2135       return nullptr;
2136     DataLayout* layout = data_layout_at(hint_di());
2137     if (layout->bci() <= bci)
2138       return layout;
2139     return data_layout_at(first_di());
2140   }
2141 
2142   // What is the index of the first data entry?
2143   int first_di() const { return 0; }
2144 
2145   ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
2146   // Find or create an extra ProfileData:
2147   ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
2148 
2149   // return the argument info cell
2150   ArgInfoData *arg_info();
2151 
2152   enum {
2153     no_type_profile = 0,
2154     type_profile_jsr292 = 1,
2155     type_profile_all = 2
2156   };
2157 
2158   static bool profile_jsr292(const methodHandle& m, int bci);
2159   static bool profile_unsafe(const methodHandle& m, int bci);
2160   static bool profile_memory_access(const methodHandle& m, int bci);
2161   static int profile_arguments_flag();
2162   static bool profile_all_arguments();
2163   static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
2164   static int profile_return_flag();
2165   static bool profile_all_return();
2166   static bool profile_return_for_invoke(const methodHandle& m, int bci);
2167   static int profile_parameters_flag();
2168   static bool profile_parameters_jsr292_only();
2169   static bool profile_all_parameters();
2170 
2171   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
2172   void verify_extra_data_clean(CleanExtraDataClosure* cl);
2173 
2174 public:
2175   void clean_extra_data(CleanExtraDataClosure* cl);
2176 
2177   static int header_size() {
2178     return sizeof(MethodData)/wordSize;
2179   }
2180 
2181   // Compute the size of a MethodData* before it is created.
2182   static int compute_allocation_size_in_bytes(const methodHandle& method);
2183   static int compute_allocation_size_in_words(const methodHandle& method);
2184   static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
2185 
2186   // Determine if a given bytecode can have profile information.
2187   static bool bytecode_has_profile(Bytecodes::Code code) {
2188     return bytecode_cell_count(code) != no_profile_data;
2189   }
2190 
2191   // reset into original state
2192   void init();
2193 
2194   // My size
2195   int size_in_bytes() const { return _size; }
2196   int size() const    { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
2197 
2198   int      creation_mileage() const { return _creation_mileage; }
2199   void set_creation_mileage(int x)  { _creation_mileage = x; }
2200 
2201   int invocation_count() {
2202     if (invocation_counter()->carry()) {
2203       return InvocationCounter::count_limit;
2204     }
2205     return invocation_counter()->count();
2206   }
2207   int backedge_count() {
2208     if (backedge_counter()->carry()) {
2209       return InvocationCounter::count_limit;
2210     }
2211     return backedge_counter()->count();
2212   }
2213 
2214   int invocation_count_start() {
2215     if (invocation_counter()->carry()) {
2216       return 0;
2217     }
2218     return _invocation_counter_start;
2219   }
2220 
2221   int backedge_count_start() {
2222     if (backedge_counter()->carry()) {
2223       return 0;
2224     }
2225     return _backedge_counter_start;
2226   }
2227 
2228   int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
2229   int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
2230 
2231   void reset_start_counters() {
2232     _invocation_counter_start = invocation_count();
2233     _backedge_counter_start = backedge_count();
2234   }
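       // Illustrative use of the deltas above (a sketch only; mdo stands for a
       // MethodData*, and actual policy code lives elsewhere): callers measure
       // profiling progress since the last reset_start_counters() call, e.g.
       //
       //   mdo->reset_start_counters();
       //   // ... method continues to run and be profiled ...
       //   int new_events = mdo->invocation_count_delta() + mdo->backedge_count_delta();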
2235 
2236   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
2237   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
2238 
2239 #if INCLUDE_JVMCI
2240   FailedSpeculation** get_failed_speculations_address() {
2241     return &_failed_speculations;
2242   }
2243 #endif
2244 
2245 #if INCLUDE_RTM_OPT
2246   int rtm_state() const {
2247     return _rtm_state;
2248   }
2249   void set_rtm_state(RTMState rstate) {
2250     _rtm_state = (int)rstate;
2251   }
2252   void atomic_set_rtm_state(RTMState rstate) {
2253     Atomic::store(&_rtm_state, (int)rstate);
2254   }
2255 
2256   static ByteSize rtm_state_offset() {
2257     return byte_offset_of(MethodData, _rtm_state);
2258   }
2259 #endif
2260 
2261   void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
2262   bool would_profile() const                  { return _would_profile != no_profile; }
2263 
2264   int num_loops() const                       { return _num_loops;  }
2265   void set_num_loops(short n)                 { _num_loops = n;     }
2266   int num_blocks() const                      { return _num_blocks; }
2267   void set_num_blocks(short n)                { _num_blocks = n;    }
2268 
2269   bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
2270   static int mileage_of(Method* m);
2271 
2272   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2273   enum EscapeFlag {
2274     estimated    = 1 << 0,
2275     return_local = 1 << 1,
2276     return_allocated = 1 << 2,
2277     allocated_escapes = 1 << 3,
2278     unknown_modified = 1 << 4
2279   };
2280 
2281   intx eflags()                                  { return _eflags; }
2282   intx arg_local()                               { return _arg_local; }
2283   intx arg_stack()                               { return _arg_stack; }
2284   intx arg_returned()                            { return _arg_returned; }
2285   uint arg_modified(int a)                       { ArgInfoData *aid = arg_info();
2286                                                    assert(aid != nullptr, "arg_info must be not null");
2287                                                    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2288                                                    return aid->arg_modified(a); }
2289 
2290   void set_eflags(intx v)                        { _eflags = v; }
2291   void set_arg_local(intx v)                     { _arg_local = v; }
2292   void set_arg_stack(intx v)                     { _arg_stack = v; }
2293   void set_arg_returned(intx v)                  { _arg_returned = v; }
2294   void set_arg_modified(int a, uint v)           { ArgInfoData *aid = arg_info();
2295                                                    assert(aid != nullptr, "arg_info must be not null");
2296                                                    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2297                                                    aid->set_arg_modified(a, v); }
2298 
2299   void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
2300 
2301   // Location and size of data area
2302   address data_base() const {
2303     return (address) _data;
2304   }
2305   int data_size() const {
2306     return _data_size;
2307   }
2308 
2309   int parameters_size_in_bytes() const {
2310     ParametersTypeData* param = parameters_type_data();
2311     return param == nullptr ? 0 : param->size_in_bytes();
2312   }
2313 
2314   // Accessors
2315   Method* method() const { return _method; }
2316 
2317   // Get the data at an arbitrary (sort of) data index.
2318   ProfileData* data_at(int data_index) const;
2319 
2320   // Walk through the data in order.
2321   ProfileData* first_data() const { return data_at(first_di()); }
2322   ProfileData* next_data(ProfileData* current) const;
2323   DataLayout*  next_data_layout(DataLayout* current) const;
2324   bool is_valid(ProfileData* current) const { return current != nullptr; }
2325   bool is_valid(DataLayout*  current) const { return current != nullptr; }
2326 
2327   // Convert a dp (data pointer) to a di (data index).
2328   int dp_to_di(address dp) const {
2329     return (int)(dp - ((address)_data));
2330   }
2331 
2332   // bci to di/dp conversion.
2333   address bci_to_dp(int bci);
2334   int bci_to_di(int bci) {
2335     return dp_to_di(bci_to_dp(bci));
2336   }
2337 
2338   // Get the data at an arbitrary bci, or null if there is none.
2339   ProfileData* bci_to_data(int bci);
2340 
2341   // Same, but try to create an extra_data record if one is needed:
2342   ProfileData* allocate_bci_to_data(int bci, Method* m) {
2343     ProfileData* data = nullptr;
2344     // If m is not null, try to allocate a SpeculativeTrapData entry
2345     if (m == nullptr) {
2346       data = bci_to_data(bci);
2347     }
2348     if (data != nullptr) {
2349       return data;
2350     }
2351     data = bci_to_extra_data(bci, m, true);
2352     if (data != nullptr) {
2353       return data;
2354     }
2355     // If SpeculativeTrapData allocation fails try to allocate a
2356     // regular entry
2357     data = bci_to_data(bci);
2358     if (data != nullptr) {
2359       return data;
2360     }
2361     return bci_to_extra_data(bci, nullptr, true);
2362   }
2363 
2364   // Add a handful of extra data records, for trap tracking.
2365   DataLayout* extra_data_base() const  { return limit_data_position(); }
2366   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2367   DataLayout* args_data_limit() const  { return (DataLayout*)((address)this + size_in_bytes() -
2368                                                               parameters_size_in_bytes()); }
2369   int extra_data_size() const          { return (int)((address)extra_data_limit() - (address)extra_data_base()); }
2370   static DataLayout* next_extra(DataLayout* dp);
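       // An iteration sketch over the extra data area (illustrative only; actual
       // walkers also dispatch on dp->tag() and observe extra_data_lock where
       // required):
       //
       //   for (DataLayout* dp = extra_data_base(); dp < extra_data_limit(); dp = next_extra(dp)) {
       //     // e.g. dp->tag() == DataLayout::speculative_trap_data_tag marks a trap record
       //   }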
2371 
2372   // Return (uint)-1 for overflow.
2373   uint trap_count(int reason) const {
2374     return _compiler_counters.trap_count(reason);
2375   }
2376   // For loops:
2377   static uint trap_reason_limit() { return _trap_hist_limit; }
2378   static uint trap_count_limit()  { return _trap_hist_mask; }
2379   uint inc_trap_count(int reason) {
2380     return _compiler_counters.inc_trap_count(reason);
2381   }
2382 
2383   uint overflow_trap_count() const {
2384     return _compiler_counters.overflow_trap_count();
2385   }
2386   uint overflow_recompile_count() const {
2387     return _compiler_counters.overflow_recompile_count();
2388   }
2389   uint inc_overflow_recompile_count() {
2390     return _compiler_counters.inc_overflow_recompile_count();
2391   }
2392   uint decompile_count() const {
2393     return _compiler_counters.decompile_count();
2394   }
2395   uint inc_decompile_count() {
2396     uint dec_count = _compiler_counters.inc_decompile_count();
2397     if (dec_count > (uint)PerMethodRecompilationCutoff) {
2398       method()->set_not_compilable("decompile_count > PerMethodRecompilationCutoff", CompLevel_full_optimization);
2399     }
2400     return dec_count;
2401   }
2402   uint tenure_traps() const {
2403     return _tenure_traps;
2404   }
2405   void inc_tenure_traps() {
2406     _tenure_traps += 1;
2407   }
2408 
2409   // Return pointer to area dedicated to parameters in MDO
2410   ParametersTypeData* parameters_type_data() const {
2411     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2412     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : nullptr;
2413   }
2414 
2415   int parameters_type_data_di() const {
2416     assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
2417     return _parameters_type_data_di;
2418   }
2419 
2420   // Support for code generation
2421   static ByteSize data_offset() {
2422     return byte_offset_of(MethodData, _data[0]);
2423   }
2424 
2425   static ByteSize trap_history_offset() {
2426     return byte_offset_of(MethodData, _compiler_counters) + CompilerCounters::trap_history_offset();
2427   }
2428 
2429   static ByteSize invocation_counter_offset() {
2430     return byte_offset_of(MethodData, _invocation_counter);
2431   }
2432 
2433   static ByteSize backedge_counter_offset() {
2434     return byte_offset_of(MethodData, _backedge_counter);
2435   }
2436 
2437   static ByteSize invoke_mask_offset() {
2438     return byte_offset_of(MethodData, _invoke_mask);
2439   }
2440 
2441   static ByteSize backedge_mask_offset() {
2442     return byte_offset_of(MethodData, _backedge_mask);
2443   }
2444 
2445   static ByteSize parameters_type_data_di_offset() {
2446     return byte_offset_of(MethodData, _parameters_type_data_di);
2447   }
2448 
2449   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
2450   virtual MetaspaceObj::Type type() const { return MethodDataType; }
2451 
2452   // Deallocation support
2453   void deallocate_contents(ClassLoaderData* loader_data);
2454   void release_C_heap_structures();
2455 
2456   // GC support
2457   void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
2458 
2459   // Printing
2460   void print_on      (outputStream* st) const;
2461   void print_value_on(outputStream* st) const;
2462 
2463   // printing support for method data
2464   void print_data_on(outputStream* st) const;
2465 
2466   const char* internal_name() const { return "{method data}"; }
2467 
2468   // verification
2469   void verify_on(outputStream* st);
2470   void verify_data_on(outputStream* st);
2471 
2472   static bool profile_parameters_for_method(const methodHandle& m);
2473   static bool profile_arguments();
2474   static bool profile_arguments_jsr292_only();
2475   static bool profile_return();
2476   static bool profile_parameters();
2477   static bool profile_return_jsr292_only();
2478 
2479   void clean_method_data(bool always_clean);
2480   void clean_weak_method_links();
2481   Mutex* extra_data_lock() { return &_extra_data_lock; }
2482 };
2483 
2484 #endif // SHARE_OOPS_METHODDATA_HPP