1 /*
   2  * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_METHODDATA_HPP
  26 #define SHARE_OOPS_METHODDATA_HPP
  27 
  28 #include "interpreter/bytecodes.hpp"
  29 #include "interpreter/invocationCounter.hpp"
  30 #include "oops/metadata.hpp"
  31 #include "oops/method.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/deoptimization.hpp"
  34 #include "runtime/mutex.hpp"
  35 #include "utilities/align.hpp"
  36 #include "utilities/copy.hpp"
  37 
  38 class BytecodeStream;
  39 
  40 // The MethodData object collects counts and other profile information
  41 // during zeroth-tier (interpreter) and third-tier (C1 with full profiling)
  42 // execution.
  43 //
  44 // The profile is used later by compilation heuristics.  Some heuristics
  45 // enable use of aggressive (or "heroic") optimizations.  An aggressive
  46 // optimization often has a down-side, a corner case that it handles
  47 // poorly, but which is thought to be rare.  The profile provides
  48 // evidence of this rarity for a given method or even BCI.  It allows
  49 // the compiler to back out of the optimization at places where it
  50 // has historically been a poor choice.  Other heuristics try to use
  51 // specific information gathered about types observed at a given site.
  52 //
  53 // All data in the profile is approximate.  It is expected to be accurate
  54 // on the whole, but the system expects occasional inaccuraces, due to
  55 // counter overflow, multiprocessor races during data collection, space
  56 // limitations, missing MDO blocks, etc.  Bad or missing data will degrade
  57 // optimization quality but will not affect correctness.  Also, each MDO
  58 // can be checked for its "maturity" by calling is_mature().
  59 //
  60 // Short (<32-bit) counters are designed to overflow to a known "saturated"
  61 // state.  Also, certain recorded per-BCI events are given one-bit counters
  62 // which overflow to a saturated state which applied to all counters at
  63 // that BCI.  In other words, there is a small lattice which approximates
  64 // the ideal of an infinite-precision counter for each event at each BCI,
  65 // and the lattice quickly "bottoms out" in a state where all counters
  66 // are taken to be indefinitely large.
  67 //
  68 // The reader will find many data races in profile gathering code, starting
  69 // with invocation counter incrementation.  None of these races harm correct
  70 // execution of the compiled code.
  71 
  72 // forward decl
  73 class ProfileData;
  74 
  75 // DataLayout
  76 //
  77 // Overlay for generic profiling data.
  78 class DataLayout {
  79   friend class VMStructs;
  80   friend class JVMCIVMStructs;
  81 
  82 private:
  83   // Every data layout begins with a header.  This header
  84   // contains a tag, which is used to indicate the size/layout
  85   // of the data, 8 bits of flags, which can be used in any way,
  86   // 32 bits of trap history (none/one reason/many reasons),
  87   // and a bci, which is used to tie this piece of data to a
  88   // specific bci in the bytecodes.
  89   union {
  90     u8 _bits;
  91     struct {
  92       u1 _tag;
  93       u1 _flags;
  94       u2 _bci;
  95       u4 _traps;
  96     } _struct;
  97   } _header;
  98 
  99   // The data layout has an arbitrary number of cells, each sized
 100   // to accommodate a pointer or an integer.
 101   intptr_t _cells[1];
 102 
 103   // Some types of data layouts need a length field.
 104   static bool needs_array_len(u1 tag);
 105 
 106 public:
 107   enum {
 108     counter_increment = 1
 109   };
 110 
 111   enum {
 112     cell_size = sizeof(intptr_t)
 113   };
 114 
 115   // Tag values
 116   enum : u1 {
 117     no_tag,
 118     bit_data_tag,
 119     counter_data_tag,
 120     jump_data_tag,
 121     receiver_type_data_tag,
 122     virtual_call_data_tag,
 123     ret_data_tag,
 124     branch_data_tag,
 125     multi_branch_data_tag,
 126     arg_info_data_tag,
 127     call_type_data_tag,
 128     virtual_call_type_data_tag,
 129     parameters_type_data_tag,
 130     speculative_trap_data_tag,
 131     array_store_data_tag,
 132     array_load_data_tag,
 133     acmp_data_tag
 134   };
 135 
 136   enum {
 137     // The trap state breaks down as [recompile:1 | reason:31].
 138     // This further breakdown is defined in deoptimization.cpp.
 139     // See Deoptimization::trap_state_reason for an assert that
 140     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 141     //
 142     // The trap_state is collected only if ProfileTraps is true.
 143     trap_bits = 1+31,  // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
 144     trap_mask = -1,
 145     first_flag = 0
 146   };
 147 
 148   // Size computation
 149   static int header_size_in_bytes() {
 150     return header_size_in_cells() * cell_size;
 151   }
 152   static int header_size_in_cells() {
 153     return LP64_ONLY(1) NOT_LP64(2);
 154   }
 155 
 156   static int compute_size_in_bytes(int cell_count) {
 157     return header_size_in_bytes() + cell_count * cell_size;
 158   }
 159 
 160   // Initialization
 161   void initialize(u1 tag, u2 bci, int cell_count);
 162 
 163   // Accessors
 164   u1 tag() {
 165     return _header._struct._tag;
 166   }
 167 
 168   // Return 32 bits of trap state.
 169   // The state tells if traps with zero, one, or many reasons have occurred.
 170   // It also tells whether zero or many recompilations have occurred.
 171   // The associated trap histogram in the MDO itself tells whether
 172   // traps are common or not.  If a BCI shows that a trap X has
 173   // occurred, and the MDO shows N occurrences of X, we make the
 174   // simplifying assumption that all N occurrences can be blamed
 175   // on that BCI.
 176   uint trap_state() const {
 177     return _header._struct._traps;
 178   }
 179 
 180   void set_trap_state(uint new_state) {
 181     assert(ProfileTraps, "used only under +ProfileTraps");
 182     uint old_flags = _header._struct._traps;
 183     _header._struct._traps = new_state | old_flags;
 184   }
 185 
 186   u1 flags() const {
 187     return Atomic::load_acquire(&_header._struct._flags);
 188   }
 189 
 190   u2 bci() const {
 191     return _header._struct._bci;
 192   }
 193 
 194   void set_header(u8 value) {
 195     _header._bits = value;
 196   }
 197   u8 header() {
 198     return _header._bits;
 199   }
 200   void set_cell_at(int index, intptr_t value) {
 201     _cells[index] = value;
 202   }
 203   void release_set_cell_at(int index, intptr_t value);
 204   intptr_t cell_at(int index) const {
 205     return _cells[index];
 206   }
 207   intptr_t* cell_at_adr(int index) const {
 208     return const_cast<intptr_t*>(&_cells[index]);
 209   }
 210 
 211   bool set_flag_at(u1 flag_number) {
 212     const u1 bit = 1 << flag_number;
 213     u1 compare_value;
 214     do {
 215       compare_value = _header._struct._flags;
 216       if ((compare_value & bit) == bit) {
 217         // already set.
 218         return false;
 219       }
 220     } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, static_cast<u1>(compare_value | bit)));
 221     return true;
 222   }
 223 
 224   bool clear_flag_at(u1 flag_number) {
 225     const u1 bit = 1 << flag_number;
 226     u1 compare_value;
 227     u1 exchange_value;
 228     do {
 229       compare_value = _header._struct._flags;
 230       if ((compare_value & bit) == 0) {
 231         // already cleaed.
 232         return false;
 233       }
 234       exchange_value = compare_value & ~bit;
 235     } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, exchange_value));
 236     return true;
 237   }
 238 
 239   bool flag_at(u1 flag_number) const {
 240     return (flags() & (1 << flag_number)) != 0;
 241   }
 242 
 243   // Low-level support for code generation.
 244   static ByteSize header_offset() {
 245     return byte_offset_of(DataLayout, _header);
 246   }
 247   static ByteSize tag_offset() {
 248     return byte_offset_of(DataLayout, _header._struct._tag);
 249   }
 250   static ByteSize flags_offset() {
 251     return byte_offset_of(DataLayout, _header._struct._flags);
 252   }
 253   static ByteSize bci_offset() {
 254     return byte_offset_of(DataLayout, _header._struct._bci);
 255   }
 256   static ByteSize cell_offset(int index) {
 257     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 258   }
 259   // Return a value which, when or-ed as a byte into _flags, sets the flag.
 260   static u1 flag_number_to_constant(u1 flag_number) {
 261     DataLayout temp; temp.set_header(0);
 262     temp.set_flag_at(flag_number);
 263     return temp._header._struct._flags;
 264   }
 265   // Return a value which, when or-ed as a word into _header, sets the flag.
 266   static u8 flag_mask_to_header_mask(u1 byte_constant) {
 267     DataLayout temp; temp.set_header(0);
 268     temp._header._struct._flags = byte_constant;
 269     return temp._header._bits;
 270   }
 271 
 272   ProfileData* data_in();
 273 
 274   int size_in_bytes() {
 275     int cells = cell_count();
 276     assert(cells >= 0, "invalid number of cells");
 277     return DataLayout::compute_size_in_bytes(cells);
 278   }
 279   int cell_count();
 280 
 281   // GC support
 282   void clean_weak_klass_links(bool always_clean);
 283 };
 284 
 285 
 286 // ProfileData class hierarchy
 287 class ProfileData;
 288 class   BitData;
 289 class     CounterData;
 290 class       ReceiverTypeData;
 291 class         VirtualCallData;
 292 class           VirtualCallTypeData;
 293 class         ArrayStoreData;
 294 class       RetData;
 295 class       CallTypeData;
 296 class   JumpData;
 297 class     BranchData;
 298 class       ACmpData;
 299 class   ArrayData;
 300 class     MultiBranchData;
 301 class     ArgInfoData;
 302 class     ParametersTypeData;
 303 class   SpeculativeTrapData;
 304 class   ArrayLoadData;
 305 
 306 // ProfileData
 307 //
 308 // A ProfileData object is created to refer to a section of profiling
 309 // data in a structured way.
 310 class ProfileData : public ResourceObj {
 311   friend class TypeEntries;
 312   friend class SingleTypeEntry;
 313   friend class TypeStackSlotEntries;
 314 private:
 315   enum {
 316     tab_width_one = 16,
 317     tab_width_two = 36
 318   };
 319 
 320   // This is a pointer to a section of profiling data.
 321   DataLayout* _data;
 322 
 323   char* print_data_on_helper(const MethodData* md) const;
 324 
 325 protected:
 326   DataLayout* data() { return _data; }
 327   const DataLayout* data() const { return _data; }
 328 
 329   enum {
 330     cell_size = DataLayout::cell_size
 331   };
 332 
 333 public:
 334   // How many cells are in this?
 335   virtual int cell_count() const {
 336     ShouldNotReachHere();
 337     return -1;
 338   }
 339 
 340   // Return the size of this data.
 341   int size_in_bytes() {
 342     return DataLayout::compute_size_in_bytes(cell_count());
 343   }
 344 
 345 protected:
 346   // Low-level accessors for underlying data
 347   void set_intptr_at(int index, intptr_t value) {
 348     assert(0 <= index && index < cell_count(), "oob");
 349     data()->set_cell_at(index, value);
 350   }
 351   void release_set_intptr_at(int index, intptr_t value);
 352   intptr_t intptr_at(int index) const {
 353     assert(0 <= index && index < cell_count(), "oob");
 354     return data()->cell_at(index);
 355   }
 356   intptr_t* intptr_at_adr(int index) const {
 357     assert(0 <= index && index < cell_count(), "oob");
 358     return data()->cell_at_adr(index);
 359   }
 360   void set_uint_at(int index, uint value) {
 361     set_intptr_at(index, (intptr_t) value);
 362   }
 363   void release_set_uint_at(int index, uint value);
 364   uint uint_at(int index) const {
 365     return (uint)intptr_at(index);
 366   }
 367   void set_int_at(int index, int value) {
 368     set_intptr_at(index, (intptr_t) value);
 369   }
 370   void release_set_int_at(int index, int value);
 371   int int_at(int index) const {
 372     return (int)intptr_at(index);
 373   }
 374   int int_at_unchecked(int index) const {
 375     return (int)data()->cell_at(index);
 376   }
 377 
 378   void set_flag_at(u1 flag_number) {
 379     data()->set_flag_at(flag_number);
 380   }
 381   bool flag_at(u1 flag_number) const {
 382     return data()->flag_at(flag_number);
 383   }
 384 
 385   // two convenient imports for use by subclasses:
 386   static ByteSize cell_offset(int index) {
 387     return DataLayout::cell_offset(index);
 388   }
 389   static u1 flag_number_to_constant(u1 flag_number) {
 390     return DataLayout::flag_number_to_constant(flag_number);
 391   }
 392 
 393   ProfileData(DataLayout* data) {
 394     _data = data;
 395   }
 396 
 397 public:
 398   // Constructor for invalid ProfileData.
 399   ProfileData();
 400 
 401   u2 bci() const {
 402     return data()->bci();
 403   }
 404 
 405   address dp() {
 406     return (address)_data;
 407   }
 408 
 409   int trap_state() const {
 410     return data()->trap_state();
 411   }
 412   void set_trap_state(int new_state) {
 413     data()->set_trap_state(new_state);
 414   }
 415 
 416   // Type checking
 417   virtual bool is_BitData()         const { return false; }
 418   virtual bool is_CounterData()     const { return false; }
 419   virtual bool is_JumpData()        const { return false; }
 420   virtual bool is_ReceiverTypeData()const { return false; }
 421   virtual bool is_VirtualCallData() const { return false; }
 422   virtual bool is_RetData()         const { return false; }
 423   virtual bool is_BranchData()      const { return false; }
 424   virtual bool is_ArrayData()       const { return false; }
 425   virtual bool is_MultiBranchData() const { return false; }
 426   virtual bool is_ArgInfoData()     const { return false; }
 427   virtual bool is_CallTypeData()    const { return false; }
 428   virtual bool is_VirtualCallTypeData()const { return false; }
 429   virtual bool is_ParametersTypeData() const { return false; }
 430   virtual bool is_SpeculativeTrapData()const { return false; }
 431   virtual bool is_ArrayStoreData() const { return false; }
 432   virtual bool is_ArrayLoadData() const { return false; }
 433   virtual bool is_ACmpData()           const { return false; }
 434 
 435 
 436   BitData* as_BitData() const {
 437     assert(is_BitData(), "wrong type");
 438     return is_BitData()         ? (BitData*)        this : nullptr;
 439   }
 440   CounterData* as_CounterData() const {
 441     assert(is_CounterData(), "wrong type");
 442     return is_CounterData()     ? (CounterData*)    this : nullptr;
 443   }
 444   JumpData* as_JumpData() const {
 445     assert(is_JumpData(), "wrong type");
 446     return is_JumpData()        ? (JumpData*)       this : nullptr;
 447   }
 448   ReceiverTypeData* as_ReceiverTypeData() const {
 449     assert(is_ReceiverTypeData(), "wrong type");
 450     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : nullptr;
 451   }
 452   VirtualCallData* as_VirtualCallData() const {
 453     assert(is_VirtualCallData(), "wrong type");
 454     return is_VirtualCallData() ? (VirtualCallData*)this : nullptr;
 455   }
 456   RetData* as_RetData() const {
 457     assert(is_RetData(), "wrong type");
 458     return is_RetData()         ? (RetData*)        this : nullptr;
 459   }
 460   BranchData* as_BranchData() const {
 461     assert(is_BranchData(), "wrong type");
 462     return is_BranchData()      ? (BranchData*)     this : nullptr;
 463   }
 464   ArrayData* as_ArrayData() const {
 465     assert(is_ArrayData(), "wrong type");
 466     return is_ArrayData()       ? (ArrayData*)      this : nullptr;
 467   }
 468   MultiBranchData* as_MultiBranchData() const {
 469     assert(is_MultiBranchData(), "wrong type");
 470     return is_MultiBranchData() ? (MultiBranchData*)this : nullptr;
 471   }
 472   ArgInfoData* as_ArgInfoData() const {
 473     assert(is_ArgInfoData(), "wrong type");
 474     return is_ArgInfoData() ? (ArgInfoData*)this : nullptr;
 475   }
 476   CallTypeData* as_CallTypeData() const {
 477     assert(is_CallTypeData(), "wrong type");
 478     return is_CallTypeData() ? (CallTypeData*)this : nullptr;
 479   }
 480   VirtualCallTypeData* as_VirtualCallTypeData() const {
 481     assert(is_VirtualCallTypeData(), "wrong type");
 482     return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
 483   }
 484   ParametersTypeData* as_ParametersTypeData() const {
 485     assert(is_ParametersTypeData(), "wrong type");
 486     return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
 487   }
 488   SpeculativeTrapData* as_SpeculativeTrapData() const {
 489     assert(is_SpeculativeTrapData(), "wrong type");
 490     return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
 491   }
 492   ArrayStoreData* as_ArrayStoreData() const {
 493     assert(is_ArrayStoreData(), "wrong type");
 494     return is_ArrayStoreData() ? (ArrayStoreData*)this : nullptr;
 495   }
 496   ArrayLoadData* as_ArrayLoadData() const {
 497     assert(is_ArrayLoadData(), "wrong type");
 498     return is_ArrayLoadData() ? (ArrayLoadData*)this : nullptr;
 499   }
 500   ACmpData* as_ACmpData() const {
 501     assert(is_ACmpData(), "wrong type");
 502     return is_ACmpData() ? (ACmpData*)this : nullptr;
 503   }
 504 
 505 
 506   // Subclass specific initialization
 507   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
 508 
 509   // GC support
 510   virtual void clean_weak_klass_links(bool always_clean) {}
 511 
 512   // CDS support
 513   virtual void metaspace_pointers_do(MetaspaceClosure* it) {}
 514 
 515     // CI translation: ProfileData can represent both MethodDataOop data
 516   // as well as CIMethodData data. This function is provided for translating
 517   // an oop in a ProfileData to the ci equivalent. Generally speaking,
 518   // most ProfileData don't require any translation, so we provide the null
 519   // translation here, and the required translators are in the ci subclasses.
 520   virtual void translate_from(const ProfileData* data) {}
 521 
 522   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
 523     ShouldNotReachHere();
 524   }
 525 
 526   void print_data_on(outputStream* st, const MethodData* md) const;
 527 
 528   void print_shared(outputStream* st, const char* name, const char* extra) const;
 529   void tab(outputStream* st, bool first = false) const;
 530 };
 531 
 532 // BitData
 533 //
 534 // A BitData holds a flag or two in its header.
 535 class BitData : public ProfileData {
 536   friend class VMStructs;
 537   friend class JVMCIVMStructs;
 538 protected:
 539   enum : u1 {
 540     // null_seen:
 541     //  saw a null operand (cast/aastore/instanceof)
 542       null_seen_flag                  = DataLayout::first_flag + 0,
 543       exception_handler_entered_flag  = null_seen_flag + 1,
 544       deprecated_method_callsite_flag = exception_handler_entered_flag + 1
 545 #if INCLUDE_JVMCI
 546     // bytecode threw any exception
 547     , exception_seen_flag             = deprecated_method_callsite_flag + 1
 548 #endif
 549     , last_bit_data_flag
 550   };
 551   enum { bit_cell_count = 0 };  // no additional data fields needed.
 552 public:
 553   BitData(DataLayout* layout) : ProfileData(layout) {
 554   }
 555 
 556   virtual bool is_BitData() const { return true; }
 557 
 558   static int static_cell_count() {
 559     return bit_cell_count;
 560   }
 561 
 562   virtual int cell_count() const {
 563     return static_cell_count();
 564   }
 565 
 566   // Accessor
 567 
 568   // The null_seen flag bit is specially known to the interpreter.
 569   // Consulting it allows the compiler to avoid setting up null_check traps.
 570   bool null_seen() const  { return flag_at(null_seen_flag); }
 571   void set_null_seen()    { set_flag_at(null_seen_flag); }
 572   bool deprecated_method_call_site() const { return flag_at(deprecated_method_callsite_flag); }
 573   bool set_deprecated_method_call_site() { return data()->set_flag_at(deprecated_method_callsite_flag); }
 574   bool clear_deprecated_method_call_site() { return data()->clear_flag_at(deprecated_method_callsite_flag); }
 575 
 576 #if INCLUDE_JVMCI
 577   // true if an exception was thrown at the specific BCI
 578   bool exception_seen() { return flag_at(exception_seen_flag); }
 579   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 580 #endif
 581 
 582   // true if a ex handler block at this bci was entered
 583   bool exception_handler_entered() { return flag_at(exception_handler_entered_flag); }
 584   void set_exception_handler_entered() { set_flag_at(exception_handler_entered_flag); }
 585 
 586   // Code generation support
 587   static u1 null_seen_byte_constant() {
 588     return flag_number_to_constant(null_seen_flag);
 589   }
 590 
 591   static ByteSize bit_data_size() {
 592     return cell_offset(bit_cell_count);
 593   }
 594 
 595   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 596 };
 597 
 598 // CounterData
 599 //
 600 // A CounterData corresponds to a simple counter.
 601 class CounterData : public BitData {
 602   friend class VMStructs;
 603   friend class JVMCIVMStructs;
 604 protected:
 605   enum {
 606     count_off,
 607     counter_cell_count
 608   };
 609 public:
 610   CounterData(DataLayout* layout) : BitData(layout) {}
 611 
 612   virtual bool is_CounterData() const { return true; }
 613 
 614   static int static_cell_count() {
 615     return counter_cell_count;
 616   }
 617 
 618   virtual int cell_count() const {
 619     return static_cell_count();
 620   }
 621 
 622   // Direct accessor
 623   int count() const {
 624     intptr_t raw_data = intptr_at(count_off);
 625     if (raw_data > max_jint) {
 626       raw_data = max_jint;
 627     } else if (raw_data < min_jint) {
 628       raw_data = min_jint;
 629     }
 630     return int(raw_data);
 631   }
 632 
 633   // Code generation support
 634   static ByteSize count_offset() {
 635     return cell_offset(count_off);
 636   }
 637   static ByteSize counter_data_size() {
 638     return cell_offset(counter_cell_count);
 639   }
 640 
 641   void set_count(int count) {
 642     set_int_at(count_off, count);
 643   }
 644 
 645   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 646 };
 647 
 648 // JumpData
 649 //
 650 // A JumpData is used to access profiling information for a direct
 651 // branch.  It is a counter, used for counting the number of branches,
 652 // plus a data displacement, used for realigning the data pointer to
 653 // the corresponding target bci.
 654 class JumpData : public ProfileData {
 655   friend class VMStructs;
 656   friend class JVMCIVMStructs;
 657 protected:
 658   enum {
 659     taken_off_set,
 660     displacement_off_set,
 661     jump_cell_count
 662   };
 663 
 664   void set_displacement(int displacement) {
 665     set_int_at(displacement_off_set, displacement);
 666   }
 667 
 668 public:
 669   JumpData(DataLayout* layout) : ProfileData(layout) {
 670     assert(layout->tag() == DataLayout::jump_data_tag ||
 671       layout->tag() == DataLayout::branch_data_tag ||
 672       layout->tag() == DataLayout::acmp_data_tag, "wrong type");
 673   }
 674 
 675   virtual bool is_JumpData() const { return true; }
 676 
 677   static int static_cell_count() {
 678     return jump_cell_count;
 679   }
 680 
 681   virtual int cell_count() const {
 682     return static_cell_count();
 683   }
 684 
 685   // Direct accessor
 686   uint taken() const {
 687     return uint_at(taken_off_set);
 688   }
 689 
 690   void set_taken(uint cnt) {
 691     set_uint_at(taken_off_set, cnt);
 692   }
 693 
 694   // Saturating counter
 695   uint inc_taken() {
 696     uint cnt = taken() + 1;
 697     // Did we wrap? Will compiler screw us??
 698     if (cnt == 0) cnt--;
 699     set_uint_at(taken_off_set, cnt);
 700     return cnt;
 701   }
 702 
 703   int displacement() const {
 704     return int_at(displacement_off_set);
 705   }
 706 
 707   // Code generation support
 708   static ByteSize taken_offset() {
 709     return cell_offset(taken_off_set);
 710   }
 711 
 712   static ByteSize displacement_offset() {
 713     return cell_offset(displacement_off_set);
 714   }
 715 
 716   // Specific initialization.
 717   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 718 
 719   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 720 };
 721 
 722 // Entries in a ProfileData object to record types: it can either be
 723 // none (no profile), unknown (conflicting profile data) or a klass if
 724 // a single one is seen. Whether a null reference was seen is also
 725 // recorded. No counter is associated with the type and a single type
 726 // is tracked (unlike VirtualCallData).
 727 class TypeEntries {
 728 
 729 public:
 730 
 731   // A single cell is used to record information for a type:
 732   // - the cell is initialized to 0
 733   // - when a type is discovered it is stored in the cell
 734   // - bit zero of the cell is used to record whether a null reference
 735   // was encountered or not
 736   // - bit 1 is set to record a conflict in the type information
 737 
 738   enum {
 739     null_seen = 1,
 740     type_mask = ~null_seen,
 741     type_unknown = 2,
 742     status_bits = null_seen | type_unknown,
 743     type_klass_mask = ~status_bits
 744   };
 745 
 746   // what to initialize a cell to
 747   static intptr_t type_none() {
 748     return 0;
 749   }
 750 
 751   // null seen = bit 0 set?
 752   static bool was_null_seen(intptr_t v) {
 753     return (v & null_seen) != 0;
 754   }
 755 
 756   // conflicting type information = bit 1 set?
 757   static bool is_type_unknown(intptr_t v) {
 758     return (v & type_unknown) != 0;
 759   }
 760 
 761   // not type information yet = all bits cleared, ignoring bit 0?
 762   static bool is_type_none(intptr_t v) {
 763     return (v & type_mask) == 0;
 764   }
 765 
 766   // recorded type: cell without bit 0 and 1
 767   static intptr_t klass_part(intptr_t v) {
 768     intptr_t r = v & type_klass_mask;
 769     return r;
 770   }
 771 
 772   // type recorded
 773   static Klass* valid_klass(intptr_t k) {
 774     if (!is_type_none(k) &&
 775         !is_type_unknown(k)) {
 776       Klass* res = (Klass*)klass_part(k);
 777       assert(res != nullptr, "invalid");
 778       return res;
 779     } else {
 780       return nullptr;
 781     }
 782   }
 783 
 784   static intptr_t with_status(intptr_t k, intptr_t in) {
 785     return k | (in & status_bits);
 786   }
 787 
 788   static intptr_t with_status(Klass* k, intptr_t in) {
 789     return with_status((intptr_t)k, in);
 790   }
 791 
 792   static void print_klass(outputStream* st, intptr_t k);
 793 
 794 protected:
 795   // ProfileData object these entries are part of
 796   ProfileData* _pd;
 797   // offset within the ProfileData object where the entries start
 798   const int _base_off;
 799 
 800   TypeEntries(int base_off)
 801     : _pd(nullptr), _base_off(base_off) {}
 802 
 803   void set_intptr_at(int index, intptr_t value) {
 804     _pd->set_intptr_at(index, value);
 805   }
 806 
 807   intptr_t intptr_at(int index) const {
 808     return _pd->intptr_at(index);
 809   }
 810 
 811 public:
 812   void set_profile_data(ProfileData* pd) {
 813     _pd = pd;
 814   }
 815 };
 816 
 817 // Type entries used for arguments passed at a call and parameters on
 818 // method entry. 2 cells per entry: one for the type encoded as in
 819 // TypeEntries and one initialized with the stack slot where the
 820 // profiled object is to be found so that the interpreter can locate
 821 // it quickly.
 822 class TypeStackSlotEntries : public TypeEntries {
 823 
 824 private:
 825   enum {
 826     stack_slot_entry,
 827     type_entry,
 828     per_arg_cell_count
 829   };
 830 
 831   // offset of cell for stack slot for entry i within ProfileData object
 832   int stack_slot_offset(int i) const {
 833     return _base_off + stack_slot_local_offset(i);
 834   }
 835 
 836   const int _number_of_entries;
 837 
 838   // offset of cell for type for entry i within ProfileData object
 839   int type_offset_in_cells(int i) const {
 840     return _base_off + type_local_offset(i);
 841   }
 842 
 843 public:
 844 
 845   TypeStackSlotEntries(int base_off, int nb_entries)
 846     : TypeEntries(base_off), _number_of_entries(nb_entries) {}
 847 
 848   static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
 849 
 850   void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
 851 
 852   int number_of_entries() const { return _number_of_entries; }
 853 
 854   // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
 855   static int stack_slot_local_offset(int i) {
 856     return i * per_arg_cell_count + stack_slot_entry;
 857   }
 858 
 859   // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
 860   static int type_local_offset(int i) {
 861     return i * per_arg_cell_count + type_entry;
 862   }
 863 
 864   // stack slot for entry i
 865   uint stack_slot(int i) const {
 866     assert(i >= 0 && i < _number_of_entries, "oob");
 867     return _pd->uint_at(stack_slot_offset(i));
 868   }
 869 
 870   // set stack slot for entry i
 871   void set_stack_slot(int i, uint num) {
 872     assert(i >= 0 && i < _number_of_entries, "oob");
 873     _pd->set_uint_at(stack_slot_offset(i), num);
 874   }
 875 
 876   // type for entry i
 877   intptr_t type(int i) const {
 878     assert(i >= 0 && i < _number_of_entries, "oob");
 879     return _pd->intptr_at(type_offset_in_cells(i));
 880   }
 881 
 882   intptr_t* type_adr(int i) const {
 883     assert(i >= 0 && i < _number_of_entries, "oob");
 884     return _pd->intptr_at_adr(type_offset_in_cells(i));
 885   }
 886 
 887   // set type for entry i
 888   void set_type(int i, intptr_t k) {
 889     assert(i >= 0 && i < _number_of_entries, "oob");
 890     _pd->set_intptr_at(type_offset_in_cells(i), k);
 891   }
 892 
 893   static ByteSize per_arg_size() {
 894     return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
 895   }
 896 
 897   static int per_arg_count() {
 898     return per_arg_cell_count;
 899   }
 900 
 901   ByteSize type_offset(int i) const {
 902     return DataLayout::cell_offset(type_offset_in_cells(i));
 903   }
 904 
 905   // GC support
 906   void clean_weak_klass_links(bool always_clean);
 907 
 908   // CDS support
 909   virtual void metaspace_pointers_do(MetaspaceClosure* it);
 910 
 911   void print_data_on(outputStream* st) const;
 912 };
 913 
 914 // Type entry used for return from a call. A single cell to record the
 915 // type.
 916 class SingleTypeEntry : public TypeEntries {
 917 
 918 private:
 919   enum {
 920     cell_count = 1
 921   };
 922 
 923 public:
 924   SingleTypeEntry(int base_off)
 925     : TypeEntries(base_off) {}
 926 
 927   void post_initialize() {
 928     set_type(type_none());
 929   }
 930 
 931   intptr_t type() const {
 932     return _pd->intptr_at(_base_off);
 933   }
 934 
 935   intptr_t* type_adr() const {
 936     return _pd->intptr_at_adr(_base_off);
 937   }
 938 
 939   void set_type(intptr_t k) {
 940     _pd->set_intptr_at(_base_off, k);
 941   }
 942 
 943   static int static_cell_count() {
 944     return cell_count;
 945   }
 946 
 947   static ByteSize size() {
 948     return in_ByteSize(cell_count * DataLayout::cell_size);
 949   }
 950 
 951   ByteSize type_offset() {
 952     return DataLayout::cell_offset(_base_off);
 953   }
 954 
 955   // GC support
 956   void clean_weak_klass_links(bool always_clean);
 957 
 958   // CDS support
 959   virtual void metaspace_pointers_do(MetaspaceClosure* it);
 960 
 961   void print_data_on(outputStream* st) const;
 962 };
 963 
 964 // Entries to collect type information at a call: contains arguments
 965 // (TypeStackSlotEntries), a return type (SingleTypeEntry) and a
 966 // number of cells. Because the number of cells for the return type is
 967 // smaller than the number of cells for the type of an arguments, the
 968 // number of cells is used to tell how many arguments are profiled and
 969 // whether a return value is profiled. See has_arguments() and
 970 // has_return().
 971 class TypeEntriesAtCall {
 972 private:
 973   static int stack_slot_local_offset(int i) {
 974     return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
 975   }
 976 
 977   static int argument_type_local_offset(int i) {
 978     return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
 979   }
 980 
 981 public:
 982 
 983   static int header_cell_count() {
 984     return 1;
 985   }
 986 
 987   static int cell_count_local_offset() {
 988     return 0;
 989   }
 990 
 991   static int compute_cell_count(BytecodeStream* stream);
 992 
 993   static void initialize(DataLayout* dl, int base, int cell_count) {
 994     int off = base + cell_count_local_offset();
 995     dl->set_cell_at(off, cell_count - base - header_cell_count());
 996   }
 997 
 998   static bool arguments_profiling_enabled();
 999   static bool return_profiling_enabled();
1000 
1001   // Code generation support
1002   static ByteSize cell_count_offset() {
1003     return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
1004   }
1005 
1006   static ByteSize args_data_offset() {
1007     return in_ByteSize(header_cell_count() * DataLayout::cell_size);
1008   }
1009 
1010   static ByteSize stack_slot_offset(int i) {
1011     return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
1012   }
1013 
1014   static ByteSize argument_type_offset(int i) {
1015     return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
1016   }
1017 
1018   static ByteSize return_only_size() {
1019     return SingleTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
1020   }
1021 
1022 };
1023 
1024 // CallTypeData
1025 //
1026 // A CallTypeData is used to access profiling information about a non
1027 // virtual call for which we collect type information about arguments
1028 // and return value.
1029 class CallTypeData : public CounterData {
1030 private:
1031   // entries for arguments if any
1032   TypeStackSlotEntries _args;
1033   // entry for return type if any
1034   SingleTypeEntry _ret;
1035 
1036   int cell_count_global_offset() const {
1037     return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1038   }
1039 
1040   // number of cells not counting the header
1041   int cell_count_no_header() const {
1042     return uint_at(cell_count_global_offset());
1043   }
1044 
1045   void check_number_of_arguments(int total) {
1046     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1047   }
1048 
1049 public:
1050   CallTypeData(DataLayout* layout) :
1051     CounterData(layout),
1052     _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1053     _ret(cell_count() - SingleTypeEntry::static_cell_count())
1054   {
1055     assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
1056     // Some compilers (VC++) don't want this passed in member initialization list
1057     _args.set_profile_data(this);
1058     _ret.set_profile_data(this);
1059   }
1060 
1061   const TypeStackSlotEntries* args() const {
1062     assert(has_arguments(), "no profiling of arguments");
1063     return &_args;
1064   }
1065 
1066   const SingleTypeEntry* ret() const {
1067     assert(has_return(), "no profiling of return value");
1068     return &_ret;
1069   }
1070 
1071   virtual bool is_CallTypeData() const { return true; }
1072 
1073   static int static_cell_count() {
1074     return -1;
1075   }
1076 
1077   static int compute_cell_count(BytecodeStream* stream) {
1078     return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1079   }
1080 
1081   static void initialize(DataLayout* dl, int cell_count) {
1082     TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
1083   }
1084 
1085   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1086 
1087   virtual int cell_count() const {
1088     return CounterData::static_cell_count() +
1089       TypeEntriesAtCall::header_cell_count() +
1090       int_at_unchecked(cell_count_global_offset());
1091   }
1092 
1093   int number_of_arguments() const {
1094     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1095   }
1096 
1097   void set_argument_type(int i, Klass* k) {
1098     assert(has_arguments(), "no arguments!");
1099     intptr_t current = _args.type(i);
1100     _args.set_type(i, TypeEntries::with_status(k, current));
1101   }
1102 
1103   void set_return_type(Klass* k) {
1104     assert(has_return(), "no return!");
1105     intptr_t current = _ret.type();
1106     _ret.set_type(TypeEntries::with_status(k, current));
1107   }
1108 
1109   // An entry for a return value takes less space than an entry for an
1110   // argument so if the number of cells exceeds the number of cells
1111   // needed for an argument, this object contains type information for
1112   // at least one argument.
1113   bool has_arguments() const {
1114     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1115     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1116     return res;
1117   }
1118 
1119   // An entry for a return value takes less space than an entry for an
1120   // argument, so if the remainder of the number of cells divided by
1121   // the number of cells for an argument is not null, a return value
1122   // is profiled in this object.
1123   bool has_return() const {
1124     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1125     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1126     return res;
1127   }
1128 
1129   // Code generation support
1130   static ByteSize args_data_offset() {
1131     return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1132   }
1133 
1134   ByteSize argument_type_offset(int i) {
1135     return _args.type_offset(i);
1136   }
1137 
1138   ByteSize return_type_offset() {
1139     return _ret.type_offset();
1140   }
1141 
1142   // GC support
1143   virtual void clean_weak_klass_links(bool always_clean) {
1144     if (has_arguments()) {
1145       _args.clean_weak_klass_links(always_clean);
1146     }
1147     if (has_return()) {
1148       _ret.clean_weak_klass_links(always_clean);
1149     }
1150   }
1151 
1152   // CDS support
1153   virtual void metaspace_pointers_do(MetaspaceClosure* it) {
1154     if (has_arguments()) {
1155       _args.metaspace_pointers_do(it);
1156     }
1157     if (has_return()) {
1158       _ret.metaspace_pointers_do(it);
1159     }
1160   }
1161 
1162   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1163 };
1164 
1165 // ReceiverTypeData
1166 //
1167 // A ReceiverTypeData is used to access profiling information about a
1168 // dynamic type check.  It consists of a series of (Klass*, count)
1169 // pairs which are used to store a type profile for the receiver of
1170 // the check, the associated count is incremented every time the type
1171 // is seen. A per ReceiverTypeData counter is incremented on type
1172 // overflow (when there's no more room for a not yet profiled Klass*).
1173 //
1174 class ReceiverTypeData : public CounterData {
1175   friend class VMStructs;
1176   friend class JVMCIVMStructs;
1177 protected:
1178   enum {
1179     receiver0_offset = counter_cell_count,
1180     count0_offset,
1181     receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1182   };
1183 
1184 public:
1185   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1186     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1187            layout->tag() == DataLayout::virtual_call_data_tag ||
1188            layout->tag() == DataLayout::virtual_call_type_data_tag ||
1189            layout->tag() == DataLayout::array_store_data_tag, "wrong type");
1190   }
1191 
1192   virtual bool is_ReceiverTypeData() const { return true; }
1193 
1194   static int static_cell_count() {
1195     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
1196   }
1197 
1198   virtual int cell_count() const {
1199     return static_cell_count();
1200   }
1201 
1202   // Direct accessors
1203   static uint row_limit() {
1204     return (uint) TypeProfileWidth;
1205   }
1206   static int receiver_cell_index(uint row) {
1207     return receiver0_offset + row * receiver_type_row_cell_count;
1208   }
1209   static int receiver_count_cell_index(uint row) {
1210     return count0_offset + row * receiver_type_row_cell_count;
1211   }
1212 
1213   Klass* receiver(uint row) const {
1214     assert(row < row_limit(), "oob");
1215 
1216     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
1217     assert(recv == nullptr || recv->is_klass(), "wrong type");
1218     return recv;
1219   }
1220 
1221   void set_receiver(uint row, Klass* k) {
1222     assert((uint)row < row_limit(), "oob");
1223     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
1224   }
1225 
1226   uint receiver_count(uint row) const {
1227     assert(row < row_limit(), "oob");
1228     return uint_at(receiver_count_cell_index(row));
1229   }
1230 
1231   void set_receiver_count(uint row, uint count) {
1232     assert(row < row_limit(), "oob");
1233     set_uint_at(receiver_count_cell_index(row), count);
1234   }
1235 
1236   void clear_row(uint row) {
1237     assert(row < row_limit(), "oob");
1238     // Clear total count - indicator of polymorphic call site.
1239     // The site may look like as monomorphic after that but
1240     // it allow to have more accurate profiling information because
1241     // there was execution phase change since klasses were unloaded.
1242     // If the site is still polymorphic then MDO will be updated
1243     // to reflect it. But it could be the case that the site becomes
1244     // only bimorphic. Then keeping total count not 0 will be wrong.
1245     // Even if we use monomorphic (when it is not) for compilation
1246     // we will only have trap, deoptimization and recompile again
1247     // with updated MDO after executing method in Interpreter.
1248     // An additional receiver will be recorded in the cleaned row
1249     // during next call execution.
1250     //
1251     // Note: our profiling logic works with empty rows in any slot.
1252     // We do sorting a profiling info (ciCallProfile) for compilation.
1253     //
1254     set_count(0);
1255     set_receiver(row, nullptr);
1256     set_receiver_count(row, 0);
1257   }
1258 
1259   // Code generation support
1260   static ByteSize receiver_offset(uint row) {
1261     return cell_offset(receiver_cell_index(row));
1262   }
1263   static ByteSize receiver_count_offset(uint row) {
1264     return cell_offset(receiver_count_cell_index(row));
1265   }
1266   static ByteSize receiver_type_data_size() {
1267     return cell_offset(static_cell_count());
1268   }
1269 
1270   // GC support
1271   virtual void clean_weak_klass_links(bool always_clean);
1272 
1273   // CDS support
1274   virtual void metaspace_pointers_do(MetaspaceClosure* it);
1275 
1276   void print_receiver_data_on(outputStream* st) const;
1277   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1278 };
1279 
1280 // VirtualCallData
1281 //
1282 // A VirtualCallData is used to access profiling information about a
1283 // virtual call.  For now, it has nothing more than a ReceiverTypeData.
1284 class VirtualCallData : public ReceiverTypeData {
1285 public:
1286   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
1287     assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1288            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1289   }
1290 
1291   virtual bool is_VirtualCallData() const { return true; }
1292 
1293   static int static_cell_count() {
1294     // At this point we could add more profile state, e.g., for arguments.
1295     // But for now it's the same size as the base record type.
1296     return ReceiverTypeData::static_cell_count();
1297   }
1298 
1299   virtual int cell_count() const {
1300     return static_cell_count();
1301   }
1302 
1303   // Direct accessors
1304   static ByteSize virtual_call_data_size() {
1305     return cell_offset(static_cell_count());
1306   }
1307 
1308   void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1309   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1310 };
1311 
1312 // VirtualCallTypeData
1313 //
1314 // A VirtualCallTypeData is used to access profiling information about
1315 // a virtual call for which we collect type information about
1316 // arguments and return value.
1317 class VirtualCallTypeData : public VirtualCallData {
1318 private:
1319   // entries for arguments if any
1320   TypeStackSlotEntries _args;
1321   // entry for return type if any
1322   SingleTypeEntry _ret;
1323 
1324   int cell_count_global_offset() const {
1325     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1326   }
1327 
1328   // number of cells not counting the header
1329   int cell_count_no_header() const {
1330     return uint_at(cell_count_global_offset());
1331   }
1332 
1333   void check_number_of_arguments(int total) {
1334     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1335   }
1336 
1337 public:
1338   VirtualCallTypeData(DataLayout* layout) :
1339     VirtualCallData(layout),
1340     _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1341     _ret(cell_count() - SingleTypeEntry::static_cell_count())
1342   {
1343     assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1344     // Some compilers (VC++) don't want this passed in member initialization list
1345     _args.set_profile_data(this);
1346     _ret.set_profile_data(this);
1347   }
1348 
1349   const TypeStackSlotEntries* args() const {
1350     assert(has_arguments(), "no profiling of arguments");
1351     return &_args;
1352   }
1353 
1354   const SingleTypeEntry* ret() const {
1355     assert(has_return(), "no profiling of return value");
1356     return &_ret;
1357   }
1358 
1359   virtual bool is_VirtualCallTypeData() const { return true; }
1360 
1361   static int static_cell_count() {
1362     return -1;
1363   }
1364 
1365   static int compute_cell_count(BytecodeStream* stream) {
1366     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1367   }
1368 
1369   static void initialize(DataLayout* dl, int cell_count) {
1370     TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
1371   }
1372 
1373   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1374 
1375   virtual int cell_count() const {
1376     return VirtualCallData::static_cell_count() +
1377       TypeEntriesAtCall::header_cell_count() +
1378       int_at_unchecked(cell_count_global_offset());
1379   }
1380 
1381   int number_of_arguments() const {
1382     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1383   }
1384 
1385   void set_argument_type(int i, Klass* k) {
1386     assert(has_arguments(), "no arguments!");
1387     intptr_t current = _args.type(i);
1388     _args.set_type(i, TypeEntries::with_status(k, current));
1389   }
1390 
1391   void set_return_type(Klass* k) {
1392     assert(has_return(), "no return!");
1393     intptr_t current = _ret.type();
1394     _ret.set_type(TypeEntries::with_status(k, current));
1395   }
1396 
1397   // An entry for a return value takes less space than an entry for an
1398   // argument, so if the remainder of the number of cells divided by
1399   // the number of cells for an argument is not null, a return value
1400   // is profiled in this object.
1401   bool has_return() const {
1402     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1403     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1404     return res;
1405   }
1406 
1407   // An entry for a return value takes less space than an entry for an
1408   // argument so if the number of cells exceeds the number of cells
1409   // needed for an argument, this object contains type information for
1410   // at least one argument.
1411   bool has_arguments() const {
1412     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1413     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1414     return res;
1415   }
1416 
1417   // Code generation support
1418   static ByteSize args_data_offset() {
1419     return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1420   }
1421 
1422   ByteSize argument_type_offset(int i) {
1423     return _args.type_offset(i);
1424   }
1425 
1426   ByteSize return_type_offset() {
1427     return _ret.type_offset();
1428   }
1429 
1430   // GC support
1431   virtual void clean_weak_klass_links(bool always_clean) {
1432     ReceiverTypeData::clean_weak_klass_links(always_clean);
1433     if (has_arguments()) {
1434       _args.clean_weak_klass_links(always_clean);
1435     }
1436     if (has_return()) {
1437       _ret.clean_weak_klass_links(always_clean);
1438     }
1439   }
1440 
1441   // CDS support
1442   virtual void metaspace_pointers_do(MetaspaceClosure* it) {
1443     ReceiverTypeData::metaspace_pointers_do(it);
1444     if (has_arguments()) {
1445       _args.metaspace_pointers_do(it);
1446     }
1447     if (has_return()) {
1448       _ret.metaspace_pointers_do(it);
1449     }
1450   }
1451 
1452   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1453 };
1454 
1455 // RetData
1456 //
1457 // A RetData is used to access profiling information for a ret bytecode.
1458 // It is composed of a count of the number of times that the ret has
1459 // been executed, followed by a series of triples of the form
1460 // (bci, count, di) which count the number of times that some bci was the
1461 // target of the ret and cache a corresponding data displacement.
1462 class RetData : public CounterData {
1463 protected:
1464   enum {
1465     bci0_offset = counter_cell_count,
1466     count0_offset,
1467     displacement0_offset,
1468     ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1469   };
1470 
1471   void set_bci(uint row, int bci) {
1472     assert((uint)row < row_limit(), "oob");
1473     set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1474   }
1475   void release_set_bci(uint row, int bci);
1476   void set_bci_count(uint row, uint count) {
1477     assert((uint)row < row_limit(), "oob");
1478     set_uint_at(count0_offset + row * ret_row_cell_count, count);
1479   }
1480   void set_bci_displacement(uint row, int disp) {
1481     set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1482   }
1483 
1484 public:
1485   RetData(DataLayout* layout) : CounterData(layout) {
1486     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
1487   }
1488 
1489   virtual bool is_RetData() const { return true; }
1490 
1491   enum {
1492     no_bci = -1 // value of bci when bci1/2 are not in use.
1493   };
1494 
1495   static int static_cell_count() {
1496     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
1497   }
1498 
1499   virtual int cell_count() const {
1500     return static_cell_count();
1501   }
1502 
1503   static uint row_limit() {
1504     return (uint) BciProfileWidth;
1505   }
1506   static int bci_cell_index(uint row) {
1507     return bci0_offset + row * ret_row_cell_count;
1508   }
1509   static int bci_count_cell_index(uint row) {
1510     return count0_offset + row * ret_row_cell_count;
1511   }
1512   static int bci_displacement_cell_index(uint row) {
1513     return displacement0_offset + row * ret_row_cell_count;
1514   }
1515 
1516   // Direct accessors
1517   int bci(uint row) const {
1518     return int_at(bci_cell_index(row));
1519   }
1520   uint bci_count(uint row) const {
1521     return uint_at(bci_count_cell_index(row));
1522   }
1523   int bci_displacement(uint row) const {
1524     return int_at(bci_displacement_cell_index(row));
1525   }
1526 
1527   // Interpreter Runtime support
1528   address fixup_ret(int return_bci, MethodData* mdo);
1529 
1530   // Code generation support
1531   static ByteSize bci_offset(uint row) {
1532     return cell_offset(bci_cell_index(row));
1533   }
1534   static ByteSize bci_count_offset(uint row) {
1535     return cell_offset(bci_count_cell_index(row));
1536   }
1537   static ByteSize bci_displacement_offset(uint row) {
1538     return cell_offset(bci_displacement_cell_index(row));
1539   }
1540 
1541   // Specific initialization.
1542   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1543 
1544   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1545 };
1546 
1547 // BranchData
1548 //
1549 // A BranchData is used to access profiling data for a two-way branch.
1550 // It consists of taken and not_taken counts as well as a data displacement
1551 // for the taken case.
1552 class BranchData : public JumpData {
1553   friend class VMStructs;
1554   friend class JVMCIVMStructs;
1555 protected:
1556   enum {
1557     not_taken_off_set = jump_cell_count,
1558     branch_cell_count
1559   };
1560 
1561   void set_displacement(int displacement) {
1562     set_int_at(displacement_off_set, displacement);
1563   }
1564 
1565 public:
1566   BranchData(DataLayout* layout) : JumpData(layout) {
1567     assert(layout->tag() == DataLayout::branch_data_tag || layout->tag() == DataLayout::acmp_data_tag, "wrong type");
1568   }
1569 
1570   virtual bool is_BranchData() const { return true; }
1571 
1572   static int static_cell_count() {
1573     return branch_cell_count;
1574   }
1575 
1576   virtual int cell_count() const {
1577     return static_cell_count();
1578   }
1579 
1580   // Direct accessor
1581   uint not_taken() const {
1582     return uint_at(not_taken_off_set);
1583   }
1584 
1585   void set_not_taken(uint cnt) {
1586     set_uint_at(not_taken_off_set, cnt);
1587   }
1588 
1589   uint inc_not_taken() {
1590     uint cnt = not_taken() + 1;
1591     // Did we wrap? Will compiler screw us??
1592     if (cnt == 0) cnt--;
1593     set_uint_at(not_taken_off_set, cnt);
1594     return cnt;
1595   }
1596 
1597   // Code generation support
1598   static ByteSize not_taken_offset() {
1599     return cell_offset(not_taken_off_set);
1600   }
1601   static ByteSize branch_data_size() {
1602     return cell_offset(branch_cell_count);
1603   }
1604 
1605   // Specific initialization.
1606   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1607 
1608   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1609 };
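
     // A minimal usage sketch (hypothetical DataLayout* layout already tagged with
     // DataLayout::branch_data_tag; taken() is assumed to be inherited from JumpData):
     //
     //   BranchData bd(layout);
     //   bd.inc_not_taken();          // bump the not-taken counter (saturates instead of wrapping)
     //   uint nt = bd.not_taken();    // not-taken count
     //   uint tk = bd.taken();        // taken count, from the JumpData part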
1610 
1611 // ArrayData
1612 //
1613 // An ArrayData is a base class for accessing profiling data which does
1614 // not have a statically known size.  It consists of an array length
1615 // and an array start.
1616 class ArrayData : public ProfileData {
1617   friend class VMStructs;
1618   friend class JVMCIVMStructs;
1619 protected:
1620   friend class DataLayout;
1621 
1622   enum {
1623     array_len_off_set,
1624     array_start_off_set
1625   };
1626 
1627   uint array_uint_at(int index) const {
1628     int aindex = index + array_start_off_set;
1629     return uint_at(aindex);
1630   }
1631   int array_int_at(int index) const {
1632     int aindex = index + array_start_off_set;
1633     return int_at(aindex);
1634   }
1635   void array_set_int_at(int index, int value) {
1636     int aindex = index + array_start_off_set;
1637     set_int_at(aindex, value);
1638   }
1639 
1640   // Code generation support for subclasses.
1641   static ByteSize array_element_offset(int index) {
1642     return cell_offset(array_start_off_set + index);
1643   }
1644 
1645 public:
1646   ArrayData(DataLayout* layout) : ProfileData(layout) {}
1647 
1648   virtual bool is_ArrayData() const { return true; }
1649 
1650   static int static_cell_count() {
1651     return -1;
1652   }
1653 
1654   int array_len() const {
1655     return int_at_unchecked(array_len_off_set);
1656   }
1657 
1658   virtual int cell_count() const {
1659     return array_len() + 1;
1660   }
1661 
1662   // Code generation support
1663   static ByteSize array_len_offset() {
1664     return cell_offset(array_len_off_set);
1665   }
1666   static ByteSize array_start_offset() {
1667     return cell_offset(array_start_off_set);
1668   }
1669 };
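
     // Cell layout sketch for ArrayData subclasses: one cell holds the length,
     // followed by array_len() element cells, so cell_count() == array_len() + 1.
     // For example, an instance with array_len() == 4 spans 5 cells:
     //
     //   [ len = 4 | elem 0 | elem 1 | elem 2 | elem 3 ]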
1670 
1671 // MultiBranchData
1672 //
1673 // A MultiBranchData is used to access profiling information for
1674 // a multi-way branch (*switch bytecodes).  It consists of a series
1675 // of (count, displacement) pairs, which count the number of times each
1676 // case was taken and specify the data displacement for each branch target.
1677 class MultiBranchData : public ArrayData {
1678   friend class VMStructs;
1679   friend class JVMCIVMStructs;
1680 protected:
1681   enum {
1682     default_count_off_set,
1683     default_disaplacement_off_set,
1684     case_array_start
1685   };
1686   enum {
1687     relative_count_off_set,
1688     relative_displacement_off_set,
1689     per_case_cell_count
1690   };
1691 
1692   void set_default_displacement(int displacement) {
1693     array_set_int_at(default_disaplacement_off_set, displacement);
1694   }
1695   void set_displacement_at(int index, int displacement) {
1696     array_set_int_at(case_array_start +
1697                      index * per_case_cell_count +
1698                      relative_displacement_off_set,
1699                      displacement);
1700   }
1701 
1702 public:
1703   MultiBranchData(DataLayout* layout) : ArrayData(layout) {
1704     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
1705   }
1706 
1707   virtual bool is_MultiBranchData() const { return true; }
1708 
1709   static int compute_cell_count(BytecodeStream* stream);
1710 
1711   int number_of_cases() const {
1712     int alen = array_len() - 2; // subtract the two cells used by the default case
1713     assert(alen % per_case_cell_count == 0, "must be even");
1714     return (alen / per_case_cell_count);
1715   }
1716 
1717   uint default_count() const {
1718     return array_uint_at(default_count_off_set);
1719   }
1720   int default_displacement() const {
1721     return array_int_at(default_disaplacement_off_set);
1722   }
1723 
1724   uint count_at(int index) const {
1725     return array_uint_at(case_array_start +
1726                          index * per_case_cell_count +
1727                          relative_count_off_set);
1728   }
1729   int displacement_at(int index) const {
1730     return array_int_at(case_array_start +
1731                         index * per_case_cell_count +
1732                         relative_displacement_off_set);
1733   }
1734 
1735   // Code generation support
1736   static ByteSize default_count_offset() {
1737     return array_element_offset(default_count_off_set);
1738   }
1739   static ByteSize default_displacement_offset() {
1740     return array_element_offset(default_disaplacement_off_set);
1741   }
1742   static ByteSize case_count_offset(int index) {
1743     return case_array_offset() +
1744            (per_case_size() * index) +
1745            relative_count_offset();
1746   }
1747   static ByteSize case_array_offset() {
1748     return array_element_offset(case_array_start);
1749   }
1750   static ByteSize per_case_size() {
1751     return in_ByteSize(per_case_cell_count) * cell_size;
1752   }
1753   static ByteSize relative_count_offset() {
1754     return in_ByteSize(relative_count_off_set) * cell_size;
1755   }
1756   static ByteSize relative_displacement_offset() {
1757     return in_ByteSize(relative_displacement_off_set) * cell_size;
1758   }
1759 
1760   // Specific initialization.
1761   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1762 
1763   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1764 };
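
     // A minimal iteration sketch (hypothetical DataLayout* layout tagged with
     // DataLayout::multi_branch_data_tag):
     //
     //   MultiBranchData mbd(layout);
     //   uint dflt = mbd.default_count();                 // times the default case was taken
     //   for (int i = 0; i < mbd.number_of_cases(); i++) {
     //     uint cnt  = mbd.count_at(i);                   // times case i was taken
     //     int  disp = mbd.displacement_at(i);            // mdp adjustment for case i
     //   }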
1765 
1766 class ArgInfoData : public ArrayData {
1767 
1768 public:
1769   ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1770     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1771   }
1772 
1773   virtual bool is_ArgInfoData() const { return true; }
1774 
1775 
1776   int number_of_args() const {
1777     return array_len();
1778   }
1779 
1780   uint arg_modified(int arg) const {
1781     return array_uint_at(arg);
1782   }
1783 
1784   void set_arg_modified(int arg, uint val) {
1785     array_set_int_at(arg, val);
1786   }
1787 
1788   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1789 };
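
     // A minimal read sketch (hypothetical ArgInfoData* ai for a method's
     // argument-info cell):
     //
     //   for (int a = 0; a < ai->number_of_args(); a++) {
     //     uint bits = ai->arg_modified(a);   // per-argument "modified" bits used by escape analysis
     //   }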
1790 
1791 // ParametersTypeData
1792 //
1793 // A ParametersTypeData is used to access profiling information about
1794 // the types of a method's parameters.
1795 class ParametersTypeData : public ArrayData {
1796 
1797 private:
1798   TypeStackSlotEntries _parameters;
1799 
1800   static int stack_slot_local_offset(int i) {
1801     assert_profiling_enabled();
1802     return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
1803   }
1804 
1805   static int type_local_offset(int i) {
1806     assert_profiling_enabled();
1807     return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
1808   }
1809 
1810   static bool profiling_enabled();
1811   static void assert_profiling_enabled() {
1812     assert(profiling_enabled(), "method parameters profiling should be on");
1813   }
1814 
1815 public:
1816   ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
1817     assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
1818     // Some compilers (VC++) don't want this passed in the member initialization list
1819     _parameters.set_profile_data(this);
1820   }
1821 
1822   static int compute_cell_count(Method* m);
1823 
1824   virtual bool is_ParametersTypeData() const { return true; }
1825 
1826   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1827 
1828   int number_of_parameters() const {
1829     return array_len() / TypeStackSlotEntries::per_arg_count();
1830   }
1831 
1832   const TypeStackSlotEntries* parameters() const { return &_parameters; }
1833 
1834   uint stack_slot(int i) const {
1835     return _parameters.stack_slot(i);
1836   }
1837 
1838   void set_type(int i, Klass* k) {
1839     intptr_t current = _parameters.type(i);
1840     _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
1841   }
1842 
1843   virtual void clean_weak_klass_links(bool always_clean) {
1844     _parameters.clean_weak_klass_links(always_clean);
1845   }
1846 
1847   // CDS support
1848   virtual void metaspace_pointers_do(MetaspaceClosure* it) {
1849     _parameters.metaspace_pointers_do(it);
1850   }
1851 
1852   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1853 
1854   static ByteSize stack_slot_offset(int i) {
1855     return cell_offset(stack_slot_local_offset(i));
1856   }
1857 
1858   static ByteSize type_offset(int i) {
1859     return cell_offset(type_local_offset(i));
1860   }
1861 };
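
     // A minimal read sketch (hypothetical ParametersTypeData* ptd, e.g. from
     // MethodData::parameters_type_data(); type() is assumed to be provided by
     // TypeStackSlotEntries):
     //
     //   for (int i = 0; i < ptd->number_of_parameters(); i++) {
     //     uint     slot = ptd->stack_slot(i);            // interpreter stack slot of parameter i
     //     intptr_t t    = ptd->parameters()->type(i);    // encoded klass pointer plus status bits
     //   }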
1862 
1863 // SpeculativeTrapData
1864 //
1865 // A SpeculativeTrapData is used to record traps due to type
1866 // speculation. It records the root method of the compilation: the fact
1867 // that a type speculation is wrong in the context of one compilation
1868 // (of method1) doesn't mean it's wrong in the context of another one
1869 // (of method2). Type speculation may have more or different data
1870 // available in the context of the compilation of method2, so it is
1871 // worthwhile to retry, in the context of the compilation of method2,
1872 // an optimization that failed for the compilation of method1.
1873 // Space for SpeculativeTrapData entries is allocated from the extra
1874 // data space in the MDO. If we run out of space, the trap data for
1875 // the ProfileData at that bci is updated.
1876 class SpeculativeTrapData : public ProfileData {
1877 protected:
1878   enum {
1879     speculative_trap_method,
1880 #ifndef _LP64
1881     // The size of the area for traps is a multiple of the header
1882     // size, 2 cells on 32 bits. Packed at the end of this area are
1883     // argument info entries (with tag
1884     // DataLayout::arg_info_data_tag). The logic in
1885     // MethodData::bci_to_extra_data() that guarantees traps don't
1886     // overflow over argument info entries assumes the size of a
1887     // SpeculativeTrapData is twice the header size. On 32 bits, a
1888     // SpeculativeTrapData must be 4 cells.
1889     padding,
1890 #endif
1891     speculative_trap_cell_count
1892   };
1893 public:
1894   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
1895     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
1896   }
1897 
1898   virtual bool is_SpeculativeTrapData() const { return true; }
1899 
1900   static int static_cell_count() {
1901     return speculative_trap_cell_count;
1902   }
1903 
1904   virtual int cell_count() const {
1905     return static_cell_count();
1906   }
1907 
1908   // Direct accessor
1909   Method* method() const {
1910     return (Method*)intptr_at(speculative_trap_method);
1911   }
1912 
1913   void set_method(Method* m) {
1914     assert(!m->is_old(), "cannot add old methods");
1915     set_intptr_at(speculative_trap_method, (intptr_t)m);
1916   }
1917 
1918   static ByteSize method_offset() {
1919     return cell_offset(speculative_trap_method);
1920   }
1921 
1922   // CDS support
1923   virtual void metaspace_pointers_do(MetaspaceClosure* it);
1924 
1925   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1926 };
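
     // A minimal usage sketch (hypothetical DataLayout* layout tagged with
     // DataLayout::speculative_trap_data_tag and a root compilation Method* root):
     //
     //   SpeculativeTrapData trap(layout);
     //   trap.set_method(root);        // remember which compilation the speculation failed in
     //   Method* m = trap.method();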
1927 
1928 class ArrayStoreData : public ReceiverTypeData {
1929 private:
1930   enum {
1931     flat_array_flag = BitData::last_bit_data_flag,
1932     null_free_array_flag = flat_array_flag + 1,
1933   };
1934 
1935   SingleTypeEntry _array;
1936 
1937 public:
1938   ArrayStoreData(DataLayout* layout) :
1939     ReceiverTypeData(layout),
1940     _array(ReceiverTypeData::static_cell_count()) {
1941     assert(layout->tag() == DataLayout::array_store_data_tag, "wrong type");
1942     _array.set_profile_data(this);
1943   }
1944 
1945   const SingleTypeEntry* array() const {
1946     return &_array;
1947   }
1948 
1949   virtual bool is_ArrayStoreData() const { return true; }
1950 
1951   static int static_cell_count() {
1952     return ReceiverTypeData::static_cell_count() + SingleTypeEntry::static_cell_count();
1953   }
1954 
1955   virtual int cell_count() const {
1956     return static_cell_count();
1957   }
1958 
1959   void set_flat_array() { set_flag_at(flat_array_flag); }
1960   bool flat_array() const { return flag_at(flat_array_flag); }
1961 
1962   void set_null_free_array() { set_flag_at(null_free_array_flag); }
1963   bool null_free_array() const { return flag_at(null_free_array_flag); }
1964 
1965   // Code generation support
1966   static int flat_array_byte_constant() {
1967     return flag_number_to_constant(flat_array_flag);
1968   }
1969 
1970   static int null_free_array_byte_constant() {
1971     return flag_number_to_constant(null_free_array_flag);
1972   }
1973 
1974   static ByteSize array_offset() {
1975     return cell_offset(ReceiverTypeData::static_cell_count());
1976   }
1977 
1978   virtual void clean_weak_klass_links(bool always_clean) {
1979     ReceiverTypeData::clean_weak_klass_links(always_clean);
1980     _array.clean_weak_klass_links(always_clean);
1981   }
1982 
1983   virtual void metaspace_pointers_do(MetaspaceClosure* it) {
1984     ReceiverTypeData::metaspace_pointers_do(it);
1985     _array.metaspace_pointers_do(it);
1986   }
1987 
1988   static ByteSize array_store_data_size() {
1989     return cell_offset(static_cell_count());
1990   }
1991 
1992   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1993 };
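
     // A minimal usage sketch (hypothetical DataLayout* layout tagged with
     // DataLayout::array_store_data_tag; the two booleans are placeholders for
     // whatever the runtime observed at the store site):
     //
     //   ArrayStoreData asd(layout);
     //   if (observed_flat_array)      asd.set_flat_array();
     //   if (observed_null_free_array) asd.set_null_free_array();
     //   const SingleTypeEntry* arr = asd.array();   // profiled array type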
1994 
1995 class ArrayLoadData : public ProfileData {
1996 private:
1997   enum {
1998     flat_array_flag = DataLayout::first_flag,
1999     null_free_array_flag = flat_array_flag + 1,
2000   };
2001 
2002   SingleTypeEntry _array;
2003   SingleTypeEntry _element;
2004 
2005 public:
2006   ArrayLoadData(DataLayout* layout) :
2007     ProfileData(layout),
2008     _array(0),
2009     _element(SingleTypeEntry::static_cell_count()) {
2010     assert(layout->tag() == DataLayout::array_load_data_tag, "wrong type");
2011     _array.set_profile_data(this);
2012     _element.set_profile_data(this);
2013   }
2014 
2015   const SingleTypeEntry* array() const {
2016     return &_array;
2017   }
2018 
2019   const SingleTypeEntry* element() const {
2020     return &_element;
2021   }
2022 
2023   virtual bool is_ArrayLoadData() const { return true; }
2024 
2025   static int static_cell_count() {
2026     return SingleTypeEntry::static_cell_count() * 2;
2027   }
2028 
2029   virtual int cell_count() const {
2030     return static_cell_count();
2031   }
2032 
2033   void set_flat_array() { set_flag_at(flat_array_flag); }
2034   bool flat_array() const { return flag_at(flat_array_flag); }
2035 
2036   void set_null_free_array() { set_flag_at(null_free_array_flag); }
2037   bool null_free_array() const { return flag_at(null_free_array_flag); }
2038 
2039   // Code generation support
2040   static int flat_array_byte_constant() {
2041     return flag_number_to_constant(flat_array_flag);
2042   }
2043 
2044   static int null_free_array_byte_constant() {
2045     return flag_number_to_constant(null_free_array_flag);
2046   }
2047 
2048   static ByteSize array_offset() {
2049     return cell_offset(0);
2050   }
2051 
2052   static ByteSize element_offset() {
2053     return cell_offset(SingleTypeEntry::static_cell_count());
2054   }
2055 
2056   virtual void clean_weak_klass_links(bool always_clean) {
2057     _array.clean_weak_klass_links(always_clean);
2058     _element.clean_weak_klass_links(always_clean);
2059   }
2060 
2061   virtual void metaspace_pointers_do(MetaspaceClosure* it) {
2062     _array.metaspace_pointers_do(it);
2063     _element.metaspace_pointers_do(it);
2064   }
2065 
2066   static ByteSize array_load_data_size() {
2067     return cell_offset(static_cell_count());
2068   }
2069 
2070   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
2071 };
2072 
2073 class ACmpData : public BranchData {
2074 private:
2075   enum {
2076     left_inline_type_flag = DataLayout::first_flag,
2077     right_inline_type_flag
2078   };
2079 
2080   SingleTypeEntry _left;
2081   SingleTypeEntry _right;
2082 
2083 public:
2084   ACmpData(DataLayout* layout) :
2085     BranchData(layout),
2086     _left(BranchData::static_cell_count()),
2087     _right(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count()) {
2088     assert(layout->tag() == DataLayout::acmp_data_tag, "wrong type");
2089     _left.set_profile_data(this);
2090     _right.set_profile_data(this);
2091   }
2092 
2093   const SingleTypeEntry* left() const {
2094     return &_left;
2095   }
2096 
2097   const SingleTypeEntry* right() const {
2098     return &_right;
2099   }
2100 
2101   virtual bool is_ACmpData() const { return true; }
2102 
2103   static int static_cell_count() {
2104     return BranchData::static_cell_count() + SingleTypeEntry::static_cell_count() * 2;
2105   }
2106 
2107   virtual int cell_count() const {
2108     return static_cell_count();
2109   }
2110 
2111   void set_left_inline_type() { set_flag_at(left_inline_type_flag); }
2112   bool left_inline_type() const { return flag_at(left_inline_type_flag); }
2113 
2114   void set_right_inline_type() { set_flag_at(right_inline_type_flag); }
2115   bool right_inline_type() const { return flag_at(right_inline_type_flag); }
2116 
2117   // Code generation support
2118   static int left_inline_type_byte_constant() {
2119     return flag_number_to_constant(left_inline_type_flag);
2120   }
2121 
2122   static int right_inline_type_byte_constant() {
2123     return flag_number_to_constant(right_inline_type_flag);
2124   }
2125 
2126   static ByteSize left_offset() {
2127     return cell_offset(BranchData::static_cell_count());
2128   }
2129 
2130   static ByteSize right_offset() {
2131     return cell_offset(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count());
2132   }
2133 
2134   virtual void clean_weak_klass_links(bool always_clean) {
2135     _left.clean_weak_klass_links(always_clean);
2136     _right.clean_weak_klass_links(always_clean);
2137   }
2138 
2139   virtual void metaspace_pointers_do(MetaspaceClosure* it) {
2140     _left.metaspace_pointers_do(it);
2141     _right.metaspace_pointers_do(it);
2142   }
2143 
2144   static ByteSize acmp_data_size() {
2145     return cell_offset(static_cell_count());
2146   }
2147 
2148   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
2149 };
2150 
2151 // MethodData*
2152 //
2153 // A MethodData* holds information which has been collected about
2154 // a method.  Its layout looks like this:
2155 //
2156 // -----------------------------
2157 // | header                    |
2158 // | klass                     |
2159 // -----------------------------
2160 // | method                    |
2161 // | size of the MethodData*   |
2162 // -----------------------------
2163 // | Data entries...           |
2164 // |   (variable size)         |
2165 // |                           |
2166 // .                           .
2167 // .                           .
2168 // .                           .
2169 // |                           |
2170 // -----------------------------
2171 //
2172 // The data entry area is a heterogeneous array of DataLayouts. Each
2173 // DataLayout in the array corresponds to a specific bytecode in the
2174 // method.  The entries in the array are sorted by the corresponding
2175 // bytecode.  Access to the data is via resource-allocated ProfileData objects,
2176 // which point to the underlying blocks of DataLayout structures.
2177 //
2178 // During interpretation, if profiling is enabled, the interpreter
2179 // maintains a method data pointer (mdp), which points at the entry
2180 // in the array corresponding to the current bci.  In the course of
2181 // interpretation, when a bytecode is encountered that has profile data
2182 // associated with it, the entry pointed to by mdp is updated, then the
2183 // mdp is adjusted to point to the next appropriate DataLayout.  If mdp
2184 // is null to begin with, the interpreter assumes that the current method
2185 // is not (yet) being profiled.
2186 //
2187 // In MethodData* parlance, "dp" is a "data pointer", the actual address
2188 // of a DataLayout element.  A "di" is a "data index", the offset in bytes
2189 // from the base of the data entry array.  A "displacement" is a byte offset
2190 // recorded in certain ProfileData objects that indicates the amount by which
2191 // the mdp must be adjusted in the event of a change in control flow.
2192 //
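     // A small worked sketch of the dp/di/displacement arithmetic above
     // (hypothetical bci and MethodData* mdo; displacement() is assumed to be
     // the JumpData accessor for the recorded displacement):
     //
     //   address dp = mdo->bci_to_dp(bci);   // data pointer for the profile at bci
     //   int     di = mdo->dp_to_di(dp);     // the same position as a byte offset from the data base
     //   // After a taken branch, the interpreter advances the mdp by the
     //   // recorded displacement (in bytes): new_dp = dp + branch_data->displacement();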
2193 
2194 class CleanExtraDataClosure : public StackObj {
2195 public:
2196   virtual bool is_live(Method* m) = 0;
2197 };
2198 
2199 
2200 #if INCLUDE_JVMCI
2201 // Encapsulates an encoded speculation reason. These are linked together in
2202 // a list that is atomically appended to during deoptimization. Entries are
2203 // never removed from the list.
2204 // @see jdk.vm.ci.hotspot.HotSpotSpeculationLog.HotSpotSpeculationEncoding
2205 class FailedSpeculation: public CHeapObj<mtCompiler> {
2206  private:
2207   // The length of HotSpotSpeculationEncoding.toByteArray(). The data itself
2208   // is an array embedded at the end of this object.
2209   int   _data_len;
2210 
2211   // Next entry in a linked list.
2212   FailedSpeculation* _next;
2213 
2214   FailedSpeculation(address data, int data_len);
2215 
2216   FailedSpeculation** next_adr() { return &_next; }
2217 
2218   // Placement new operator for inlining the speculation data into
2219   // the FailedSpeculation object.
2220   void* operator new(size_t size, size_t fs_size) throw();
2221 
2222  public:
2223   char* data()         { return (char*)(((address) this) + sizeof(FailedSpeculation)); }
2224   int data_len() const { return _data_len; }
2225   FailedSpeculation* next() const { return _next; }
2226 
2227   // Atomically appends a speculation from nm to the list whose head is at (*failed_speculations_address).
2228   // Returns false if the FailedSpeculation object could not be allocated.
2229   static bool add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len);
2230 
2231   // Frees all entries in the linked list whose head is at (*failed_speculations_address).
2232   static void free_failed_speculations(FailedSpeculation** failed_speculations_address);
2233 };
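
     // A minimal usage sketch (hypothetical nmethod* nm and an encoded speculation
     // buffer speculation_bytes/speculation_len produced on the JVMCI side):
     //
     //   FailedSpeculation* head = nullptr;
     //   FailedSpeculation::add_failed_speculation(nm, &head, speculation_bytes, speculation_len);
     //   for (FailedSpeculation* fs = head; fs != nullptr; fs = fs->next()) {
     //     // fs->data() and fs->data_len() expose the encoded speculation reason
     //   }
     //   FailedSpeculation::free_failed_speculations(&head);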
2234 #endif
2235 
2236 class ciMethodData;
2237 
2238 class MethodData : public Metadata {
2239   friend class VMStructs;
2240   friend class JVMCIVMStructs;
2241   friend class ProfileData;
2242   friend class TypeEntriesAtCall;
2243   friend class ciMethodData;
2244   friend class VM_ReinitializeMDO;
2245 
2246   // If you add a new field that points to any metaspace object, you
2247   // must add this field to MethodData::metaspace_pointers_do().
2248 
2249   // Back pointer to the Method*
2250   Method* _method;
2251 
2252   // Size of this MethodData, in bytes
2253   int _size;
2254 
2255   // Cached hint for bci_to_dp and bci_to_data
2256   int _hint_di;
2257 
2258   Mutex* volatile _extra_data_lock;
2259 
2260   MethodData(const methodHandle& method);
2261 
2262   void initialize();
2263 
2264 public:
2265   MethodData();
2266 
2267   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2268 
2269   virtual bool is_methodData() const { return true; }
2270 
2271   // Safely reinitialize the data in the MDO.  This is intended as a testing facility as the
2272   // reinitialization is performed at a safepoint, so it isn't cheap, and it doesn't ensure that all
2273   // readers will see consistent profile data.
2274   void reinitialize();
2275 
2276   // Whole-method sticky bits and flags
2277   enum {
2278     _trap_hist_limit    = Deoptimization::Reason_TRAP_HISTORY_LENGTH,
2279     _trap_hist_mask     = max_jubyte,
2280     _extra_data_count   = 4     // extra DataLayout headers, for trap history
2281   }; // Public flag values
2282 
2283   // Compiler-related counters.
2284   class CompilerCounters {
2285     friend class VMStructs;
2286     friend class JVMCIVMStructs;
2287 
2288     uint _nof_decompiles;             // count of all nmethod removals
2289     uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2290     uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2291     union {
2292       intptr_t _align;
2293       // JVMCI separates trap history for OSR compilations from normal compilations
2294       u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
2295     } _trap_hist;
2296 
2297   public:
2298     CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0) {
2299 #ifndef ZERO
2300       // Some Zero platforms do not have expected alignment, and do not use
2301       // this code. static_assert would still fire and fail for them.
2302       static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
2303 #endif
2304       uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
2305       Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
2306     }
2307 
2308     // Return (uint)-1 for overflow.
2309     uint trap_count(int reason) const {
2310       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2311       return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
2312     }
2313 
2314     uint inc_trap_count(int reason) {
2315       // Count another trap, anywhere in this method.
2316       assert(reason >= 0, "must be single trap");
2317       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2318       uint cnt1 = 1 + _trap_hist._array[reason];
2319       if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
2320         _trap_hist._array[reason] = (u1)cnt1;
2321         return cnt1;
2322       } else {
2323         return _trap_hist_mask + (++_nof_overflow_traps);
2324       }
2325     }
2326 
2327     uint overflow_trap_count() const {
2328       return _nof_overflow_traps;
2329     }
2330     uint overflow_recompile_count() const {
2331       return _nof_overflow_recompiles;
2332     }
2333     uint inc_overflow_recompile_count() {
2334       return ++_nof_overflow_recompiles;
2335     }
2336     uint decompile_count() const {
2337       return _nof_decompiles;
2338     }
2339     uint inc_decompile_count() {
2340       return ++_nof_decompiles;
2341     }
2342 
2343     // Support for code generation
2344     static ByteSize trap_history_offset() {
2345       return byte_offset_of(CompilerCounters, _trap_hist._array);
2346     }
2347   };
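
       // A worked example of the saturating per-reason trap counters above
       // (hypothetical reason r): each counter is a u1 that sticks at 255
       // (_trap_hist_mask) once it saturates.
       //
       //   counters.inc_trap_count(r);   // stored byte goes 0 -> 1, trap_count(r) == 1
       //   ...                           // after 255 increments the byte holds 255
       //   counters.inc_trap_count(r);   // overflow: byte stays 255, _nof_overflow_traps++
       //   counters.trap_count(r);       // now returns (uint)-1 to signal overflow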
2348 
2349 private:
2350   CompilerCounters _compiler_counters;
2351 
2352   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2353   intx              _eflags;          // flags on escape information
2354   intx              _arg_local;       // bit set of non-escaping arguments
2355   intx              _arg_stack;       // bit set of stack-allocatable arguments
2356   intx              _arg_returned;    // bit set of returned arguments
2357 
2358   // How many invocations has this MDO seen?
2359   // These counters are used to determine the exact age of the MDO.
2360   // We need them because, with tiered compilation, a method can be
2361   // executed concurrently at different levels.
2362   InvocationCounter _invocation_counter;
2363   // Same for backedges.
2364   InvocationCounter _backedge_counter;
2365   // Counter values at the time profiling started.
2366   int               _invocation_counter_start;
2367   int               _backedge_counter_start;
2368   uint              _tenure_traps;
2369   int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
2370   int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
2371 
2372   // Number of loops and blocks is computed when compiling the first
2373   // time with C1. It is used to determine if method is trivial.
2374   short             _num_loops;
2375   short             _num_blocks;
2376   // Does this method contain anything worth profiling?
2377   enum WouldProfile {unknown, no_profile, profile};
2378   WouldProfile      _would_profile;
2379 
2380 #if INCLUDE_JVMCI
2381   // Support for HotSpotMethodData.setCompiledIRSize(int)
2382   FailedSpeculation* _failed_speculations;
2383   int                _jvmci_ir_size;
2384 #endif
2385 
2386   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
2387   int _data_size;
2388 
2389   // data index for the area dedicated to parameters. -1 if no
2390   // parameter profiling.
2391   enum { no_parameters = -2, parameters_uninitialized = -1 };
2392   int _parameters_type_data_di;
2393 
2394   // data index of exception handler profiling data
2395   int _exception_handler_data_di;
2396 
2397   // Beginning of the data entries
2398   // See comment in ciMethodData::load_data
2399   intptr_t _data[1];
2400 
2401   // Helper for size computation
2402   static int compute_data_size(BytecodeStream* stream);
2403   static int bytecode_cell_count(Bytecodes::Code code);
2404   static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2405   enum { no_profile_data = -1, variable_cell_count = -2 };
2406 
2407   // Helper for initialization
2408   DataLayout* data_layout_at(int data_index) const {
2409     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
2410     return (DataLayout*) (((address)_data) + data_index);
2411   }
2412 
2413   static int single_exception_handler_data_cell_count() {
2414     return BitData::static_cell_count();
2415   }
2416 
2417   static int single_exception_handler_data_size() {
2418     return DataLayout::compute_size_in_bytes(single_exception_handler_data_cell_count());
2419   }
2420 
2421   DataLayout* exception_handler_data_at(int exception_handler_index) const {
2422     return data_layout_at(_exception_handler_data_di + (exception_handler_index * single_exception_handler_data_size()));
2423   }
2424 
2425   int num_exception_handler_data() const {
2426     return exception_handlers_data_size() / single_exception_handler_data_size();
2427   }
2428 
2429   // Initialize an individual data segment.  Returns the size of
2430   // the segment in bytes.
2431   int initialize_data(BytecodeStream* stream, int data_index);
2432 
2433   // Helper for data_at
2434   DataLayout* limit_data_position() const {
2435     return data_layout_at(_data_size);
2436   }
2437   bool out_of_bounds(int data_index) const {
2438     return data_index >= data_size();
2439   }
2440 
2441   // Give each of the data entries a chance to perform specific
2442   // data initialization.
2443   void post_initialize(BytecodeStream* stream);
2444 
2445   // hint accessors
2446   int      hint_di() const  { return _hint_di; }
2447   void set_hint_di(int di)  {
2448     assert(!out_of_bounds(di), "hint_di out of bounds");
2449     _hint_di = di;
2450   }
2451 
2452   DataLayout* data_layout_before(int bci) {
2453     // avoid SEGV on this edge case
2454     if (data_size() == 0)
2455       return nullptr;
2456     DataLayout* layout = data_layout_at(hint_di());
2457     if (layout->bci() <= bci)
2458       return layout;
2459     return data_layout_at(first_di());
2460   }
2461 
2462   // What is the index of the first data entry?
2463   int first_di() const { return 0; }
2464 
2465   ProfileData* bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp);
2466   // Find or create an extra ProfileData:
2467   ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
2468 
2469   // return the argument info cell
2470   ArgInfoData *arg_info();
2471 
2472   enum {
2473     no_type_profile = 0,
2474     type_profile_jsr292 = 1,
2475     type_profile_all = 2
2476   };
2477 
2478   static bool profile_jsr292(const methodHandle& m, int bci);
2479   static bool profile_unsafe(const methodHandle& m, int bci);
2480   static bool profile_memory_access(const methodHandle& m, int bci);
2481   static int profile_arguments_flag();
2482   static bool profile_all_arguments();
2483   static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
2484   static int profile_return_flag();
2485   static bool profile_all_return();
2486   static bool profile_return_for_invoke(const methodHandle& m, int bci);
2487   static int profile_parameters_flag();
2488   static bool profile_parameters_jsr292_only();
2489   static bool profile_all_parameters();
2490 
2491   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
2492   void verify_extra_data_clean(CleanExtraDataClosure* cl);
2493 
2494   DataLayout* exception_handler_bci_to_data_helper(int bci);
2495 
2496 public:
2497   void clean_extra_data(CleanExtraDataClosure* cl);
2498 
2499   static int header_size() {
2500     return sizeof(MethodData)/wordSize;
2501   }
2502 
2503   // Compute the size of a MethodData* before it is created.
2504   static int compute_allocation_size_in_bytes(const methodHandle& method);
2505   static int compute_allocation_size_in_words(const methodHandle& method);
2506   static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
2507 
2508   // Determine if a given bytecode can have profile information.
2509   static bool bytecode_has_profile(Bytecodes::Code code) {
2510     return bytecode_cell_count(code) != no_profile_data;
2511   }
2512 
2513   // reset into original state
2514   void init();
2515 
2516   // My size
2517   int size_in_bytes() const { return _size; }
2518   int size() const    { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
2519 
2520   int invocation_count() {
2521     if (invocation_counter()->carry()) {
2522       return InvocationCounter::count_limit;
2523     }
2524     return invocation_counter()->count();
2525   }
2526   int backedge_count() {
2527     if (backedge_counter()->carry()) {
2528       return InvocationCounter::count_limit;
2529     }
2530     return backedge_counter()->count();
2531   }
2532 
2533   int invocation_count_start() {
2534     if (invocation_counter()->carry()) {
2535       return 0;
2536     }
2537     return _invocation_counter_start;
2538   }
2539 
2540   int backedge_count_start() {
2541     if (backedge_counter()->carry()) {
2542       return 0;
2543     }
2544     return _backedge_counter_start;
2545   }
2546 
2547   int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
2548   int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
2549 
2550   void reset_start_counters() {
2551     _invocation_counter_start = invocation_count();
2552     _backedge_counter_start = backedge_count();
2553   }
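
       // Example of how the start counters and deltas above are typically
       // consumed (hypothetical MethodData* mdo):
       //
       //   mdo->reset_start_counters();                 // snapshot when profiling (re)starts
       //   ...                                          // the method keeps executing
       //   int fresh = mdo->invocation_count_delta();   // invocations since the snapshot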
2554 
2555   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
2556   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
2557 
2558 #if INCLUDE_JVMCI
2559   FailedSpeculation** get_failed_speculations_address() {
2560     return &_failed_speculations;
2561   }
2562 #endif
2563 
2564 #if INCLUDE_CDS
2565   void remove_unshareable_info();
2566   void restore_unshareable_info(TRAPS);
2567 #endif
2568 
2569   void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
2570   bool would_profile() const                  { return _would_profile != no_profile; }
2571 
2572   int num_loops() const                       { return _num_loops;  }
2573   void set_num_loops(short n)                 { _num_loops = n;     }
2574   int num_blocks() const                      { return _num_blocks; }
2575   void set_num_blocks(short n)                { _num_blocks = n;    }
2576 
2577   bool is_mature() const;
2578 
2579   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2580   enum EscapeFlag {
2581     estimated    = 1 << 0,
2582     return_local = 1 << 1,
2583     return_allocated = 1 << 2,
2584     allocated_escapes = 1 << 3,
2585     unknown_modified = 1 << 4
2586   };
2587 
2588   intx eflags()                                  { return _eflags; }
2589   intx arg_local()                               { return _arg_local; }
2590   intx arg_stack()                               { return _arg_stack; }
2591   intx arg_returned()                            { return _arg_returned; }
2592   uint arg_modified(int a);
2593   void set_eflags(intx v)                        { _eflags = v; }
2594   void set_arg_local(intx v)                     { _arg_local = v; }
2595   void set_arg_stack(intx v)                     { _arg_stack = v; }
2596   void set_arg_returned(intx v)                  { _arg_returned = v; }
2597   void set_arg_modified(int a, uint v);
2598   void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
2599 
2600   // Location and size of data area
2601   address data_base() const {
2602     return (address) _data;
2603   }
2604   int data_size() const {
2605     return _data_size;
2606   }
2607 
2608   int parameters_size_in_bytes() const {
2609     return pointer_delta_as_int((address) parameters_data_limit(), (address) parameters_data_base());
2610   }
2611 
2612   int exception_handlers_data_size() const {
2613     return pointer_delta_as_int((address) exception_handler_data_limit(), (address) exception_handler_data_base());
2614   }
2615 
2616   // Accessors
2617   Method* method() const { return _method; }
2618 
2619   // Get the data at an arbitrary (sort of) data index.
2620   ProfileData* data_at(int data_index) const;
2621 
2622   // Walk through the data in order.
2623   ProfileData* first_data() const { return data_at(first_di()); }
2624   ProfileData* next_data(ProfileData* current) const;
2625   DataLayout*  next_data_layout(DataLayout* current) const;
2626   bool is_valid(ProfileData* current) const { return current != nullptr; }
2627   bool is_valid(DataLayout*  current) const { return current != nullptr; }
2628 
2629   // Convert a dp (data pointer) to a di (data index).
2630   int dp_to_di(address dp) const {
2631     return (int)(dp - ((address)_data));
2632   }
2633 
2634   // bci to di/dp conversion.
2635   address bci_to_dp(int bci);
2636   int bci_to_di(int bci) {
2637     return dp_to_di(bci_to_dp(bci));
2638   }
2639 
2640   // Get the data at an arbitrary bci, or null if there is none.
2641   ProfileData* bci_to_data(int bci);
2642 
2643   // Same, but try to create an extra_data record if one is needed:
2644   ProfileData* allocate_bci_to_data(int bci, Method* m) {
2645     check_extra_data_locked();
2646 
2647     ProfileData* data = nullptr;
2648     // If m is not null, try to allocate a SpeculativeTrapData entry
2649     if (m == nullptr) {
2650       data = bci_to_data(bci);
2651     }
2652     if (data != nullptr) {
2653       return data;
2654     }
2655     data = bci_to_extra_data(bci, m, true);
2656     if (data != nullptr) {
2657       return data;
2658     }
2659     // If SpeculativeTrapData allocation fails try to allocate a
2660     // regular entry
2661     data = bci_to_data(bci);
2662     if (data != nullptr) {
2663       return data;
2664     }
2665     return bci_to_extra_data(bci, nullptr, true);
2666   }
2667 
2668   BitData* exception_handler_bci_to_data_or_null(int bci);
2669   BitData exception_handler_bci_to_data(int bci);
2670 
2671   // Add a handful of extra data records, for trap tracking.
2672   // Only valid after 'set_size' is called at the end of MethodData::initialize
2673   DataLayout* extra_data_base() const  {
2674     check_extra_data_locked();
2675     return limit_data_position();
2676   }
2677   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2678   // pointers to sections in extra data
2679   DataLayout* args_data_limit() const  { return parameters_data_base(); }
2680   DataLayout* parameters_data_base() const {
2681     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2682     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di) : parameters_data_limit();
2683   }
2684   DataLayout* parameters_data_limit() const {
2685     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2686     return exception_handler_data_base();
2687   }
2688   DataLayout* exception_handler_data_base() const { return data_layout_at(_exception_handler_data_di); }
2689   DataLayout* exception_handler_data_limit() const { return extra_data_limit(); }
2690 
2691   int extra_data_size() const          { return (int)((address)extra_data_limit() - (address)limit_data_position()); }
2692   static DataLayout* next_extra(DataLayout* dp);
2693 
2694   // Return (uint)-1 for overflow.
2695   uint trap_count(int reason) const {
2696     return _compiler_counters.trap_count(reason);
2697   }
2698   // For loops:
2699   static uint trap_reason_limit() { return _trap_hist_limit; }
2700   static uint trap_count_limit()  { return _trap_hist_mask; }
2701   uint inc_trap_count(int reason) {
2702     return _compiler_counters.inc_trap_count(reason);
2703   }
2704 
2705   uint overflow_trap_count() const {
2706     return _compiler_counters.overflow_trap_count();
2707   }
2708   uint overflow_recompile_count() const {
2709     return _compiler_counters.overflow_recompile_count();
2710   }
2711   uint inc_overflow_recompile_count() {
2712     return _compiler_counters.inc_overflow_recompile_count();
2713   }
2714   uint decompile_count() const {
2715     return _compiler_counters.decompile_count();
2716   }
2717   uint inc_decompile_count() {
2718     uint dec_count = _compiler_counters.inc_decompile_count();
2719     if (dec_count > (uint)PerMethodRecompilationCutoff) {
2720       method()->set_not_compilable("decompile_count > PerMethodRecompilationCutoff", CompLevel_full_optimization);
2721     }
2722     return dec_count;
2723   }
2724   uint tenure_traps() const {
2725     return _tenure_traps;
2726   }
2727   void inc_tenure_traps() {
2728     _tenure_traps += 1;
2729   }
2730 
2731   // Return pointer to area dedicated to parameters in MDO
2732   ParametersTypeData* parameters_type_data() const {
2733     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2734     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : nullptr;
2735   }
2736 
2737   int parameters_type_data_di() const {
2738     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2739     return _parameters_type_data_di != no_parameters ? _parameters_type_data_di : exception_handlers_data_di();
2740   }
2741 
2742   int exception_handlers_data_di() const {
2743     return _exception_handler_data_di;
2744   }
2745 
2746   // Support for code generation
2747   static ByteSize data_offset() {
2748     return byte_offset_of(MethodData, _data[0]);
2749   }
2750 
2751   static ByteSize trap_history_offset() {
2752     return byte_offset_of(MethodData, _compiler_counters) + CompilerCounters::trap_history_offset();
2753   }
2754 
2755   static ByteSize invocation_counter_offset() {
2756     return byte_offset_of(MethodData, _invocation_counter);
2757   }
2758 
2759   static ByteSize backedge_counter_offset() {
2760     return byte_offset_of(MethodData, _backedge_counter);
2761   }
2762 
2763   static ByteSize invoke_mask_offset() {
2764     return byte_offset_of(MethodData, _invoke_mask);
2765   }
2766 
2767   static ByteSize backedge_mask_offset() {
2768     return byte_offset_of(MethodData, _backedge_mask);
2769   }
2770 
2771   static ByteSize parameters_type_data_di_offset() {
2772     return byte_offset_of(MethodData, _parameters_type_data_di);
2773   }
2774 
2775   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
2776   virtual MetaspaceObj::Type type() const { return MethodDataType; }
2777 
2778   // Deallocation support
2779   void deallocate_contents(ClassLoaderData* loader_data);
2780   void release_C_heap_structures();
2781 
2782   // GC support
2783   void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
2784 
2785   // Printing
2786   void print_on      (outputStream* st) const;
2787   void print_value_on(outputStream* st) const;
2788 
2789   // printing support for method data
2790   void print_data_on(outputStream* st) const;
2791 
2792   const char* internal_name() const { return "{method data}"; }
2793 
2794   // verification
2795   void verify_on(outputStream* st);
2796   void verify_data_on(outputStream* st);
2797 
2798   static bool profile_parameters_for_method(const methodHandle& m);
2799   static bool profile_arguments();
2800   static bool profile_arguments_jsr292_only();
2801   static bool profile_return();
2802   static bool profile_parameters();
2803   static bool profile_return_jsr292_only();
2804 
2805   void clean_method_data(bool always_clean);
2806   void clean_weak_method_links();
2807   Mutex* extra_data_lock();
2808   void check_extra_data_locked() const NOT_DEBUG_RETURN;
2809 };
2810 
2811 #endif // SHARE_OOPS_METHODDATA_HPP