1 /*
   2  * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_METHODDATA_HPP
  26 #define SHARE_OOPS_METHODDATA_HPP
  27 
  28 #include "interpreter/bytecodes.hpp"
  29 #include "interpreter/invocationCounter.hpp"
  30 #include "oops/metadata.hpp"
  31 #include "oops/method.hpp"
  32 #include "oops/oop.hpp"
  33 #include "runtime/atomic.hpp"
  34 #include "runtime/deoptimization.hpp"
  35 #include "runtime/mutex.hpp"
  36 #include "utilities/align.hpp"
  37 #include "utilities/copy.hpp"
  38 
  39 class BytecodeStream;
  40 
  41 // The MethodData object collects counts and other profile information
  42 // during zeroth-tier (interpreter) and third-tier (C1 with full profiling)
  43 // execution.
  44 //
  45 // The profile is used later by compilation heuristics.  Some heuristics
  46 // enable use of aggressive (or "heroic") optimizations.  An aggressive
  47 // optimization often has a down-side, a corner case that it handles
  48 // poorly, but which is thought to be rare.  The profile provides
  49 // evidence of this rarity for a given method or even BCI.  It allows
  50 // the compiler to back out of the optimization at places where it
  51 // has historically been a poor choice.  Other heuristics try to use
  52 // specific information gathered about types observed at a given site.
  53 //
  54 // All data in the profile is approximate.  It is expected to be accurate
  55 // on the whole, but the system expects occasional inaccuracies, due to
  56 // counter overflow, multiprocessor races during data collection, space
  57 // limitations, missing MDO blocks, etc.  Bad or missing data will degrade
  58 // optimization quality but will not affect correctness.  Also, each MDO
  59 // can be checked for its "maturity" by calling is_mature().
  60 //
  61 // Short (<32-bit) counters are designed to overflow to a known "saturated"
  62 // state.  Also, certain recorded per-BCI events are given one-bit counters
  63 // which overflow to a saturated state which applies to all counters at
  64 // that BCI.  In other words, there is a small lattice which approximates
  65 // the ideal of an infinite-precision counter for each event at each BCI,
  66 // and the lattice quickly "bottoms out" in a state where all counters
  67 // are taken to be indefinitely large.
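     // For example, a per-BCI flag such as BitData's null_seen acts as one of
     // these one-bit counters: once set it stays set, standing in for "one or
     // more" occurrences.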
  68 //
  69 // The reader will find many data races in profile gathering code, starting
  70 // with invocation counter incrementation.  None of these races harm correct
  71 // execution of the compiled code.
  72 
  73 // forward decl
  74 class ProfileData;
  75 
  76 // DataLayout
  77 //
  78 // Overlay for generic profiling data.
  79 class DataLayout {
  80   friend class VMStructs;
  81   friend class JVMCIVMStructs;
  82 
  83 private:
  84   // Every data layout begins with a header.  This header
  85   // contains a tag, which is used to indicate the size/layout
  86   // of the data, 8 bits of flags, which can be used in any way,
  87   // 32 bits of trap history (none/one reason/many reasons),
  88   // and a bci, which is used to tie this piece of data to a
  89   // specific bci in the bytecodes.
  90   union {
  91     u8 _bits;
  92     struct {
  93       u1 _tag;
  94       u1 _flags;
  95       u2 _bci;
  96       u4 _traps;
  97     } _struct;
  98   } _header;
  99 
 100   // The data layout has an arbitrary number of cells, each sized
 101   // to accommodate a pointer or an integer.
 102   intptr_t _cells[1];
 103 
 104   // Some types of data layouts need a length field.
 105   static bool needs_array_len(u1 tag);
 106 
 107 public:
 108   enum {
 109     counter_increment = 1
 110   };
 111 
 112   enum {
 113     cell_size = sizeof(intptr_t)
 114   };
 115 
 116   // Tag values
 117   enum : u1 {
 118     no_tag,
 119     bit_data_tag,
 120     counter_data_tag,
 121     jump_data_tag,
 122     receiver_type_data_tag,
 123     virtual_call_data_tag,
 124     ret_data_tag,
 125     branch_data_tag,
 126     multi_branch_data_tag,
 127     arg_info_data_tag,
 128     call_type_data_tag,
 129     virtual_call_type_data_tag,
 130     parameters_type_data_tag,
 131     speculative_trap_data_tag,
 132     array_store_data_tag,
 133     array_load_data_tag,
 134     acmp_data_tag
 135   };
 136 
 137   enum {
 138     // The trap state breaks down as [recompile:1 | reason:31].
 139     // This further breakdown is defined in deoptimization.cpp.
 140     // See Deoptimization::trap_state_reason for an assert that
 141     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 142     //
 143     // The trap_state is collected only if ProfileTraps is true.
 144     trap_bits = 1+31,  // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
 145     trap_mask = -1,
 146     first_flag = 0
 147   };
 148 
 149   // Size computation
 150   static int header_size_in_bytes() {
 151     return header_size_in_cells() * cell_size;
 152   }
 153   static int header_size_in_cells() {
 154     return LP64_ONLY(1) NOT_LP64(2);
 155   }
 156 
 157   static int compute_size_in_bytes(int cell_count) {
 158     return header_size_in_bytes() + cell_count * cell_size;
 159   }
 160 
 161   // Initialization
 162   void initialize(u1 tag, u2 bci, int cell_count);
 163 
 164   // Accessors
 165   u1 tag() {
 166     return _header._struct._tag;
 167   }
 168 
 169   // Return 32 bits of trap state.
 170   // The state tells if traps with zero, one, or many reasons have occurred.
 171   // It also tells whether zero or many recompilations have occurred.
 172   // The associated trap histogram in the MDO itself tells whether
 173   // traps are common or not.  If a BCI shows that a trap X has
 174   // occurred, and the MDO shows N occurrences of X, we make the
 175   // simplifying assumption that all N occurrences can be blamed
 176   // on that BCI.
 177   uint trap_state() const {
 178     return _header._struct._traps;
 179   }
 180 
 181   void set_trap_state(uint new_state) {
 182     assert(ProfileTraps, "used only under +ProfileTraps");
 183     uint old_flags = _header._struct._traps;
 184     _header._struct._traps = new_state | old_flags;
 185   }
 186 
 187   u1 flags() const {
 188     return Atomic::load_acquire(&_header._struct._flags);
 189   }
 190 
 191   u2 bci() const {
 192     return _header._struct._bci;
 193   }
 194 
 195   void set_header(u8 value) {
 196     _header._bits = value;
 197   }
 198   u8 header() {
 199     return _header._bits;
 200   }
 201   void set_cell_at(int index, intptr_t value) {
 202     _cells[index] = value;
 203   }
 204   void release_set_cell_at(int index, intptr_t value);
 205   intptr_t cell_at(int index) const {
 206     return _cells[index];
 207   }
 208 
 209   bool set_flag_at(u1 flag_number) {
 210     const u1 bit = 1 << flag_number;
 211     u1 compare_value;
 212     do {
 213       compare_value = _header._struct._flags;
 214       if ((compare_value & bit) == bit) {
 215         // already set.
 216         return false;
 217       }
 218     } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, static_cast<u1>(compare_value | bit)));
 219     return true;
 220   }
 221 
 222   bool clear_flag_at(u1 flag_number) {
 223     const u1 bit = 1 << flag_number;
 224     u1 compare_value;
 225     u1 exchange_value;
 226     do {
 227       compare_value = _header._struct._flags;
 228       if ((compare_value & bit) == 0) {
 229         // already cleared.
 230         return false;
 231       }
 232       exchange_value = compare_value & ~bit;
 233     } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, exchange_value));
 234     return true;
 235   }
 236 
 237   bool flag_at(u1 flag_number) const {
 238     return (flags() & (1 << flag_number)) != 0;
 239   }
 240 
 241   // Low-level support for code generation.
 242   static ByteSize header_offset() {
 243     return byte_offset_of(DataLayout, _header);
 244   }
 245   static ByteSize tag_offset() {
 246     return byte_offset_of(DataLayout, _header._struct._tag);
 247   }
 248   static ByteSize flags_offset() {
 249     return byte_offset_of(DataLayout, _header._struct._flags);
 250   }
 251   static ByteSize bci_offset() {
 252     return byte_offset_of(DataLayout, _header._struct._bci);
 253   }
 254   static ByteSize cell_offset(int index) {
 255     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 256   }
 257   // Return a value which, when or-ed as a byte into _flags, sets the flag.
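       // For example, flag_number_to_constant(3) == (1 << 3) == 0x08.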
 258   static u1 flag_number_to_constant(u1 flag_number) {
 259     DataLayout temp; temp.set_header(0);
 260     temp.set_flag_at(flag_number);
 261     return temp._header._struct._flags;
 262   }
 263   // Return a value which, when or-ed as a word into _header, sets the flag.
 264   static u8 flag_mask_to_header_mask(u1 byte_constant) {
 265     DataLayout temp; temp.set_header(0);
 266     temp._header._struct._flags = byte_constant;
 267     return temp._header._bits;
 268   }
 269 
 270   ProfileData* data_in();
 271 
 272   int size_in_bytes() {
 273     int cells = cell_count();
 274     assert(cells >= 0, "invalid number of cells");
 275     return DataLayout::compute_size_in_bytes(cells);
 276   }
 277   int cell_count();
 278 
 279   // GC support
 280   void clean_weak_klass_links(bool always_clean);
 281 };
 282 
 283 
 284 // ProfileData class hierarchy
 285 class ProfileData;
 286 class   BitData;
 287 class     CounterData;
 288 class       ReceiverTypeData;
 289 class         VirtualCallData;
 290 class           VirtualCallTypeData;
 291 class         ArrayStoreData;
 292 class       RetData;
 293 class       CallTypeData;
 294 class   JumpData;
 295 class     BranchData;
 296 class       ACmpData;
 297 class   ArrayData;
 298 class     MultiBranchData;
 299 class     ArgInfoData;
 300 class     ParametersTypeData;
 301 class   SpeculativeTrapData;
 302 class   ArrayLoadData;
 303 
 304 // ProfileData
 305 //
 306 // A ProfileData object is created to refer to a section of profiling
 307 // data in a structured way.
 308 class ProfileData : public ResourceObj {
 309   friend class TypeEntries;
 310   friend class SingleTypeEntry;
 311   friend class TypeStackSlotEntries;
 312 private:
 313   enum {
 314     tab_width_one = 16,
 315     tab_width_two = 36
 316   };
 317 
 318   // This is a pointer to a section of profiling data.
 319   DataLayout* _data;
 320 
 321   char* print_data_on_helper(const MethodData* md) const;
 322 
 323 protected:
 324   DataLayout* data() { return _data; }
 325   const DataLayout* data() const { return _data; }
 326 
 327   enum {
 328     cell_size = DataLayout::cell_size
 329   };
 330 
 331 public:
 332   // How many cells are in this?
 333   virtual int cell_count() const {
 334     ShouldNotReachHere();
 335     return -1;
 336   }
 337 
 338   // Return the size of this data.
 339   int size_in_bytes() {
 340     return DataLayout::compute_size_in_bytes(cell_count());
 341   }
 342 
 343 protected:
 344   // Low-level accessors for underlying data
 345   void set_intptr_at(int index, intptr_t value) {
 346     assert(0 <= index && index < cell_count(), "oob");
 347     data()->set_cell_at(index, value);
 348   }
 349   void release_set_intptr_at(int index, intptr_t value);
 350   intptr_t intptr_at(int index) const {
 351     assert(0 <= index && index < cell_count(), "oob");
 352     return data()->cell_at(index);
 353   }
 354   void set_uint_at(int index, uint value) {
 355     set_intptr_at(index, (intptr_t) value);
 356   }
 357   void release_set_uint_at(int index, uint value);
 358   uint uint_at(int index) const {
 359     return (uint)intptr_at(index);
 360   }
 361   void set_int_at(int index, int value) {
 362     set_intptr_at(index, (intptr_t) value);
 363   }
 364   void release_set_int_at(int index, int value);
 365   int int_at(int index) const {
 366     return (int)intptr_at(index);
 367   }
 368   int int_at_unchecked(int index) const {
 369     return (int)data()->cell_at(index);
 370   }
 371   void set_oop_at(int index, oop value) {
 372     set_intptr_at(index, cast_from_oop<intptr_t>(value));
 373   }
 374   oop oop_at(int index) const {
 375     return cast_to_oop(intptr_at(index));
 376   }
 377 
 378   void set_flag_at(u1 flag_number) {
 379     data()->set_flag_at(flag_number);
 380   }
 381   bool flag_at(u1 flag_number) const {
 382     return data()->flag_at(flag_number);
 383   }
 384 
 385   // two convenient imports for use by subclasses:
 386   static ByteSize cell_offset(int index) {
 387     return DataLayout::cell_offset(index);
 388   }
 389   static u1 flag_number_to_constant(u1 flag_number) {
 390     return DataLayout::flag_number_to_constant(flag_number);
 391   }
 392 
 393   ProfileData(DataLayout* data) {
 394     _data = data;
 395   }
 396 
 397 public:
 398   // Constructor for invalid ProfileData.
 399   ProfileData();
 400 
 401   u2 bci() const {
 402     return data()->bci();
 403   }
 404 
 405   address dp() {
 406     return (address)_data;
 407   }
 408 
 409   int trap_state() const {
 410     return data()->trap_state();
 411   }
 412   void set_trap_state(int new_state) {
 413     data()->set_trap_state(new_state);
 414   }
 415 
 416   // Type checking
 417   virtual bool is_BitData()         const { return false; }
 418   virtual bool is_CounterData()     const { return false; }
 419   virtual bool is_JumpData()        const { return false; }
 420   virtual bool is_ReceiverTypeData()const { return false; }
 421   virtual bool is_VirtualCallData() const { return false; }
 422   virtual bool is_RetData()         const { return false; }
 423   virtual bool is_BranchData()      const { return false; }
 424   virtual bool is_ArrayData()       const { return false; }
 425   virtual bool is_MultiBranchData() const { return false; }
 426   virtual bool is_ArgInfoData()     const { return false; }
 427   virtual bool is_CallTypeData()    const { return false; }
 428   virtual bool is_VirtualCallTypeData()const { return false; }
 429   virtual bool is_ParametersTypeData() const { return false; }
 430   virtual bool is_SpeculativeTrapData()const { return false; }
 431   virtual bool is_ArrayStoreData() const { return false; }
 432   virtual bool is_ArrayLoadData() const { return false; }
 433   virtual bool is_ACmpData()           const { return false; }
 434 
 435 
 436   BitData* as_BitData() const {
 437     assert(is_BitData(), "wrong type");
 438     return is_BitData()         ? (BitData*)        this : nullptr;
 439   }
 440   CounterData* as_CounterData() const {
 441     assert(is_CounterData(), "wrong type");
 442     return is_CounterData()     ? (CounterData*)    this : nullptr;
 443   }
 444   JumpData* as_JumpData() const {
 445     assert(is_JumpData(), "wrong type");
 446     return is_JumpData()        ? (JumpData*)       this : nullptr;
 447   }
 448   ReceiverTypeData* as_ReceiverTypeData() const {
 449     assert(is_ReceiverTypeData(), "wrong type");
 450     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : nullptr;
 451   }
 452   VirtualCallData* as_VirtualCallData() const {
 453     assert(is_VirtualCallData(), "wrong type");
 454     return is_VirtualCallData() ? (VirtualCallData*)this : nullptr;
 455   }
 456   RetData* as_RetData() const {
 457     assert(is_RetData(), "wrong type");
 458     return is_RetData()         ? (RetData*)        this : nullptr;
 459   }
 460   BranchData* as_BranchData() const {
 461     assert(is_BranchData(), "wrong type");
 462     return is_BranchData()      ? (BranchData*)     this : nullptr;
 463   }
 464   ArrayData* as_ArrayData() const {
 465     assert(is_ArrayData(), "wrong type");
 466     return is_ArrayData()       ? (ArrayData*)      this : nullptr;
 467   }
 468   MultiBranchData* as_MultiBranchData() const {
 469     assert(is_MultiBranchData(), "wrong type");
 470     return is_MultiBranchData() ? (MultiBranchData*)this : nullptr;
 471   }
 472   ArgInfoData* as_ArgInfoData() const {
 473     assert(is_ArgInfoData(), "wrong type");
 474     return is_ArgInfoData() ? (ArgInfoData*)this : nullptr;
 475   }
 476   CallTypeData* as_CallTypeData() const {
 477     assert(is_CallTypeData(), "wrong type");
 478     return is_CallTypeData() ? (CallTypeData*)this : nullptr;
 479   }
 480   VirtualCallTypeData* as_VirtualCallTypeData() const {
 481     assert(is_VirtualCallTypeData(), "wrong type");
 482     return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
 483   }
 484   ParametersTypeData* as_ParametersTypeData() const {
 485     assert(is_ParametersTypeData(), "wrong type");
 486     return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
 487   }
 488   SpeculativeTrapData* as_SpeculativeTrapData() const {
 489     assert(is_SpeculativeTrapData(), "wrong type");
 490     return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
 491   }
 492   ArrayStoreData* as_ArrayStoreData() const {
 493     assert(is_ArrayStoreData(), "wrong type");
 494     return is_ArrayStoreData() ? (ArrayStoreData*)this : nullptr;
 495   }
 496   ArrayLoadData* as_ArrayLoadData() const {
 497     assert(is_ArrayLoadData(), "wrong type");
 498     return is_ArrayLoadData() ? (ArrayLoadData*)this : nullptr;
 499   }
 500   ACmpData* as_ACmpData() const {
 501     assert(is_ACmpData(), "wrong type");
 502     return is_ACmpData() ? (ACmpData*)this : nullptr;
 503   }
 504 
 505 
 506   // Subclass specific initialization
 507   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
 508 
 509   // GC support
 510   virtual void clean_weak_klass_links(bool always_clean) {}
 511 
 512   // CI translation: ProfileData can represent both MethodDataOop data
 513   // and CIMethodData data. This function is provided for translating
 514   // an oop in a ProfileData to the ci equivalent. Generally speaking,
 515   // most ProfileData don't require any translation, so we provide the null
 516   // translation here, and the required translators are in the ci subclasses.
 517   virtual void translate_from(const ProfileData* data) {}
 518 
 519   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
 520     ShouldNotReachHere();
 521   }
 522 
 523   void print_data_on(outputStream* st, const MethodData* md) const;
 524 
 525   void print_shared(outputStream* st, const char* name, const char* extra) const;
 526   void tab(outputStream* st, bool first = false) const;
 527 };
 528 
 529 // BitData
 530 //
 531 // A BitData holds a flag or two in its header.
 532 class BitData : public ProfileData {
 533   friend class VMStructs;
 534   friend class JVMCIVMStructs;
 535 protected:
 536   enum : u1 {
 537     // null_seen:
 538     //  saw a null operand (cast/aastore/instanceof)
 539       null_seen_flag                  = DataLayout::first_flag + 0,
 540       exception_handler_entered_flag  = null_seen_flag + 1,
 541       deprecated_method_callsite_flag = exception_handler_entered_flag + 1
 542 #if INCLUDE_JVMCI
 543     // bytecode threw any exception
 544     , exception_seen_flag             = deprecated_method_callsite_flag + 1
 545 #endif
 546     , last_bit_data_flag
 547   };
 548   enum { bit_cell_count = 0 };  // no additional data fields needed.
 549 public:
 550   BitData(DataLayout* layout) : ProfileData(layout) {
 551   }
 552 
 553   virtual bool is_BitData() const { return true; }
 554 
 555   static int static_cell_count() {
 556     return bit_cell_count;
 557   }
 558 
 559   virtual int cell_count() const {
 560     return static_cell_count();
 561   }
 562 
 563   // Accessor
 564 
 565   // The null_seen flag bit is specially known to the interpreter.
 566   // Consulting it allows the compiler to avoid setting up null_check traps.
 567   bool null_seen() const  { return flag_at(null_seen_flag); }
 568   void set_null_seen()    { set_flag_at(null_seen_flag); }
 569   bool deprecated_method_call_site() const { return flag_at(deprecated_method_callsite_flag); }
 570   bool set_deprecated_method_call_site() { return data()->set_flag_at(deprecated_method_callsite_flag); }
 571   bool clear_deprecated_method_call_site() { return data()->clear_flag_at(deprecated_method_callsite_flag); }
 572 
 573 #if INCLUDE_JVMCI
 574   // true if an exception was thrown at the specific BCI
 575   bool exception_seen() { return flag_at(exception_seen_flag); }
 576   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 577 #endif
 578 
 579   // true if an exception handler block at this bci was entered
 580   bool exception_handler_entered() { return flag_at(exception_handler_entered_flag); }
 581   void set_exception_handler_entered() { set_flag_at(exception_handler_entered_flag); }
 582 
 583   // Code generation support
 584   static u1 null_seen_byte_constant() {
 585     return flag_number_to_constant(null_seen_flag);
 586   }
 587 
 588   static ByteSize bit_data_size() {
 589     return cell_offset(bit_cell_count);
 590   }
 591 
 592   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 593 };
 594 
 595 // CounterData
 596 //
 597 // A CounterData corresponds to a simple counter.
 598 class CounterData : public BitData {
 599   friend class VMStructs;
 600   friend class JVMCIVMStructs;
 601 protected:
 602   enum {
 603     count_off,
 604     counter_cell_count
 605   };
 606 public:
 607   CounterData(DataLayout* layout) : BitData(layout) {}
 608 
 609   virtual bool is_CounterData() const { return true; }
 610 
 611   static int static_cell_count() {
 612     return counter_cell_count;
 613   }
 614 
 615   virtual int cell_count() const {
 616     return static_cell_count();
 617   }
 618 
 619   // Direct accessor
 620   int count() const {
 621     intptr_t raw_data = intptr_at(count_off);
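         // The cell holds a full intptr_t; clamp it to the jint range so the
         // returned count saturates instead of wrapping.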
 622     if (raw_data > max_jint) {
 623       raw_data = max_jint;
 624     } else if (raw_data < min_jint) {
 625       raw_data = min_jint;
 626     }
 627     return int(raw_data);
 628   }
 629 
 630   // Code generation support
 631   static ByteSize count_offset() {
 632     return cell_offset(count_off);
 633   }
 634   static ByteSize counter_data_size() {
 635     return cell_offset(counter_cell_count);
 636   }
 637 
 638   void set_count(int count) {
 639     set_int_at(count_off, count);
 640   }
 641 
 642   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 643 };
 644 
 645 // JumpData
 646 //
 647 // A JumpData is used to access profiling information for a direct
 648 // branch.  It is a counter, used for counting the number of branches,
 649 // plus a data displacement, used for realigning the data pointer to
 650 // the corresponding target bci.
 651 class JumpData : public ProfileData {
 652   friend class VMStructs;
 653   friend class JVMCIVMStructs;
 654 protected:
 655   enum {
 656     taken_off_set,
 657     displacement_off_set,
 658     jump_cell_count
 659   };
 660 
 661   void set_displacement(int displacement) {
 662     set_int_at(displacement_off_set, displacement);
 663   }
 664 
 665 public:
 666   JumpData(DataLayout* layout) : ProfileData(layout) {
 667     assert(layout->tag() == DataLayout::jump_data_tag ||
 668       layout->tag() == DataLayout::branch_data_tag ||
 669       layout->tag() == DataLayout::acmp_data_tag, "wrong type");
 670   }
 671 
 672   virtual bool is_JumpData() const { return true; }
 673 
 674   static int static_cell_count() {
 675     return jump_cell_count;
 676   }
 677 
 678   virtual int cell_count() const {
 679     return static_cell_count();
 680   }
 681 
 682   // Direct accessor
 683   uint taken() const {
 684     return uint_at(taken_off_set);
 685   }
 686 
 687   void set_taken(uint cnt) {
 688     set_uint_at(taken_off_set, cnt);
 689   }
 690 
 691   // Saturating counter
 692   uint inc_taken() {
 693     uint cnt = taken() + 1;
 694     // Did we wrap? Will compiler screw us??
 695     if (cnt == 0) cnt--;
 696     set_uint_at(taken_off_set, cnt);
 697     return cnt;
 698   }
 699 
 700   int displacement() const {
 701     return int_at(displacement_off_set);
 702   }
 703 
 704   // Code generation support
 705   static ByteSize taken_offset() {
 706     return cell_offset(taken_off_set);
 707   }
 708 
 709   static ByteSize displacement_offset() {
 710     return cell_offset(displacement_off_set);
 711   }
 712 
 713   // Specific initialization.
 714   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 715 
 716   void print_data_on(outputStream* st, const char* extra = nullptr) const;
 717 };
 718 
 719 // Entries in a ProfileData object to record types: it can either be
 720 // none (no profile), unknown (conflicting profile data) or a klass if
 721 // a single one is seen. Whether a null reference was seen is also
 722 // recorded. No counter is associated with the type and a single type
 723 // is tracked (unlike VirtualCallData).
 724 class TypeEntries {
 725 
 726 public:
 727 
 728   // A single cell is used to record information for a type:
 729   // - the cell is initialized to 0
 730   // - when a type is discovered it is stored in the cell
 731   // - bit 0 of the cell is used to record whether a null reference
 732   // was encountered or not
 733   // - bit 1 is set to record a conflict in the type information
 734 
 735   enum {
 736     null_seen = 1,
 737     type_mask = ~null_seen,
 738     type_unknown = 2,
 739     status_bits = null_seen | type_unknown,
 740     type_klass_mask = ~status_bits
 741   };
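       // For example, a cell holding ((intptr_t)k | null_seen) records that
       // klass k and a null reference were both seen, while a cell equal to
       // (type_unknown | null_seen) records conflicting types plus a null.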
 742 
 743   // what to initialize a cell to
 744   static intptr_t type_none() {
 745     return 0;
 746   }
 747 
 748   // null seen = bit 0 set?
 749   static bool was_null_seen(intptr_t v) {
 750     return (v & null_seen) != 0;
 751   }
 752 
 753   // conflicting type information = bit 1 set?
 754   static bool is_type_unknown(intptr_t v) {
 755     return (v & type_unknown) != 0;
 756   }
 757 
 758   // no type information yet = all bits cleared, ignoring bit 0?
 759   static bool is_type_none(intptr_t v) {
 760     return (v & type_mask) == 0;
 761   }
 762 
 763   // recorded type: cell without bits 0 and 1
 764   static intptr_t klass_part(intptr_t v) {
 765     intptr_t r = v & type_klass_mask;
 766     return r;
 767   }
 768 
 769   // type recorded
 770   static Klass* valid_klass(intptr_t k) {
 771     if (!is_type_none(k) &&
 772         !is_type_unknown(k)) {
 773       Klass* res = (Klass*)klass_part(k);
 774       assert(res != nullptr, "invalid");
 775       return res;
 776     } else {
 777       return nullptr;
 778     }
 779   }
 780 
 781   static intptr_t with_status(intptr_t k, intptr_t in) {
 782     return k | (in & status_bits);
 783   }
 784 
 785   static intptr_t with_status(Klass* k, intptr_t in) {
 786     return with_status((intptr_t)k, in);
 787   }
 788 
 789   static void print_klass(outputStream* st, intptr_t k);
 790 
 791 protected:
 792   // ProfileData object these entries are part of
 793   ProfileData* _pd;
 794   // offset within the ProfileData object where the entries start
 795   const int _base_off;
 796 
 797   TypeEntries(int base_off)
 798     : _pd(nullptr), _base_off(base_off) {}
 799 
 800   void set_intptr_at(int index, intptr_t value) {
 801     _pd->set_intptr_at(index, value);
 802   }
 803 
 804   intptr_t intptr_at(int index) const {
 805     return _pd->intptr_at(index);
 806   }
 807 
 808 public:
 809   void set_profile_data(ProfileData* pd) {
 810     _pd = pd;
 811   }
 812 };
 813 
 814 // Type entries used for arguments passed at a call and parameters on
 815 // method entry. 2 cells per entry: one for the type encoded as in
 816 // TypeEntries and one initialized with the stack slot where the
 817 // profiled object is to be found so that the interpreter can locate
 818 // it quickly.
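     // For example, entry i keeps its stack slot at cell _base_off + 2 * i and
     // its type at cell _base_off + 2 * i + 1 (see stack_slot_local_offset()
     // and type_local_offset()).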
 819 class TypeStackSlotEntries : public TypeEntries {
 820 
 821 private:
 822   enum {
 823     stack_slot_entry,
 824     type_entry,
 825     per_arg_cell_count
 826   };
 827 
 828   // offset of cell for stack slot for entry i within ProfileData object
 829   int stack_slot_offset(int i) const {
 830     return _base_off + stack_slot_local_offset(i);
 831   }
 832 
 833   const int _number_of_entries;
 834 
 835   // offset of cell for type for entry i within ProfileData object
 836   int type_offset_in_cells(int i) const {
 837     return _base_off + type_local_offset(i);
 838   }
 839 
 840 public:
 841 
 842   TypeStackSlotEntries(int base_off, int nb_entries)
 843     : TypeEntries(base_off), _number_of_entries(nb_entries) {}
 844 
 845   static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
 846 
 847   void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
 848 
 849   int number_of_entries() const { return _number_of_entries; }
 850 
 851   // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
 852   static int stack_slot_local_offset(int i) {
 853     return i * per_arg_cell_count + stack_slot_entry;
 854   }
 855 
 856   // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
 857   static int type_local_offset(int i) {
 858     return i * per_arg_cell_count + type_entry;
 859   }
 860 
 861   // stack slot for entry i
 862   uint stack_slot(int i) const {
 863     assert(i >= 0 && i < _number_of_entries, "oob");
 864     return _pd->uint_at(stack_slot_offset(i));
 865   }
 866 
 867   // set stack slot for entry i
 868   void set_stack_slot(int i, uint num) {
 869     assert(i >= 0 && i < _number_of_entries, "oob");
 870     _pd->set_uint_at(stack_slot_offset(i), num);
 871   }
 872 
 873   // type for entry i
 874   intptr_t type(int i) const {
 875     assert(i >= 0 && i < _number_of_entries, "oob");
 876     return _pd->intptr_at(type_offset_in_cells(i));
 877   }
 878 
 879   // set type for entry i
 880   void set_type(int i, intptr_t k) {
 881     assert(i >= 0 && i < _number_of_entries, "oob");
 882     _pd->set_intptr_at(type_offset_in_cells(i), k);
 883   }
 884 
 885   static ByteSize per_arg_size() {
 886     return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
 887   }
 888 
 889   static int per_arg_count() {
 890     return per_arg_cell_count;
 891   }
 892 
 893   ByteSize type_offset(int i) const {
 894     return DataLayout::cell_offset(type_offset_in_cells(i));
 895   }
 896 
 897   // GC support
 898   void clean_weak_klass_links(bool always_clean);
 899 
 900   void print_data_on(outputStream* st) const;
 901 };
 902 
 903 // Type entry used for return from a call. A single cell to record the
 904 // type.
 905 class SingleTypeEntry : public TypeEntries {
 906 
 907 private:
 908   enum {
 909     cell_count = 1
 910   };
 911 
 912 public:
 913   SingleTypeEntry(int base_off)
 914     : TypeEntries(base_off) {}
 915 
 916   void post_initialize() {
 917     set_type(type_none());
 918   }
 919 
 920   intptr_t type() const {
 921     return _pd->intptr_at(_base_off);
 922   }
 923 
 924   void set_type(intptr_t k) {
 925     _pd->set_intptr_at(_base_off, k);
 926   }
 927 
 928   static int static_cell_count() {
 929     return cell_count;
 930   }
 931 
 932   static ByteSize size() {
 933     return in_ByteSize(cell_count * DataLayout::cell_size);
 934   }
 935 
 936   ByteSize type_offset() {
 937     return DataLayout::cell_offset(_base_off);
 938   }
 939 
 940   // GC support
 941   void clean_weak_klass_links(bool always_clean);
 942 
 943   void print_data_on(outputStream* st) const;
 944 };
 945 
 946 // Entries to collect type information at a call: contains arguments
 947 // (TypeStackSlotEntries), a return type (SingleTypeEntry) and a header
 948 // cell holding the number of cells. Because an entry for the return type
 949 // takes fewer cells than an entry for the type of an argument, the
 950 // number of cells is used to tell how many arguments are profiled and
 951 // whether a return value is profiled. See has_arguments() and
 952 // has_return().
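     // For example, profiling two arguments and the return value takes one
     // header cell holding the cell count, 2 * TypeStackSlotEntries::per_arg_count()
     // cells for the arguments, and a single cell for the return type.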
 953 class TypeEntriesAtCall {
 954 private:
 955   static int stack_slot_local_offset(int i) {
 956     return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
 957   }
 958 
 959   static int argument_type_local_offset(int i) {
 960     return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
 961   }
 962 
 963 public:
 964 
 965   static int header_cell_count() {
 966     return 1;
 967   }
 968 
 969   static int cell_count_local_offset() {
 970     return 0;
 971   }
 972 
 973   static int compute_cell_count(BytecodeStream* stream);
 974 
 975   static void initialize(DataLayout* dl, int base, int cell_count) {
 976     int off = base + cell_count_local_offset();
 977     dl->set_cell_at(off, cell_count - base - header_cell_count());
 978   }
 979 
 980   static bool arguments_profiling_enabled();
 981   static bool return_profiling_enabled();
 982 
 983   // Code generation support
 984   static ByteSize cell_count_offset() {
 985     return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
 986   }
 987 
 988   static ByteSize args_data_offset() {
 989     return in_ByteSize(header_cell_count() * DataLayout::cell_size);
 990   }
 991 
 992   static ByteSize stack_slot_offset(int i) {
 993     return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
 994   }
 995 
 996   static ByteSize argument_type_offset(int i) {
 997     return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
 998   }
 999 
1000   static ByteSize return_only_size() {
1001     return SingleTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
1002   }
1003 
1004 };
1005 
1006 // CallTypeData
1007 //
1008 // A CallTypeData is used to access profiling information about a
1009 // non-virtual call for which we collect type information about arguments
1010 // and return value.
1011 class CallTypeData : public CounterData {
1012 private:
1013   // entries for arguments if any
1014   TypeStackSlotEntries _args;
1015   // entry for return type if any
1016   SingleTypeEntry _ret;
1017 
1018   int cell_count_global_offset() const {
1019     return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1020   }
1021 
1022   // number of cells not counting the header
1023   int cell_count_no_header() const {
1024     return uint_at(cell_count_global_offset());
1025   }
1026 
1027   void check_number_of_arguments(int total) {
1028     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1029   }
1030 
1031 public:
1032   CallTypeData(DataLayout* layout) :
1033     CounterData(layout),
1034     _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1035     _ret(cell_count() - SingleTypeEntry::static_cell_count())
1036   {
1037     assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
1038     // Some compilers (VC++) don't want this passed in member initialization list
1039     _args.set_profile_data(this);
1040     _ret.set_profile_data(this);
1041   }
1042 
1043   const TypeStackSlotEntries* args() const {
1044     assert(has_arguments(), "no profiling of arguments");
1045     return &_args;
1046   }
1047 
1048   const SingleTypeEntry* ret() const {
1049     assert(has_return(), "no profiling of return value");
1050     return &_ret;
1051   }
1052 
1053   virtual bool is_CallTypeData() const { return true; }
1054 
1055   static int static_cell_count() {
1056     return -1;
1057   }
1058 
1059   static int compute_cell_count(BytecodeStream* stream) {
1060     return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1061   }
1062 
1063   static void initialize(DataLayout* dl, int cell_count) {
1064     TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
1065   }
1066 
1067   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1068 
1069   virtual int cell_count() const {
1070     return CounterData::static_cell_count() +
1071       TypeEntriesAtCall::header_cell_count() +
1072       int_at_unchecked(cell_count_global_offset());
1073   }
1074 
1075   int number_of_arguments() const {
1076     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1077   }
1078 
1079   void set_argument_type(int i, Klass* k) {
1080     assert(has_arguments(), "no arguments!");
1081     intptr_t current = _args.type(i);
1082     _args.set_type(i, TypeEntries::with_status(k, current));
1083   }
1084 
1085   void set_return_type(Klass* k) {
1086     assert(has_return(), "no return!");
1087     intptr_t current = _ret.type();
1088     _ret.set_type(TypeEntries::with_status(k, current));
1089   }
1090 
1091   // An entry for a return value takes less space than an entry for an
1092   // argument, so if the number of cells exceeds the number of cells
1093   // needed for an argument, this object contains type information for
1094   // at least one argument.
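       // For example, with per_arg_count() == 2, a cell_count_no_header() of 4
       // or 5 means two arguments are profiled; the odd extra cell, if present,
       // holds the return type.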
1095   bool has_arguments() const {
1096     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1097     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1098     return res;
1099   }
1100 
1101   // An entry for a return value takes less space than an entry for an
1102   // argument, so if the remainder of the number of cells divided by
1103   // the number of cells for an argument is non-zero, a return value
1104   // is profiled in this object.
1105   bool has_return() const {
1106     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1107     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1108     return res;
1109   }
1110 
1111   // Code generation support
1112   static ByteSize args_data_offset() {
1113     return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1114   }
1115 
1116   ByteSize argument_type_offset(int i) {
1117     return _args.type_offset(i);
1118   }
1119 
1120   ByteSize return_type_offset() {
1121     return _ret.type_offset();
1122   }
1123 
1124   // GC support
1125   virtual void clean_weak_klass_links(bool always_clean) {
1126     if (has_arguments()) {
1127       _args.clean_weak_klass_links(always_clean);
1128     }
1129     if (has_return()) {
1130       _ret.clean_weak_klass_links(always_clean);
1131     }
1132   }
1133 
1134   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1135 };
1136 
1137 // ReceiverTypeData
1138 //
1139 // A ReceiverTypeData is used to access profiling information about a
1140 // dynamic type check.  It consists of a series of (Klass*, count)
1141 // pairs which are used to store a type profile for the receiver of
1142 // the check; the associated count is incremented every time the type
1143 // is seen. A per-ReceiverTypeData counter is incremented on type
1144 // overflow (when there is no more room for a not-yet-profiled Klass*).
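     // For example, row r keeps its Klass* at cell receiver_cell_index(r) and
     // its count at cell receiver_count_cell_index(r); with TypeProfileWidth at
     // its usual default of 2, at most two distinct receiver klasses are recorded.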
1145 //
1146 class ReceiverTypeData : public CounterData {
1147   friend class VMStructs;
1148   friend class JVMCIVMStructs;
1149 protected:
1150   enum {
1151     receiver0_offset = counter_cell_count,
1152     count0_offset,
1153     receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1154   };
1155 
1156 public:
1157   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1158     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1159            layout->tag() == DataLayout::virtual_call_data_tag ||
1160            layout->tag() == DataLayout::virtual_call_type_data_tag ||
1161            layout->tag() == DataLayout::array_store_data_tag, "wrong type");
1162   }
1163 
1164   virtual bool is_ReceiverTypeData() const { return true; }
1165 
1166   static int static_cell_count() {
1167     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
1168   }
1169 
1170   virtual int cell_count() const {
1171     return static_cell_count();
1172   }
1173 
1174   // Direct accessors
1175   static uint row_limit() {
1176     return (uint) TypeProfileWidth;
1177   }
1178   static int receiver_cell_index(uint row) {
1179     return receiver0_offset + row * receiver_type_row_cell_count;
1180   }
1181   static int receiver_count_cell_index(uint row) {
1182     return count0_offset + row * receiver_type_row_cell_count;
1183   }
1184 
1185   Klass* receiver(uint row) const {
1186     assert(row < row_limit(), "oob");
1187 
1188     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
1189     assert(recv == nullptr || recv->is_klass(), "wrong type");
1190     return recv;
1191   }
1192 
1193   void set_receiver(uint row, Klass* k) {
1194     assert((uint)row < row_limit(), "oob");
1195     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
1196   }
1197 
1198   uint receiver_count(uint row) const {
1199     assert(row < row_limit(), "oob");
1200     return uint_at(receiver_count_cell_index(row));
1201   }
1202 
1203   void set_receiver_count(uint row, uint count) {
1204     assert(row < row_limit(), "oob");
1205     set_uint_at(receiver_count_cell_index(row), count);
1206   }
1207 
1208   void clear_row(uint row) {
1209     assert(row < row_limit(), "oob");
1210     // Clear the total count - the indicator of a polymorphic call site.
1211     // The site may look monomorphic after that, but this allows more
1212     // accurate profiling information because there was an execution
1213     // phase change since the klasses were unloaded.
1214     // If the site is still polymorphic then the MDO will be updated
1215     // to reflect it. But it could be the case that the site becomes
1216     // only bimorphic. Then keeping the total count non-zero would be wrong.
1217     // Even if we treat the site as monomorphic (when it is not) for
1218     // compilation, the worst case is a trap, deoptimization and recompilation
1219     // with an updated MDO after executing the method in the interpreter.
1220     // An additional receiver will be recorded in the cleaned row
1221     // during the next call execution.
1222     //
1223     // Note: our profiling logic works with empty rows in any slot.
1224     // We sort the profiling info (ciCallProfile) for compilation.
1225     //
1226     set_count(0);
1227     set_receiver(row, nullptr);
1228     set_receiver_count(row, 0);
1229   }
1230 
1231   // Code generation support
1232   static ByteSize receiver_offset(uint row) {
1233     return cell_offset(receiver_cell_index(row));
1234   }
1235   static ByteSize receiver_count_offset(uint row) {
1236     return cell_offset(receiver_count_cell_index(row));
1237   }
1238   static ByteSize receiver_type_data_size() {
1239     return cell_offset(static_cell_count());
1240   }
1241 
1242   // GC support
1243   virtual void clean_weak_klass_links(bool always_clean);
1244 
1245   void print_receiver_data_on(outputStream* st) const;
1246   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1247 };
1248 
1249 // VirtualCallData
1250 //
1251 // A VirtualCallData is used to access profiling information about a
1252 // virtual call.  For now, it has nothing more than a ReceiverTypeData.
1253 class VirtualCallData : public ReceiverTypeData {
1254 public:
1255   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
1256     assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1257            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1258   }
1259 
1260   virtual bool is_VirtualCallData() const { return true; }
1261 
1262   static int static_cell_count() {
1263     // At this point we could add more profile state, e.g., for arguments.
1264     // But for now it's the same size as the base record type.
1265     return ReceiverTypeData::static_cell_count();
1266   }
1267 
1268   virtual int cell_count() const {
1269     return static_cell_count();
1270   }
1271 
1272   // Direct accessors
1273   static ByteSize virtual_call_data_size() {
1274     return cell_offset(static_cell_count());
1275   }
1276 
1277   void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1278   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1279 };
1280 
1281 // VirtualCallTypeData
1282 //
1283 // A VirtualCallTypeData is used to access profiling information about
1284 // a virtual call for which we collect type information about
1285 // arguments and return value.
1286 class VirtualCallTypeData : public VirtualCallData {
1287 private:
1288   // entries for arguments if any
1289   TypeStackSlotEntries _args;
1290   // entry for return type if any
1291   SingleTypeEntry _ret;
1292 
1293   int cell_count_global_offset() const {
1294     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1295   }
1296 
1297   // number of cells not counting the header
1298   int cell_count_no_header() const {
1299     return uint_at(cell_count_global_offset());
1300   }
1301 
1302   void check_number_of_arguments(int total) {
1303     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1304   }
1305 
1306 public:
1307   VirtualCallTypeData(DataLayout* layout) :
1308     VirtualCallData(layout),
1309     _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1310     _ret(cell_count() - SingleTypeEntry::static_cell_count())
1311   {
1312     assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1313     // Some compilers (VC++) don't want this passed in member initialization list
1314     _args.set_profile_data(this);
1315     _ret.set_profile_data(this);
1316   }
1317 
1318   const TypeStackSlotEntries* args() const {
1319     assert(has_arguments(), "no profiling of arguments");
1320     return &_args;
1321   }
1322 
1323   const SingleTypeEntry* ret() const {
1324     assert(has_return(), "no profiling of return value");
1325     return &_ret;
1326   }
1327 
1328   virtual bool is_VirtualCallTypeData() const { return true; }
1329 
1330   static int static_cell_count() {
1331     return -1;
1332   }
1333 
1334   static int compute_cell_count(BytecodeStream* stream) {
1335     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1336   }
1337 
1338   static void initialize(DataLayout* dl, int cell_count) {
1339     TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
1340   }
1341 
1342   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1343 
1344   virtual int cell_count() const {
1345     return VirtualCallData::static_cell_count() +
1346       TypeEntriesAtCall::header_cell_count() +
1347       int_at_unchecked(cell_count_global_offset());
1348   }
1349 
1350   int number_of_arguments() const {
1351     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1352   }
1353 
1354   void set_argument_type(int i, Klass* k) {
1355     assert(has_arguments(), "no arguments!");
1356     intptr_t current = _args.type(i);
1357     _args.set_type(i, TypeEntries::with_status(k, current));
1358   }
1359 
1360   void set_return_type(Klass* k) {
1361     assert(has_return(), "no return!");
1362     intptr_t current = _ret.type();
1363     _ret.set_type(TypeEntries::with_status(k, current));
1364   }
1365 
1366   // An entry for a return value takes less space than an entry for an
1367   // argument, so if the remainder of the number of cells divided by
1368   // the number of cells for an argument is non-zero, a return value
1369   // is profiled in this object.
1370   bool has_return() const {
1371     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1372     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1373     return res;
1374   }
1375 
1376   // An entry for a return value takes less space than an entry for an
1377   // argument, so if the number of cells exceeds the number of cells
1378   // needed for an argument, this object contains type information for
1379   // at least one argument.
1380   bool has_arguments() const {
1381     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1382     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1383     return res;
1384   }
1385 
1386   // Code generation support
1387   static ByteSize args_data_offset() {
1388     return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1389   }
1390 
1391   ByteSize argument_type_offset(int i) {
1392     return _args.type_offset(i);
1393   }
1394 
1395   ByteSize return_type_offset() {
1396     return _ret.type_offset();
1397   }
1398 
1399   // GC support
1400   virtual void clean_weak_klass_links(bool always_clean) {
1401     ReceiverTypeData::clean_weak_klass_links(always_clean);
1402     if (has_arguments()) {
1403       _args.clean_weak_klass_links(always_clean);
1404     }
1405     if (has_return()) {
1406       _ret.clean_weak_klass_links(always_clean);
1407     }
1408   }
1409 
1410   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1411 };
1412 
1413 // RetData
1414 //
1415 // A RetData is used to access profiling information for a ret bytecode.
1416 // It is composed of a count of the number of times that the ret has
1417 // been executed, followed by a series of triples of the form
1418 // (bci, count, di) which count the number of times that some bci was the
1419 // target of the ret and cache a corresponding data displacement.
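     // For example, row r occupies the cells bci_cell_index(r),
     // bci_count_cell_index(r) and bci_displacement_cell_index(r), laid out
     // after the CounterData count cell.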
1420 class RetData : public CounterData {
1421 protected:
1422   enum {
1423     bci0_offset = counter_cell_count,
1424     count0_offset,
1425     displacement0_offset,
1426     ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1427   };
1428 
1429   void set_bci(uint row, int bci) {
1430     assert((uint)row < row_limit(), "oob");
1431     set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1432   }
1433   void release_set_bci(uint row, int bci);
1434   void set_bci_count(uint row, uint count) {
1435     assert((uint)row < row_limit(), "oob");
1436     set_uint_at(count0_offset + row * ret_row_cell_count, count);
1437   }
1438   void set_bci_displacement(uint row, int disp) {
1439     set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1440   }
1441 
1442 public:
1443   RetData(DataLayout* layout) : CounterData(layout) {
1444     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
1445   }
1446 
1447   virtual bool is_RetData() const { return true; }
1448 
1449   enum {
1450     no_bci = -1 // value of bci when bci1/2 are not in use.
1451   };
1452 
1453   static int static_cell_count() {
1454     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
1455   }
1456 
1457   virtual int cell_count() const {
1458     return static_cell_count();
1459   }
1460 
1461   static uint row_limit() {
1462     return (uint) BciProfileWidth;
1463   }
1464   static int bci_cell_index(uint row) {
1465     return bci0_offset + row * ret_row_cell_count;
1466   }
1467   static int bci_count_cell_index(uint row) {
1468     return count0_offset + row * ret_row_cell_count;
1469   }
1470   static int bci_displacement_cell_index(uint row) {
1471     return displacement0_offset + row * ret_row_cell_count;
1472   }
1473 
1474   // Direct accessors
1475   int bci(uint row) const {
1476     return int_at(bci_cell_index(row));
1477   }
1478   uint bci_count(uint row) const {
1479     return uint_at(bci_count_cell_index(row));
1480   }
1481   int bci_displacement(uint row) const {
1482     return int_at(bci_displacement_cell_index(row));
1483   }
1484 
1485   // Interpreter Runtime support
1486   address fixup_ret(int return_bci, MethodData* mdo);
1487 
1488   // Code generation support
1489   static ByteSize bci_offset(uint row) {
1490     return cell_offset(bci_cell_index(row));
1491   }
1492   static ByteSize bci_count_offset(uint row) {
1493     return cell_offset(bci_count_cell_index(row));
1494   }
1495   static ByteSize bci_displacement_offset(uint row) {
1496     return cell_offset(bci_displacement_cell_index(row));
1497   }
1498 
1499   // Specific initialization.
1500   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1501 
1502   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1503 };
1504 
1505 // BranchData
1506 //
1507 // A BranchData is used to access profiling data for a two-way branch.
1508 // It consists of taken and not_taken counts as well as a data displacement
1509 // for the taken case.
1510 class BranchData : public JumpData {
1511   friend class VMStructs;
1512   friend class JVMCIVMStructs;
1513 protected:
1514   enum {
1515     not_taken_off_set = jump_cell_count,
1516     branch_cell_count
1517   };
1518 
1519   void set_displacement(int displacement) {
1520     set_int_at(displacement_off_set, displacement);
1521   }
1522 
1523 public:
1524   BranchData(DataLayout* layout) : JumpData(layout) {
1525     assert(layout->tag() == DataLayout::branch_data_tag || layout->tag() == DataLayout::acmp_data_tag, "wrong type");
1526   }
1527 
1528   virtual bool is_BranchData() const { return true; }
1529 
1530   static int static_cell_count() {
1531     return branch_cell_count;
1532   }
1533 
1534   virtual int cell_count() const {
1535     return static_cell_count();
1536   }
1537 
1538   // Direct accessor
1539   uint not_taken() const {
1540     return uint_at(not_taken_off_set);
1541   }
1542 
1543   void set_not_taken(uint cnt) {
1544     set_uint_at(not_taken_off_set, cnt);
1545   }
1546 
1547   uint inc_not_taken() {
1548     uint cnt = not_taken() + 1;
1549     // Did we wrap? Will compiler screw us??
1550     if (cnt == 0) cnt--;
1551     set_uint_at(not_taken_off_set, cnt);
1552     return cnt;
1553   }
1554 
1555   // Code generation support
1556   static ByteSize not_taken_offset() {
1557     return cell_offset(not_taken_off_set);
1558   }
1559   static ByteSize branch_data_size() {
1560     return cell_offset(branch_cell_count);
1561   }
1562 
1563   // Specific initialization.
1564   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1565 
1566   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1567 };
1568 
1569 // ArrayData
1570 //
1571 // An ArrayData is a base class for accessing profiling data which does
1572 // not have a statically known size.  It consists of an array length
1573 // and an array start.
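     // For example, cell array_len_off_set (0) holds the length and the
     // elements start at cell array_start_off_set (1), so cell_count() is
     // array_len() + 1.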
1574 class ArrayData : public ProfileData {
1575   friend class VMStructs;
1576   friend class JVMCIVMStructs;
1577 protected:
1578   friend class DataLayout;
1579 
1580   enum {
1581     array_len_off_set,
1582     array_start_off_set
1583   };
1584 
1585   uint array_uint_at(int index) const {
1586     int aindex = index + array_start_off_set;
1587     return uint_at(aindex);
1588   }
1589   int array_int_at(int index) const {
1590     int aindex = index + array_start_off_set;
1591     return int_at(aindex);
1592   }
1593   oop array_oop_at(int index) const {
1594     int aindex = index + array_start_off_set;
1595     return oop_at(aindex);
1596   }
1597   void array_set_int_at(int index, int value) {
1598     int aindex = index + array_start_off_set;
1599     set_int_at(aindex, value);
1600   }
1601 
1602   // Code generation support for subclasses.
1603   static ByteSize array_element_offset(int index) {
1604     return cell_offset(array_start_off_set + index);
1605   }
1606 
1607 public:
1608   ArrayData(DataLayout* layout) : ProfileData(layout) {}
1609 
1610   virtual bool is_ArrayData() const { return true; }
1611 
1612   static int static_cell_count() {
1613     return -1;
1614   }
1615 
1616   int array_len() const {
1617     return int_at_unchecked(array_len_off_set);
1618   }
1619 
1620   virtual int cell_count() const {
1621     return array_len() + 1;
1622   }
1623 
1624   // Code generation support
1625   static ByteSize array_len_offset() {
1626     return cell_offset(array_len_off_set);
1627   }
1628   static ByteSize array_start_offset() {
1629     return cell_offset(array_start_off_set);
1630   }
1631 };
1632 
1633 // MultiBranchData
1634 //
1635 // A MultiBranchData is used to access profiling information for
1636 // a multi-way branch (the tableswitch and lookupswitch bytecodes).  It consists of a series
1637 // of (count, displacement) pairs, which count the number of times each
1638 // case was taken and specify the data displacement for each branch target.
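//
// For a switch with N explicit cases the array holds 2 + 2*N cells:
//
//   [default count][default displacement][count 0][disp 0] ... [count N-1][disp N-1]
//
// so number_of_cases() below recovers N as (array_len() - 2) / per_case_cell_count.
// An illustrative sketch of finding the hottest explicit case:
//
//   MultiBranchData* mbd = ...;
//   int hottest = -1;
//   uint best = 0;
//   for (int i = 0; i < mbd->number_of_cases(); i++) {
//     if (mbd->count_at(i) > best) { best = mbd->count_at(i); hottest = i; }
//   }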
1639 class MultiBranchData : public ArrayData {
1640   friend class VMStructs;
1641   friend class JVMCIVMStructs;
1642 protected:
1643   enum {
1644     default_count_off_set,
1645     default_displacement_off_set,
1646     case_array_start
1647   };
1648   enum {
1649     relative_count_off_set,
1650     relative_displacement_off_set,
1651     per_case_cell_count
1652   };
1653 
1654   void set_default_displacement(int displacement) {
1655     array_set_int_at(default_displacement_off_set, displacement);
1656   }
1657   void set_displacement_at(int index, int displacement) {
1658     array_set_int_at(case_array_start +
1659                      index * per_case_cell_count +
1660                      relative_displacement_off_set,
1661                      displacement);
1662   }
1663 
1664 public:
1665   MultiBranchData(DataLayout* layout) : ArrayData(layout) {
1666     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
1667   }
1668 
1669   virtual bool is_MultiBranchData() const { return true; }
1670 
1671   static int compute_cell_count(BytecodeStream* stream);
1672 
1673   int number_of_cases() const {
1674     int alen = array_len() - 2; // subtract the two cells of the default case (count and displacement)
1675     assert(alen % per_case_cell_count == 0, "must be a multiple of per_case_cell_count");
1676     return (alen / per_case_cell_count);
1677   }
1678 
1679   uint default_count() const {
1680     return array_uint_at(default_count_off_set);
1681   }
1682   int default_displacement() const {
1683     return array_int_at(default_displacement_off_set);
1684   }
1685 
1686   uint count_at(int index) const {
1687     return array_uint_at(case_array_start +
1688                          index * per_case_cell_count +
1689                          relative_count_off_set);
1690   }
1691   int displacement_at(int index) const {
1692     return array_int_at(case_array_start +
1693                         index * per_case_cell_count +
1694                         relative_displacement_off_set);
1695   }
1696 
1697   // Code generation support
1698   static ByteSize default_count_offset() {
1699     return array_element_offset(default_count_off_set);
1700   }
1701   static ByteSize default_displacement_offset() {
1702     return array_element_offset(default_displacement_off_set);
1703   }
1704   static ByteSize case_count_offset(int index) {
1705     return case_array_offset() +
1706            (per_case_size() * index) +
1707            relative_count_offset();
1708   }
1709   static ByteSize case_array_offset() {
1710     return array_element_offset(case_array_start);
1711   }
1712   static ByteSize per_case_size() {
1713     return in_ByteSize(per_case_cell_count) * cell_size;
1714   }
1715   static ByteSize relative_count_offset() {
1716     return in_ByteSize(relative_count_off_set) * cell_size;
1717   }
1718   static ByteSize relative_displacement_offset() {
1719     return in_ByteSize(relative_displacement_off_set) * cell_size;
1720   }
1721 
1722   // Specific initialization.
1723   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1724 
1725   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1726 };
1727 
1728 class ArgInfoData : public ArrayData {
1729 
1730 public:
1731   ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1732     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1733   }
1734 
1735   virtual bool is_ArgInfoData() const { return true; }
1736 
1737 
1738   int number_of_args() const {
1739     return array_len();
1740   }
1741 
1742   uint arg_modified(int arg) const {
1743     return array_uint_at(arg);
1744   }
1745 
1746   void set_arg_modified(int arg, uint val) {
1747     array_set_int_at(arg, val);
1748   }
1749 
1750   void print_data_on(outputStream* st, const char* extra = nullptr) const;
1751 };
1752 
1753 // ParametersTypeData
1754 //
1755 // A ParametersTypeData is used to access profiling information about
1756 // the types of parameters to a method.
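//
// Each profiled parameter occupies a (stack slot, type) pair of cells, so
// number_of_parameters() is array_len() / TypeStackSlotEntries::per_arg_count().
// An illustrative sketch of walking the recorded parameter types, assuming
// TypeStackSlotEntries' type(i) accessor:
//
//   ParametersTypeData* ptd = mdo->parameters_type_data();
//   if (ptd != nullptr) {
//     for (int i = 0; i < ptd->number_of_parameters(); i++) {
//       uint slot   = ptd->stack_slot(i);
//       intptr_t tp = ptd->parameters()->type(i);   // Klass* plus status bits
//     }
//   }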
1757 class ParametersTypeData : public ArrayData {
1758 
1759 private:
1760   TypeStackSlotEntries _parameters;
1761 
1762   static int stack_slot_local_offset(int i) {
1763     assert_profiling_enabled();
1764     return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
1765   }
1766 
1767   static int type_local_offset(int i) {
1768     assert_profiling_enabled();
1769     return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
1770   }
1771 
1772   static bool profiling_enabled();
1773   static void assert_profiling_enabled() {
1774     assert(profiling_enabled(), "method parameters profiling should be on");
1775   }
1776 
1777 public:
1778   ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
1779     assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
1780     // Some compilers (VC++) don't want 'this' passed in the member initialization list
1781     _parameters.set_profile_data(this);
1782   }
1783 
1784   static int compute_cell_count(Method* m);
1785 
1786   virtual bool is_ParametersTypeData() const { return true; }
1787 
1788   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1789 
1790   int number_of_parameters() const {
1791     return array_len() / TypeStackSlotEntries::per_arg_count();
1792   }
1793 
1794   const TypeStackSlotEntries* parameters() const { return &_parameters; }
1795 
1796   uint stack_slot(int i) const {
1797     return _parameters.stack_slot(i);
1798   }
1799 
1800   void set_type(int i, Klass* k) {
1801     intptr_t current = _parameters.type(i);
1802     _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
1803   }
1804 
1805   virtual void clean_weak_klass_links(bool always_clean) {
1806     _parameters.clean_weak_klass_links(always_clean);
1807   }
1808 
1809   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1810 
1811   static ByteSize stack_slot_offset(int i) {
1812     return cell_offset(stack_slot_local_offset(i));
1813   }
1814 
1815   static ByteSize type_offset(int i) {
1816     return cell_offset(type_local_offset(i));
1817   }
1818 };
1819 
1820 // SpeculativeTrapData
1821 //
1822 // A SpeculativeTrapData is used to record traps due to type
1823 // speculation. It records the root method of the compilation,
1824 // because the fact that a type speculation is wrong in the context
1825 // of one compilation (of method1) doesn't mean it is wrong in the
1826 // context of another (of method2): method2 may be compiled with
1827 // more or different profile data available, so an optimization that
1828 // failed in the compilation of method1 is still worth trying in the
1829 // compilation of method2.
1830 // Space for SpeculativeTrapData entries is allocated from the extra
1831 // data space in the MDO. If we run out of space, the trap data for
1832 // the ProfileData at that bci is updated.
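//
// An illustrative sketch of how a compiler might consult such an entry:
// while compiling root method m, an extra-data entry whose method() is m
// means a speculation at this bci has already failed for this root, so it
// should not be attempted again:
//
//   if (trap_data->method() == current_compilation_root) {
//     // back out of the speculative optimization at this bci
//   }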
1833 class SpeculativeTrapData : public ProfileData {
1834 protected:
1835   enum {
1836     speculative_trap_method,
1837 #ifndef _LP64
1838     // The size of the area for traps is a multiple of the header
1839     // size, 2 cells on 32 bits. Packed at the end of this area are
1840     // argument info entries (with tag
1841     // DataLayout::arg_info_data_tag). The logic in
1842     // MethodData::bci_to_extra_data() that guarantees traps don't
1843     // overflow over argument info entries assumes the size of a
1844     // SpeculativeTrapData is twice the header size. On 32 bits, a
1845     // SpeculativeTrapData must be 4 cells.
1846     padding,
1847 #endif
1848     speculative_trap_cell_count
1849   };
1850 public:
1851   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
1852     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
1853   }
1854 
1855   virtual bool is_SpeculativeTrapData() const { return true; }
1856 
1857   static int static_cell_count() {
1858     return speculative_trap_cell_count;
1859   }
1860 
1861   virtual int cell_count() const {
1862     return static_cell_count();
1863   }
1864 
1865   // Direct accessor
1866   Method* method() const {
1867     return (Method*)intptr_at(speculative_trap_method);
1868   }
1869 
1870   void set_method(Method* m) {
1871     assert(!m->is_old(), "cannot add old methods");
1872     set_intptr_at(speculative_trap_method, (intptr_t)m);
1873   }
1874 
1875   static ByteSize method_offset() {
1876     return cell_offset(speculative_trap_method);
1877   }
1878 
1879   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1880 };
1881 
1882 class ArrayStoreData : public ReceiverTypeData {
1883 private:
1884   enum {
1885     flat_array_flag = BitData::last_bit_data_flag,
1886     null_free_array_flag = flat_array_flag + 1,
1887   };
1888 
1889   SingleTypeEntry _array;
1890 
1891 public:
1892   ArrayStoreData(DataLayout* layout) :
1893     ReceiverTypeData(layout),
1894     _array(ReceiverTypeData::static_cell_count()) {
1895     assert(layout->tag() == DataLayout::array_store_data_tag, "wrong type");
1896     _array.set_profile_data(this);
1897   }
1898 
1899   const SingleTypeEntry* array() const {
1900     return &_array;
1901   }
1902 
1903   virtual bool is_ArrayStoreData() const { return true; }
1904 
1905   static int static_cell_count() {
1906     return ReceiverTypeData::static_cell_count() + SingleTypeEntry::static_cell_count();
1907   }
1908 
1909   virtual int cell_count() const {
1910     return static_cell_count();
1911   }
1912 
1913   void set_flat_array() { set_flag_at(flat_array_flag); }
1914   bool flat_array() const { return flag_at(flat_array_flag); }
1915 
1916   void set_null_free_array() { set_flag_at(null_free_array_flag); }
1917   bool null_free_array() const { return flag_at(null_free_array_flag); }
1918 
1919   // Code generation support
1920   static int flat_array_byte_constant() {
1921     return flag_number_to_constant(flat_array_flag);
1922   }
1923 
1924   static int null_free_array_byte_constant() {
1925     return flag_number_to_constant(null_free_array_flag);
1926   }
1927 
1928   static ByteSize array_offset() {
1929     return cell_offset(ReceiverTypeData::static_cell_count());
1930   }
1931 
1932   virtual void clean_weak_klass_links(bool always_clean) {
1933     ReceiverTypeData::clean_weak_klass_links(always_clean);
1934     _array.clean_weak_klass_links(always_clean);
1935   }
1936 
1937   static ByteSize array_store_data_size() {
1938     return cell_offset(static_cell_count());
1939   }
1940 
1941   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1942 };
1943 
1944 class ArrayLoadData : public ProfileData {
1945 private:
1946   enum {
1947     flat_array_flag = DataLayout::first_flag,
1948     null_free_array_flag = flat_array_flag + 1,
1949   };
1950 
1951   SingleTypeEntry _array;
1952   SingleTypeEntry _element;
1953 
1954 public:
1955   ArrayLoadData(DataLayout* layout) :
1956     ProfileData(layout),
1957     _array(0),
1958     _element(SingleTypeEntry::static_cell_count()) {
1959     assert(layout->tag() == DataLayout::array_load_data_tag, "wrong type");
1960     _array.set_profile_data(this);
1961     _element.set_profile_data(this);
1962   }
1963 
1964   const SingleTypeEntry* array() const {
1965     return &_array;
1966   }
1967 
1968   const SingleTypeEntry* element() const {
1969     return &_element;
1970   }
1971 
1972   virtual bool is_ArrayLoadData() const { return true; }
1973 
1974   static int static_cell_count() {
1975     return SingleTypeEntry::static_cell_count() * 2;
1976   }
1977 
1978   virtual int cell_count() const {
1979     return static_cell_count();
1980   }
1981 
1982   void set_flat_array() { set_flag_at(flat_array_flag); }
1983   bool flat_array() const { return flag_at(flat_array_flag); }
1984 
1985   void set_null_free_array() { set_flag_at(null_free_array_flag); }
1986   bool null_free_array() const { return flag_at(null_free_array_flag); }
1987 
1988   // Code generation support
1989   static int flat_array_byte_constant() {
1990     return flag_number_to_constant(flat_array_flag);
1991   }
1992 
1993   static int null_free_array_byte_constant() {
1994     return flag_number_to_constant(null_free_array_flag);
1995   }
1996 
1997   static ByteSize array_offset() {
1998     return cell_offset(0);
1999   }
2000 
2001   static ByteSize element_offset() {
2002     return cell_offset(SingleTypeEntry::static_cell_count());
2003   }
2004 
2005   virtual void clean_weak_klass_links(bool always_clean) {
2006     _array.clean_weak_klass_links(always_clean);
2007     _element.clean_weak_klass_links(always_clean);
2008   }
2009 
2010   static ByteSize array_load_data_size() {
2011     return cell_offset(static_cell_count());
2012   }
2013 
2014   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
2015 };
2016 
2017 class ACmpData : public BranchData {
2018 private:
2019   enum {
2020     left_inline_type_flag = DataLayout::first_flag,
2021     right_inline_type_flag
2022   };
2023 
2024   SingleTypeEntry _left;
2025   SingleTypeEntry _right;
2026 
2027 public:
2028   ACmpData(DataLayout* layout) :
2029     BranchData(layout),
2030     _left(BranchData::static_cell_count()),
2031     _right(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count()) {
2032     assert(layout->tag() == DataLayout::acmp_data_tag, "wrong type");
2033     _left.set_profile_data(this);
2034     _right.set_profile_data(this);
2035   }
2036 
2037   const SingleTypeEntry* left() const {
2038     return &_left;
2039   }
2040 
2041   const SingleTypeEntry* right() const {
2042     return &_right;
2043   }
2044 
2045   virtual bool is_ACmpData() const { return true; }
2046 
2047   static int static_cell_count() {
2048     return BranchData::static_cell_count() + SingleTypeEntry::static_cell_count() * 2;
2049   }
2050 
2051   virtual int cell_count() const {
2052     return static_cell_count();
2053   }
2054 
2055   void set_left_inline_type() { set_flag_at(left_inline_type_flag); }
2056   bool left_inline_type() const { return flag_at(left_inline_type_flag); }
2057 
2058   void set_right_inline_type() { set_flag_at(right_inline_type_flag); }
2059   bool right_inline_type() const { return flag_at(right_inline_type_flag); }
2060 
2061   // Code generation support
2062   static int left_inline_type_byte_constant() {
2063     return flag_number_to_constant(left_inline_type_flag);
2064   }
2065 
2066   static int right_inline_type_byte_constant() {
2067     return flag_number_to_constant(right_inline_type_flag);
2068   }
2069 
2070   static ByteSize left_offset() {
2071     return cell_offset(BranchData::static_cell_count());
2072   }
2073 
2074   static ByteSize right_offset() {
2075     return cell_offset(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count());
2076   }
2077 
2078   virtual void clean_weak_klass_links(bool always_clean) {
2079     _left.clean_weak_klass_links(always_clean);
2080     _right.clean_weak_klass_links(always_clean);
2081   }
2082 
2083   static ByteSize acmp_data_size() {
2084     return cell_offset(static_cell_count());
2085   }
2086 
2087   virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
2088 };
2089 
2090 // MethodData*
2091 //
2092 // A MethodData* holds information which has been collected about
2093 // a method.  Its layout looks like this:
2094 //
2095 // -----------------------------
2096 // | header                    |
2097 // | klass                     |
2098 // -----------------------------
2099 // | method                    |
2100 // | size of the MethodData*   |
2101 // -----------------------------
2102 // | Data entries...           |
2103 // |   (variable size)         |
2104 // |                           |
2105 // .                           .
2106 // .                           .
2107 // .                           .
2108 // |                           |
2109 // -----------------------------
2110 //
2111 // The data entry area is a heterogeneous array of DataLayouts. Each
2112 // DataLayout in the array corresponds to a specific bytecode in the
2113 // method.  The entries in the array are sorted by the corresponding
2114 // bytecode.  Access to the data is via resource-allocated ProfileData
2115 // objects, which point to the underlying blocks of DataLayout structures.
2116 //
2117 // During interpretation, if profiling is enabled, the interpreter
2118 // maintains a method data pointer (mdp), which points at the entry
2119 // in the array corresponding to the current bci.  In the course of
2120 // interpretation, when a bytecode is encountered that has profile data
2121 // associated with it, the entry pointed to by mdp is updated, then the
2122 // mdp is adjusted to point to the next appropriate DataLayout.  If mdp
2123 // is null to begin with, the interpreter assumes that the current method
2124 // is not (yet) being profiled.
2125 //
2126 // In MethodData* parlance, "dp" is a "data pointer", the actual address
2127 // of a DataLayout element.  A "di" is a "data index", the offset in bytes
2128 // from the base of the data entry array.  A "displacement" is a byte offset,
2129 // recorded in certain ProfileData objects, that indicates the amount by which
2130 // the mdp must be adjusted in the event of a change in control flow.
2131 //
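// For example (illustrative only), the accessors below relate dp and di:
//
//   address dp = mdo->bci_to_dp(bci);   // data pointer for this bci
//   int     di = mdo->dp_to_di(dp);     // byte offset from data_base()
//   assert(dp == mdo->data_base() + di, "dp and di name the same entry");
//
// and on a taken branch the interpreter adds the recorded displacement to
// the mdp to reach the DataLayout of the branch target.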
2132 
2133 class CleanExtraDataClosure : public StackObj {
2134 public:
2135   virtual bool is_live(Method* m) = 0;
2136 };
2137 
2138 
2139 #if INCLUDE_JVMCI
2140 // Encapsulates an encoded speculation reason. These are linked together in
2141 // a list that is atomically appended to during deoptimization. Entries are
2142 // never removed from the list.
2143 // @see jdk.vm.ci.hotspot.HotSpotSpeculationLog.HotSpotSpeculationEncoding
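// An illustrative sketch of reading the list through the accessors below,
// where head is a FailedSpeculation** such as the one returned by
// MethodData::get_failed_speculations_address():
//
//   for (FailedSpeculation* fs = *head; fs != nullptr; fs = fs->next()) {
//     // fs->data() points at fs->data_len() bytes of encoded speculation
//   }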
2144 class FailedSpeculation: public CHeapObj<mtCompiler> {
2145  private:
2146   // The length of HotSpotSpeculationEncoding.toByteArray(). The data itself
2147   // is an array embedded at the end of this object.
2148   int   _data_len;
2149 
2150   // Next entry in a linked list.
2151   FailedSpeculation* _next;
2152 
2153   FailedSpeculation(address data, int data_len);
2154 
2155   FailedSpeculation** next_adr() { return &_next; }
2156 
2157   // Placement new operator for inlining the speculation data into
2158   // the FailedSpeculation object.
2159   void* operator new(size_t size, size_t fs_size) throw();
2160 
2161  public:
2162   char* data()         { return (char*)(((address) this) + sizeof(FailedSpeculation)); }
2163   int data_len() const { return _data_len; }
2164   FailedSpeculation* next() const { return _next; }
2165 
2166   // Atomically appends a speculation from nm to the list whose head is at (*failed_speculations_address).
2167   // Returns false if the FailedSpeculation object could not be allocated.
2168   static bool add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len);
2169 
2170   // Frees all entries in the linked list whose head is at (*failed_speculations_address).
2171   static void free_failed_speculations(FailedSpeculation** failed_speculations_address);
2172 };
2173 #endif
2174 
2175 class ciMethodData;
2176 
2177 class MethodData : public Metadata {
2178   friend class VMStructs;
2179   friend class JVMCIVMStructs;
2180   friend class ProfileData;
2181   friend class TypeEntriesAtCall;
2182   friend class ciMethodData;
2183   friend class VM_ReinitializeMDO;
2184 
2185   // If you add a new field that points to any metaspace object, you
2186   // must add this field to MethodData::metaspace_pointers_do().
2187 
2188   // Back pointer to the Method*
2189   Method* _method;
2190 
2191   // Size of this oop in bytes
2192   int _size;
2193 
2194   // Cached hint for bci_to_dp and bci_to_data
2195   int _hint_di;
2196 
2197   Mutex _extra_data_lock;
2198 
2199   MethodData(const methodHandle& method);
2200 
2201   void initialize();
2202 
2203 public:
2204   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2205 
2206   virtual bool is_methodData() const { return true; }
2207 
2208   // Safely reinitialize the data in the MDO.  This is intended as a testing facility: the
2209   // reinitialization is performed at a safepoint, so it isn't cheap, and it doesn't ensure that all
2210   // readers will see consistent profile data.
2211   void reinitialize();
2212 
2213   // Whole-method sticky bits and flags
2214   enum {
2215     _trap_hist_limit    = Deoptimization::Reason_TRAP_HISTORY_LENGTH,
2216     _trap_hist_mask     = max_jubyte,
2217     _extra_data_count   = 4     // extra DataLayout headers, for trap history
2218   }; // Public flag values
2219 
2220   // Compiler-related counters.
2221   class CompilerCounters {
2222     friend class VMStructs;
2223     friend class JVMCIVMStructs;
2224 
2225     uint _nof_decompiles;             // count of all nmethod removals
2226     uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2227     uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2228     union {
2229       intptr_t _align;
2230       // JVMCI separates trap history for OSR compilations from normal compilations
2231       u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
2232     } _trap_hist;
2233 
2234   public:
2235     CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0) {
2236 #ifndef ZERO
2237       // Some Zero platforms do not have the expected alignment and do not use
2238       // this code. The static_assert would still fire and fail for them.
2239       static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
2240 #endif
2241       uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
2242       Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
2243     }
2244 
2245     // Return (uint)-1 for overflow.
2246     uint trap_count(int reason) const {
2247       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2248       return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
2249     }
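    // Worked example (illustrative): with _trap_hist_mask == max_jubyte (255),
    // a raw byte of 3 yields ((3+1) & 255) - 1 == 3, while a saturated byte
    // of 255 yields ((255+1) & 255) - 1 == -1, i.e. (uint)-1, the overflow marker.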
2250 
2251     uint inc_trap_count(int reason) {
2252       // Count another trap, anywhere in this method.
2253       assert(reason >= 0, "must be single trap");
2254       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2255       uint cnt1 = 1 + _trap_hist._array[reason];
2256       if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
2257         _trap_hist._array[reason] = (u1)cnt1;
2258         return cnt1;
2259       } else {
2260         return _trap_hist_mask + (++_nof_overflow_traps);
2261       }
2262     }
2263 
2264     uint overflow_trap_count() const {
2265       return _nof_overflow_traps;
2266     }
2267     uint overflow_recompile_count() const {
2268       return _nof_overflow_recompiles;
2269     }
2270     uint inc_overflow_recompile_count() {
2271       return ++_nof_overflow_recompiles;
2272     }
2273     uint decompile_count() const {
2274       return _nof_decompiles;
2275     }
2276     uint inc_decompile_count() {
2277       return ++_nof_decompiles;
2278     }
2279 
2280     // Support for code generation
2281     static ByteSize trap_history_offset() {
2282       return byte_offset_of(CompilerCounters, _trap_hist._array);
2283     }
2284   };
2285 
2286 private:
2287   CompilerCounters _compiler_counters;
2288 
2289   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2290   intx              _eflags;          // flags on escape information
2291   intx              _arg_local;       // bit set of non-escaping arguments
2292   intx              _arg_stack;       // bit set of stack-allocatable arguments
2293   intx              _arg_returned;    // bit set of returned arguments
2294 
2295   // How many invocations has this MDO seen?
2296   // These counters are used to determine the exact age of the MDO.
2297   // We need them because, with tiered compilation, a method can be
2298   // executed concurrently at different levels.
2299   InvocationCounter _invocation_counter;
2300   // Same for backedges.
2301   InvocationCounter _backedge_counter;
2302   // Counter values at the time profiling started.
2303   int               _invocation_counter_start;
2304   int               _backedge_counter_start;
2305   uint              _tenure_traps;
2306   int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
2307   int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
2308 
2309   // The number of loops and blocks is computed when compiling with C1 for
2310   // the first time. It is used to determine whether the method is trivial.
2311   short             _num_loops;
2312   short             _num_blocks;
2313   // Does this method contain anything worth profiling?
2314   enum WouldProfile {unknown, no_profile, profile};
2315   WouldProfile      _would_profile;
2316 
2317 #if INCLUDE_JVMCI
2318   // Support for HotSpotMethodData.setCompiledIRSize(int)
2319   FailedSpeculation* _failed_speculations;
2320   int                _jvmci_ir_size;
2321 #endif
2322 
2323   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
2324   int _data_size;
2325 
2326   // data index for the area dedicated to parameters: no_parameters (-2) if there
2327   // is no parameter profiling, parameters_uninitialized (-1) until it is known.
2328   enum { no_parameters = -2, parameters_uninitialized = -1 };
2329   int _parameters_type_data_di;
2330 
2331   // data index of exception handler profiling data
2332   int _exception_handler_data_di;
2333 
2334   // Beginning of the data entries
2335   // See comment in ciMethodData::load_data
2336   intptr_t _data[1];
2337 
2338   // Helper for size computation
2339   static int compute_data_size(BytecodeStream* stream);
2340   static int bytecode_cell_count(Bytecodes::Code code);
2341   static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2342   enum { no_profile_data = -1, variable_cell_count = -2 };
2343 
2344   // Helper for initialization
2345   DataLayout* data_layout_at(int data_index) const {
2346     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
2347     return (DataLayout*) (((address)_data) + data_index);
2348   }
2349 
2350   static int single_exception_handler_data_cell_count() {
2351     return BitData::static_cell_count();
2352   }
2353 
2354   static int single_exception_handler_data_size() {
2355     return DataLayout::compute_size_in_bytes(single_exception_handler_data_cell_count());
2356   }
2357 
2358   DataLayout* exception_handler_data_at(int exception_handler_index) const {
2359     return data_layout_at(_exception_handler_data_di + (exception_handler_index * single_exception_handler_data_size()));
2360   }
2361 
2362   int num_exception_handler_data() const {
2363     return exception_handlers_data_size() / single_exception_handler_data_size();
2364   }
2365 
2366   // Initialize an individual data segment.  Returns the size of
2367   // the segment in bytes.
2368   int initialize_data(BytecodeStream* stream, int data_index);
2369 
2370   // Helper for data_at
2371   DataLayout* limit_data_position() const {
2372     return data_layout_at(_data_size);
2373   }
2374   bool out_of_bounds(int data_index) const {
2375     return data_index >= data_size();
2376   }
2377 
2378   // Give each of the data entries a chance to perform specific
2379   // data initialization.
2380   void post_initialize(BytecodeStream* stream);
2381 
2382   // hint accessors
2383   int      hint_di() const  { return _hint_di; }
2384   void set_hint_di(int di)  {
2385     assert(!out_of_bounds(di), "hint_di out of bounds");
2386     _hint_di = di;
2387   }
2388 
2389   DataLayout* data_layout_before(int bci) {
2390     // avoid SEGV on this edge case
2391     if (data_size() == 0)
2392       return nullptr;
2393     DataLayout* layout = data_layout_at(hint_di());
2394     if (layout->bci() <= bci)
2395       return layout;
2396     return data_layout_at(first_di());
2397   }
2398 
2399   // What is the index of the first data entry?
2400   int first_di() const { return 0; }
2401 
2402   ProfileData* bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp);
2403   // Find or create an extra ProfileData:
2404   ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
2405 
2406   // return the argument info cell
2407   ArgInfoData *arg_info();
2408 
2409   enum {
2410     no_type_profile = 0,
2411     type_profile_jsr292 = 1,
2412     type_profile_all = 2
2413   };
2414 
2415   static bool profile_jsr292(const methodHandle& m, int bci);
2416   static bool profile_unsafe(const methodHandle& m, int bci);
2417   static bool profile_memory_access(const methodHandle& m, int bci);
2418   static int profile_arguments_flag();
2419   static bool profile_all_arguments();
2420   static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
2421   static int profile_return_flag();
2422   static bool profile_all_return();
2423   static bool profile_return_for_invoke(const methodHandle& m, int bci);
2424   static int profile_parameters_flag();
2425   static bool profile_parameters_jsr292_only();
2426   static bool profile_all_parameters();
2427 
2428   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
2429   void verify_extra_data_clean(CleanExtraDataClosure* cl);
2430 
2431   DataLayout* exception_handler_bci_to_data_helper(int bci);
2432 
2433 public:
2434   void clean_extra_data(CleanExtraDataClosure* cl);
2435 
2436   static int header_size() {
2437     return sizeof(MethodData)/wordSize;
2438   }
2439 
2440   // Compute the size of a MethodData* before it is created.
2441   static int compute_allocation_size_in_bytes(const methodHandle& method);
2442   static int compute_allocation_size_in_words(const methodHandle& method);
2443   static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
2444 
2445   // Determine if a given bytecode can have profile information.
2446   static bool bytecode_has_profile(Bytecodes::Code code) {
2447     return bytecode_cell_count(code) != no_profile_data;
2448   }
2449 
2450   // reset into original state
2451   void init();
2452 
2453   // My size
2454   int size_in_bytes() const { return _size; }
2455   int size() const    { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
2456 
2457   int invocation_count() {
2458     if (invocation_counter()->carry()) {
2459       return InvocationCounter::count_limit;
2460     }
2461     return invocation_counter()->count();
2462   }
2463   int backedge_count() {
2464     if (backedge_counter()->carry()) {
2465       return InvocationCounter::count_limit;
2466     }
2467     return backedge_counter()->count();
2468   }
2469 
2470   int invocation_count_start() {
2471     if (invocation_counter()->carry()) {
2472       return 0;
2473     }
2474     return _invocation_counter_start;
2475   }
2476 
2477   int backedge_count_start() {
2478     if (backedge_counter()->carry()) {
2479       return 0;
2480     }
2481     return _backedge_counter_start;
2482   }
2483 
2484   int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
2485   int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
2486 
2487   void reset_start_counters() {
2488     _invocation_counter_start = invocation_count();
2489     _backedge_counter_start = backedge_count();
2490   }
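  // Illustrative sketch: the deltas above measure profiling activity since
  // the last reset_start_counters() call, e.g.
  //
  //   mdo->reset_start_counters();
  //   ...   // let the method run for a while
  //   int recent = mdo->invocation_count_delta() + mdo->backedge_count_delta();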
2491 
2492   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
2493   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
2494 
2495 #if INCLUDE_JVMCI
2496   FailedSpeculation** get_failed_speculations_address() {
2497     return &_failed_speculations;
2498   }
2499 #endif
2500 
2501   void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
2502   bool would_profile() const                  { return _would_profile != no_profile; }
2503 
2504   int num_loops() const                       { return _num_loops;  }
2505   void set_num_loops(short n)                 { _num_loops = n;     }
2506   int num_blocks() const                      { return _num_blocks; }
2507   void set_num_blocks(short n)                { _num_blocks = n;    }
2508 
2509   bool is_mature() const;
2510 
2511   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2512   enum EscapeFlag {
2513     estimated    = 1 << 0,
2514     return_local = 1 << 1,
2515     return_allocated = 1 << 2,
2516     allocated_escapes = 1 << 3,
2517     unknown_modified = 1 << 4
2518   };
2519 
2520   intx eflags()                                  { return _eflags; }
2521   intx arg_local()                               { return _arg_local; }
2522   intx arg_stack()                               { return _arg_stack; }
2523   intx arg_returned()                            { return _arg_returned; }
2524   uint arg_modified(int a);
2525   void set_eflags(intx v)                        { _eflags = v; }
2526   void set_arg_local(intx v)                     { _arg_local = v; }
2527   void set_arg_stack(intx v)                     { _arg_stack = v; }
2528   void set_arg_returned(intx v)                  { _arg_returned = v; }
2529   void set_arg_modified(int a, uint v);
2530   void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
2531 
2532   // Location and size of data area
2533   address data_base() const {
2534     return (address) _data;
2535   }
2536   int data_size() const {
2537     return _data_size;
2538   }
2539 
2540   int parameters_size_in_bytes() const {
2541     return pointer_delta_as_int((address) parameters_data_limit(), (address) parameters_data_base());
2542   }
2543 
2544   int exception_handlers_data_size() const {
2545     return pointer_delta_as_int((address) exception_handler_data_limit(), (address) exception_handler_data_base());
2546   }
2547 
2548   // Accessors
2549   Method* method() const { return _method; }
2550 
2551   // Get the data at an arbitrary (sort of) data index.
2552   ProfileData* data_at(int data_index) const;
2553 
2554   // Walk through the data in order.
2555   ProfileData* first_data() const { return data_at(first_di()); }
2556   ProfileData* next_data(ProfileData* current) const;
2557   DataLayout*  next_data_layout(DataLayout* current) const;
2558   bool is_valid(ProfileData* current) const { return current != nullptr; }
2559   bool is_valid(DataLayout*  current) const { return current != nullptr; }
2560 
2561   // Convert a dp (data pointer) to a di (data index).
2562   int dp_to_di(address dp) const {
2563     return (int)(dp - ((address)_data));
2564   }
2565 
2566   // bci to di/dp conversion.
2567   address bci_to_dp(int bci);
2568   int bci_to_di(int bci) {
2569     return dp_to_di(bci_to_dp(bci));
2570   }
2571 
2572   // Get the data at an arbitrary bci, or null if there is none.
2573   ProfileData* bci_to_data(int bci);
2574 
2575   // Same, but try to create an extra_data record if one is needed:
2576   ProfileData* allocate_bci_to_data(int bci, Method* m) {
2577     check_extra_data_locked();
2578 
2579     ProfileData* data = nullptr;
2580     // If m is not null, try to allocate a SpeculativeTrapData entry
2581     if (m == nullptr) {
2582       data = bci_to_data(bci);
2583     }
2584     if (data != nullptr) {
2585       return data;
2586     }
2587     data = bci_to_extra_data(bci, m, true);
2588     if (data != nullptr) {
2589       return data;
2590     }
2591     // If SpeculativeTrapData allocation fails try to allocate a
2592     // regular entry
2593     data = bci_to_data(bci);
2594     if (data != nullptr) {
2595       return data;
2596     }
2597     return bci_to_extra_data(bci, nullptr, true);
2598   }
2599 
2600   BitData* exception_handler_bci_to_data_or_null(int bci);
2601   BitData exception_handler_bci_to_data(int bci);
2602 
2603   // Add a handful of extra data records, for trap tracking.
2604   // Only valid after 'set_size' is called at the end of MethodData::initialize
2605   DataLayout* extra_data_base() const  {
2606     check_extra_data_locked();
2607     return limit_data_position();
2608   }
2609   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2610   // pointers to sections in extra data
2611   DataLayout* args_data_limit() const  { return parameters_data_base(); }
2612   DataLayout* parameters_data_base() const {
2613     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2614     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di) : parameters_data_limit();
2615   }
2616   DataLayout* parameters_data_limit() const {
2617     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2618     return exception_handler_data_base();
2619   }
2620   DataLayout* exception_handler_data_base() const { return data_layout_at(_exception_handler_data_di); }
2621   DataLayout* exception_handler_data_limit() const { return extra_data_limit(); }
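  // Taken together, the accessors above describe the layout of the extra
  // data region (a summary, not a new invariant):
  //
  //   extra_data_base()             .. args_data_limit()              : trap entries and arg info
  //   parameters_data_base()        .. parameters_data_limit()        : parameter type data
  //   exception_handler_data_base() .. exception_handler_data_limit() : exception handler data
  //
  // where args_data_limit() == parameters_data_base(),
  // parameters_data_limit() == exception_handler_data_base() and
  // exception_handler_data_limit() == extra_data_limit().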
2622 
2623   int extra_data_size() const          { return (int)((address)extra_data_limit() - (address)limit_data_position()); }
2624   static DataLayout* next_extra(DataLayout* dp);
2625 
2626   // Return (uint)-1 for overflow.
2627   uint trap_count(int reason) const {
2628     return _compiler_counters.trap_count(reason);
2629   }
2630   // For loops:
2631   static uint trap_reason_limit() { return _trap_hist_limit; }
2632   static uint trap_count_limit()  { return _trap_hist_mask; }
2633   uint inc_trap_count(int reason) {
2634     return _compiler_counters.inc_trap_count(reason);
2635   }
2636 
2637   uint overflow_trap_count() const {
2638     return _compiler_counters.overflow_trap_count();
2639   }
2640   uint overflow_recompile_count() const {
2641     return _compiler_counters.overflow_recompile_count();
2642   }
2643   uint inc_overflow_recompile_count() {
2644     return _compiler_counters.inc_overflow_recompile_count();
2645   }
2646   uint decompile_count() const {
2647     return _compiler_counters.decompile_count();
2648   }
2649   uint inc_decompile_count() {
2650     uint dec_count = _compiler_counters.inc_decompile_count();
2651     if (dec_count > (uint)PerMethodRecompilationCutoff) {
2652       method()->set_not_compilable("decompile_count > PerMethodRecompilationCutoff", CompLevel_full_optimization);
2653     }
2654     return dec_count;
2655   }
2656   uint tenure_traps() const {
2657     return _tenure_traps;
2658   }
2659   void inc_tenure_traps() {
2660     _tenure_traps += 1;
2661   }
2662 
2663   // Return pointer to area dedicated to parameters in MDO
2664   ParametersTypeData* parameters_type_data() const {
2665     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2666     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : nullptr;
2667   }
2668 
2669   int parameters_type_data_di() const {
2670     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2671     return _parameters_type_data_di != no_parameters ? _parameters_type_data_di : exception_handlers_data_di();
2672   }
2673 
2674   int exception_handlers_data_di() const {
2675     return _exception_handler_data_di;
2676   }
2677 
2678   // Support for code generation
2679   static ByteSize data_offset() {
2680     return byte_offset_of(MethodData, _data[0]);
2681   }
2682 
2683   static ByteSize trap_history_offset() {
2684     return byte_offset_of(MethodData, _compiler_counters) + CompilerCounters::trap_history_offset();
2685   }
2686 
2687   static ByteSize invocation_counter_offset() {
2688     return byte_offset_of(MethodData, _invocation_counter);
2689   }
2690 
2691   static ByteSize backedge_counter_offset() {
2692     return byte_offset_of(MethodData, _backedge_counter);
2693   }
2694 
2695   static ByteSize invoke_mask_offset() {
2696     return byte_offset_of(MethodData, _invoke_mask);
2697   }
2698 
2699   static ByteSize backedge_mask_offset() {
2700     return byte_offset_of(MethodData, _backedge_mask);
2701   }
2702 
2703   static ByteSize parameters_type_data_di_offset() {
2704     return byte_offset_of(MethodData, _parameters_type_data_di);
2705   }
2706 
2707   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
2708   virtual MetaspaceObj::Type type() const { return MethodDataType; }
2709 
2710   // Deallocation support
2711   void deallocate_contents(ClassLoaderData* loader_data);
2712   void release_C_heap_structures();
2713 
2714   // GC support
2715   void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
2716 
2717   // Printing
2718   void print_on      (outputStream* st) const;
2719   void print_value_on(outputStream* st) const;
2720 
2721   // printing support for method data
2722   void print_data_on(outputStream* st) const;
2723 
2724   const char* internal_name() const { return "{method data}"; }
2725 
2726   // verification
2727   void verify_on(outputStream* st);
2728   void verify_data_on(outputStream* st);
2729 
2730   static bool profile_parameters_for_method(const methodHandle& m);
2731   static bool profile_arguments();
2732   static bool profile_arguments_jsr292_only();
2733   static bool profile_return();
2734   static bool profile_parameters();
2735   static bool profile_return_jsr292_only();
2736 
2737   void clean_method_data(bool always_clean);
2738   void clean_weak_method_links();
2739   Mutex* extra_data_lock() const { return const_cast<Mutex*>(&_extra_data_lock); }
2740   void check_extra_data_locked() const NOT_DEBUG_RETURN;
2741 };
2742 
2743 #endif // SHARE_OOPS_METHODDATA_HPP