1 /*
   2  * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_OOPS_METHODDATA_HPP
  26 #define SHARE_OOPS_METHODDATA_HPP
  27 
  28 #include "interpreter/bytecodes.hpp"
  29 #include "oops/metadata.hpp"
  30 #include "oops/method.hpp"
  31 #include "oops/oop.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/deoptimization.hpp"
  34 #include "runtime/mutex.hpp"
  35 #include "utilities/align.hpp"
  36 #include "utilities/copy.hpp"
  37 
  38 class BytecodeStream;
  39 
  40 // The MethodData object collects counts and other profile information
  41 // during zeroth-tier (interpretive) and first-tier execution.
  42 // The profile is used later by compilation heuristics.  Some heuristics
  43 // enable use of aggressive (or "heroic") optimizations.  An aggressive
  44 // optimization often has a down-side, a corner case that it handles
  45 // poorly, but which is thought to be rare.  The profile provides
  46 // evidence of this rarity for a given method or even BCI.  It allows
  47 // the compiler to back out of the optimization at places where it
  48 // has historically been a poor choice.  Other heuristics try to use
  49 // specific information gathered about types observed at a given site.
  50 //
  51 // All data in the profile is approximate.  It is expected to be accurate
  52  * on the whole, but the system expects occasional inaccuracies, due to
  53 // counter overflow, multiprocessor races during data collection, space
  54 // limitations, missing MDO blocks, etc.  Bad or missing data will degrade
  55 // optimization quality but will not affect correctness.  Also, each MDO
  56 // is marked with its birth-date ("creation_mileage") which can be used
  57 // to assess the quality ("maturity") of its data.
  58 //
  59 // Short (<32-bit) counters are designed to overflow to a known "saturated"
  60 // state.  Also, certain recorded per-BCI events are given one-bit counters
  61 // which overflow to a saturated state which applies to all counters at
  62 // that BCI.  In other words, there is a small lattice which approximates
  63 // the ideal of an infinite-precision counter for each event at each BCI,
  64 // and the lattice quickly "bottoms out" in a state where all counters
  65 // are taken to be indefinitely large.
  66 //
  67 // The reader will find many data races in profile gathering code, starting
  68 // with invocation counter incrementation.  None of these races harm correct
  69 // execution of the compiled code.
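     //
     // As a worked illustration of the saturation mentioned above (a sketch
     // only; the real increments are emitted by the interpreter and the
     // compilers, typically without any atomicity):
     //
     //   uint inc_saturating(uint cnt) {
     //     cnt += 1;
     //     if (cnt == 0) cnt -= 1;   // wrapped: pin at the all-ones saturated value
     //     return cnt;
     //   }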
  70 
  71 // forward decl
  72 class ProfileData;
  73 
  74 // DataLayout
  75 //
  76 // Overlay for generic profiling data.
  77 class DataLayout {
  78   friend class VMStructs;
  79   friend class JVMCIVMStructs;
  80 
  81 private:
  82   // Every data layout begins with a header.  This header
  83   // contains a tag, which is used to indicate the size/layout
  84   // of the data, 8 bits of flags, which can be used in any way,
  85   // 32 bits of trap history (none/one reason/many reasons),
  86   // and a bci, which is used to tie this piece of data to a
  87   // specific bci in the bytecodes.
  88   union {
  89     u8 _bits;
  90     struct {
  91       u1 _tag;
  92       u1 _flags;
  93       u2 _bci;
  94       u4 _traps;
  95     } _struct;
  96   } _header;
  97 
  98   // The data layout has an arbitrary number of cells, each sized
  99   // to accommodate a pointer or an integer.
 100   intptr_t _cells[1];
 101 
 102   // Some types of data layouts need a length field.
 103   static bool needs_array_len(u1 tag);
 104 
 105 public:
 106   enum {
 107     counter_increment = 1
 108   };
 109 
 110   enum {
 111     cell_size = sizeof(intptr_t)
 112   };
 113 
 114   // Tag values
 115   enum {
 116     no_tag,
 117     bit_data_tag,
 118     counter_data_tag,
 119     jump_data_tag,
 120     receiver_type_data_tag,
 121     virtual_call_data_tag,
 122     ret_data_tag,
 123     branch_data_tag,
 124     multi_branch_data_tag,
 125     arg_info_data_tag,
 126     call_type_data_tag,
 127     virtual_call_type_data_tag,
 128     parameters_type_data_tag,
 129     speculative_trap_data_tag,
 130     array_load_store_data_tag,
 131     acmp_data_tag
 132   };
 133 
 134   enum {
 135     // The trap state breaks down as [recompile:1 | reason:31].
 136     // This further breakdown is defined in deoptimization.cpp.
 137     // See Deoptimization::trap_state_reason for an assert that
 138     // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
 139     //
 140     // The trap_state is collected only if ProfileTraps is true.
 141     trap_bits = 1+31,  // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
 142     trap_mask = -1,
 143     first_flag = 0
 144   };
 145 
 146   // Size computation
 147   static int header_size_in_bytes() {
 148     return header_size_in_cells() * cell_size;
 149   }
 150   static int header_size_in_cells() {
 151     return LP64_ONLY(1) NOT_LP64(2);
 152   }
 153 
 154   static int compute_size_in_bytes(int cell_count) {
 155     return header_size_in_bytes() + cell_count * cell_size;
 156   }
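       // Worked example (illustrative): on an LP64 build, cell_size is 8 and
       // header_size_in_cells() is 1, so a layout with 3 cells occupies
       //   compute_size_in_bytes(3) == 8 + 3 * 8 == 32 bytes.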
 157 
 158   // Initialization
 159   void initialize(u1 tag, u2 bci, int cell_count);
 160 
 161   // Accessors
 162   u1 tag() {
 163     return _header._struct._tag;
 164   }
 165 
 166   // Return 32 bits of trap state.
 167   // The state tells if traps with zero, one, or many reasons have occurred.
 168   // It also tells whether zero or many recompilations have occurred.
 169   // The associated trap histogram in the MDO itself tells whether
 170   // traps are common or not.  If a BCI shows that a trap X has
 171   // occurred, and the MDO shows N occurrences of X, we make the
 172   // simplifying assumption that all N occurrences can be blamed
 173   // on that BCI.
 174   uint trap_state() const {
 175     return _header._struct._traps;
 176   }
 177 
 178   void set_trap_state(uint new_state) {
 179     assert(ProfileTraps, "used only under +ProfileTraps");
 180     uint old_flags = _header._struct._traps;
 181     _header._struct._traps = new_state | old_flags;
 182   }
 183 
 184   u1 flags() const {
 185     return _header._struct._flags;
 186   }
 187 
 188   u2 bci() const {
 189     return _header._struct._bci;
 190   }
 191 
 192   void set_header(u8 value) {
 193     _header._bits = value;
 194   }
 195   u8 header() {
 196     return _header._bits;
 197   }
 198   void set_cell_at(int index, intptr_t value) {
 199     _cells[index] = value;
 200   }
 201   void release_set_cell_at(int index, intptr_t value);
 202   intptr_t cell_at(int index) const {
 203     return _cells[index];
 204   }
 205 
 206   void set_flag_at(u1 flag_number) {
 207     _header._struct._flags |= (0x1 << flag_number);
 208   }
 209   bool flag_at(u1 flag_number) const {
 210     return (_header._struct._flags & (0x1 << flag_number)) != 0;
 211   }
 212 
 213   // Low-level support for code generation.
 214   static ByteSize header_offset() {
 215     return byte_offset_of(DataLayout, _header);
 216   }
 217   static ByteSize tag_offset() {
 218     return byte_offset_of(DataLayout, _header._struct._tag);
 219   }
 220   static ByteSize flags_offset() {
 221     return byte_offset_of(DataLayout, _header._struct._flags);
 222   }
 223   static ByteSize bci_offset() {
 224     return byte_offset_of(DataLayout, _header._struct._bci);
 225   }
 226   static ByteSize cell_offset(int index) {
 227     return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
 228   }
 229   // Return a value which, when or-ed as a byte into _flags, sets the flag.
 230   static u1 flag_number_to_constant(u1 flag_number) {
 231     DataLayout temp; temp.set_header(0);
 232     temp.set_flag_at(flag_number);
 233     return temp._header._struct._flags;
 234   }
 235   // Return a value which, when or-ed as a word into _header, sets the flag.
 236   static u8 flag_mask_to_header_mask(uint byte_constant) {
 237     DataLayout temp; temp.set_header(0);
 238     temp._header._struct._flags = byte_constant;
 239     return temp._header._bits;
 240   }
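       // Illustrative example of the two helpers above: flag_number_to_constant(2)
       // yields 0x04 (bit 2 expressed as a byte value), and passing that byte to
       // flag_mask_to_header_mask() returns a u8 with the byte placed in the
       // _flags position of the header word (the exact bit position depends on
       // the field layout), e.g.
       //
       //   u1 bit_in_flags = DataLayout::flag_number_to_constant(2);   // == 0x04
       //   u8 header_mask  = DataLayout::flag_mask_to_header_mask(bit_in_flags);
       //
       // so generated code can set the flag with a single OR of the whole header.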
 241 
 242   ProfileData* data_in();
 243 
 244   int size_in_bytes() {
 245     int cells = cell_count();
 246     assert(cells >= 0, "invalid number of cells");
 247     return DataLayout::compute_size_in_bytes(cells);
 248   }
 249   int cell_count();
 250 
 251   // GC support
 252   void clean_weak_klass_links(bool always_clean);
 253 };
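     // Successive profile entries are laid out back to back in a MethodData's
     // data area, so a walker advances by each entry's size (derived from its
     // tag via cell_count()).  A minimal sketch, assuming "layout" points at a
     // valid entry:
     //
     //   DataLayout* layout = ...;   // hypothetical current entry
     //   DataLayout* next   = (DataLayout*)((address)layout + layout->size_in_bytes());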
 254 
 255 
 256 // ProfileData class hierarchy
 257 class ProfileData;
 258 class   BitData;
 259 class     CounterData;
 260 class       ReceiverTypeData;
 261 class         VirtualCallData;
 262 class           VirtualCallTypeData;
 263 class       RetData;
 264 class       CallTypeData;
 265 class   JumpData;
 266 class     BranchData;
 267 class       ACmpData;
 268 class   ArrayData;
 269 class     MultiBranchData;
 270 class     ArgInfoData;
 271 class     ParametersTypeData;
 272 class   SpeculativeTrapData;
 273 class   ArrayLoadStoreData;
 274 
 275 // ProfileData
 276 //
 277 // A ProfileData object is created to refer to a section of profiling
 278 // data in a structured way.
 279 class ProfileData : public ResourceObj {
 280   friend class TypeEntries;
 281   friend class SingleTypeEntry;
 282   friend class TypeStackSlotEntries;
 283 private:
 284   enum {
 285     tab_width_one = 16,
 286     tab_width_two = 36
 287   };
 288 
 289   // This is a pointer to a section of profiling data.
 290   DataLayout* _data;
 291 
 292   char* print_data_on_helper(const MethodData* md) const;
 293 
 294 protected:
 295   DataLayout* data() { return _data; }
 296   const DataLayout* data() const { return _data; }
 297 
 298   enum {
 299     cell_size = DataLayout::cell_size
 300   };
 301 
 302 public:
 303   // How many cells are in this?
 304   virtual int cell_count() const {
 305     ShouldNotReachHere();
 306     return -1;
 307   }
 308 
 309   // Return the size of this data.
 310   int size_in_bytes() {
 311     return DataLayout::compute_size_in_bytes(cell_count());
 312   }
 313 
 314 protected:
 315   // Low-level accessors for underlying data
 316   void set_intptr_at(int index, intptr_t value) {
 317     assert(0 <= index && index < cell_count(), "oob");
 318     data()->set_cell_at(index, value);
 319   }
 320   void release_set_intptr_at(int index, intptr_t value);
 321   intptr_t intptr_at(int index) const {
 322     assert(0 <= index && index < cell_count(), "oob");
 323     return data()->cell_at(index);
 324   }
 325   void set_uint_at(int index, uint value) {
 326     set_intptr_at(index, (intptr_t) value);
 327   }
 328   void release_set_uint_at(int index, uint value);
 329   uint uint_at(int index) const {
 330     return (uint)intptr_at(index);
 331   }
 332   void set_int_at(int index, int value) {
 333     set_intptr_at(index, (intptr_t) value);
 334   }
 335   void release_set_int_at(int index, int value);
 336   int int_at(int index) const {
 337     return (int)intptr_at(index);
 338   }
 339   int int_at_unchecked(int index) const {
 340     return (int)data()->cell_at(index);
 341   }
 342   void set_oop_at(int index, oop value) {
 343     set_intptr_at(index, cast_from_oop<intptr_t>(value));
 344   }
 345   oop oop_at(int index) const {
 346     return cast_to_oop(intptr_at(index));
 347   }
 348 
 349   void set_flag_at(int flag_number) {
 350     data()->set_flag_at(flag_number);
 351   }
 352   bool flag_at(int flag_number) const {
 353     return data()->flag_at(flag_number);
 354   }
 355 
 356   // two convenient imports for use by subclasses:
 357   static ByteSize cell_offset(int index) {
 358     return DataLayout::cell_offset(index);
 359   }
 360   static int flag_number_to_constant(int flag_number) {
 361     return DataLayout::flag_number_to_constant(flag_number);
 362   }
 363 
 364   ProfileData(DataLayout* data) {
 365     _data = data;
 366   }
 367 
 368 public:
 369   // Constructor for invalid ProfileData.
 370   ProfileData();
 371 
 372   u2 bci() const {
 373     return data()->bci();
 374   }
 375 
 376   address dp() {
 377     return (address)_data;
 378   }
 379 
 380   int trap_state() const {
 381     return data()->trap_state();
 382   }
 383   void set_trap_state(int new_state) {
 384     data()->set_trap_state(new_state);
 385   }
 386 
 387   // Type checking
 388   virtual bool is_BitData()         const { return false; }
 389   virtual bool is_CounterData()     const { return false; }
 390   virtual bool is_JumpData()        const { return false; }
 391   virtual bool is_ReceiverTypeData()const { return false; }
 392   virtual bool is_VirtualCallData() const { return false; }
 393   virtual bool is_RetData()         const { return false; }
 394   virtual bool is_BranchData()      const { return false; }
 395   virtual bool is_ArrayData()       const { return false; }
 396   virtual bool is_MultiBranchData() const { return false; }
 397   virtual bool is_ArgInfoData()     const { return false; }
 398   virtual bool is_CallTypeData()    const { return false; }
 399   virtual bool is_VirtualCallTypeData()const { return false; }
 400   virtual bool is_ParametersTypeData() const { return false; }
 401   virtual bool is_SpeculativeTrapData()const { return false; }
 402   virtual bool is_ArrayLoadStoreData() const { return false; }
 403   virtual bool is_ACmpData()           const { return false; }
 404 
 405 
 406   BitData* as_BitData() const {
 407     assert(is_BitData(), "wrong type");
 408     return is_BitData()         ? (BitData*)        this : NULL;
 409   }
 410   CounterData* as_CounterData() const {
 411     assert(is_CounterData(), "wrong type");
 412     return is_CounterData()     ? (CounterData*)    this : NULL;
 413   }
 414   JumpData* as_JumpData() const {
 415     assert(is_JumpData(), "wrong type");
 416     return is_JumpData()        ? (JumpData*)       this : NULL;
 417   }
 418   ReceiverTypeData* as_ReceiverTypeData() const {
 419     assert(is_ReceiverTypeData(), "wrong type");
 420     return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
 421   }
 422   VirtualCallData* as_VirtualCallData() const {
 423     assert(is_VirtualCallData(), "wrong type");
 424     return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
 425   }
 426   RetData* as_RetData() const {
 427     assert(is_RetData(), "wrong type");
 428     return is_RetData()         ? (RetData*)        this : NULL;
 429   }
 430   BranchData* as_BranchData() const {
 431     assert(is_BranchData(), "wrong type");
 432     return is_BranchData()      ? (BranchData*)     this : NULL;
 433   }
 434   ArrayData* as_ArrayData() const {
 435     assert(is_ArrayData(), "wrong type");
 436     return is_ArrayData()       ? (ArrayData*)      this : NULL;
 437   }
 438   MultiBranchData* as_MultiBranchData() const {
 439     assert(is_MultiBranchData(), "wrong type");
 440     return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
 441   }
 442   ArgInfoData* as_ArgInfoData() const {
 443     assert(is_ArgInfoData(), "wrong type");
 444     return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
 445   }
 446   CallTypeData* as_CallTypeData() const {
 447     assert(is_CallTypeData(), "wrong type");
 448     return is_CallTypeData() ? (CallTypeData*)this : NULL;
 449   }
 450   VirtualCallTypeData* as_VirtualCallTypeData() const {
 451     assert(is_VirtualCallTypeData(), "wrong type");
 452     return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
 453   }
 454   ParametersTypeData* as_ParametersTypeData() const {
 455     assert(is_ParametersTypeData(), "wrong type");
 456     return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
 457   }
 458   SpeculativeTrapData* as_SpeculativeTrapData() const {
 459     assert(is_SpeculativeTrapData(), "wrong type");
 460     return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
 461   }
 462   ArrayLoadStoreData* as_ArrayLoadStoreData() const {
 463     assert(is_ArrayLoadStoreData(), "wrong type");
 464     return is_ArrayLoadStoreData() ? (ArrayLoadStoreData*)this : NULL;
 465   }
 466   ACmpData* as_ACmpData() const {
 467     assert(is_ACmpData(), "wrong type");
 468     return is_ACmpData() ? (ACmpData*)this : NULL;
 469   }
 470 
 471 
 472   // Subclass specific initialization
 473   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}
 474 
 475   // GC support
 476   virtual void clean_weak_klass_links(bool always_clean) {}
 477 
 478   // CI translation: ProfileData can represent both MethodDataOop data
 479   // and ciMethodData data. This function is provided for translating
 480   // an oop in a ProfileData to the ci equivalent. Generally speaking,
 481   // most ProfileData don't require any translation, so we provide the null
 482   // translation here, and the required translators are in the ci subclasses.
 483   virtual void translate_from(const ProfileData* data) {}
 484 
 485   virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
 486     ShouldNotReachHere();
 487   }
 488 
 489   void print_data_on(outputStream* st, const MethodData* md) const;
 490 
 491   void print_shared(outputStream* st, const char* name, const char* extra) const;
 492   void tab(outputStream* st, bool first = false) const;
 493 };
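     // Typical use of the hierarchy above (an illustrative sketch): code that
     // consumes a profile queries the dynamic kind of an entry and then
     // downcasts with the matching as_*() accessor, e.g.
     //
     //   ProfileData* data = ...;   // hypothetical entry obtained from a MethodData
     //   if (data->is_BranchData()) {
     //     BranchData* branch = data->as_BranchData();
     //     uint taken_count = branch->taken();
     //   }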
 494 
 495 // BitData
 496 //
 497 // A BitData holds a flag or two in its header.
 498 class BitData : public ProfileData {
 499   friend class VMStructs;
 500   friend class JVMCIVMStructs;
 501 protected:
 502   enum {
 503     // null_seen:
 504     //  saw a null operand (cast/aastore/instanceof)
 505       null_seen_flag              = DataLayout::first_flag + 0
 506 #if INCLUDE_JVMCI
 507     // bytecode threw any exception
 508     , exception_seen_flag         = null_seen_flag + 1
 509 #endif
 510   };
 511   enum { bit_cell_count = 0 };  // no additional data fields needed.
 512 public:
 513   BitData(DataLayout* layout) : ProfileData(layout) {
 514   }
 515 
 516   virtual bool is_BitData() const { return true; }
 517 
 518   static int static_cell_count() {
 519     return bit_cell_count;
 520   }
 521 
 522   virtual int cell_count() const {
 523     return static_cell_count();
 524   }
 525 
 526   // Accessor
 527 
 528   // The null_seen flag bit is specially known to the interpreter.
 529   // Consulting it allows the compiler to avoid setting up null_check traps.
 530   bool null_seen()     { return flag_at(null_seen_flag); }
 531   void set_null_seen()    { set_flag_at(null_seen_flag); }
 532 
 533 #if INCLUDE_JVMCI
 534   // true if an exception was thrown at the specific BCI
 535   bool exception_seen() { return flag_at(exception_seen_flag); }
 536   void set_exception_seen() { set_flag_at(exception_seen_flag); }
 537 #endif
 538 
 539   // Code generation support
 540   static int null_seen_byte_constant() {
 541     return flag_number_to_constant(null_seen_flag);
 542   }
 543 
 544   static ByteSize bit_data_size() {
 545     return cell_offset(bit_cell_count);
 546   }
 547 
 548   void print_data_on(outputStream* st, const char* extra = NULL) const;
 549 };
 550 
 551 // CounterData
 552 //
 553 // A CounterData corresponds to a simple counter.
 554 class CounterData : public BitData {
 555   friend class VMStructs;
 556   friend class JVMCIVMStructs;
 557 protected:
 558   enum {
 559     count_off,
 560     counter_cell_count
 561   };
 562 public:
 563   CounterData(DataLayout* layout) : BitData(layout) {}
 564 
 565   virtual bool is_CounterData() const { return true; }
 566 
 567   static int static_cell_count() {
 568     return counter_cell_count;
 569   }
 570 
 571   virtual int cell_count() const {
 572     return static_cell_count();
 573   }
 574 
 575   // Direct accessor
 576   int count() const {
 577     intptr_t raw_data = intptr_at(count_off);
 578     if (raw_data > max_jint) {
 579       raw_data = max_jint;
 580     } else if (raw_data < min_jint) {
 581       raw_data = min_jint;
 582     }
 583     return int(raw_data);
 584   }
 585 
 586   // Code generation support
 587   static ByteSize count_offset() {
 588     return cell_offset(count_off);
 589   }
 590   static ByteSize counter_data_size() {
 591     return cell_offset(counter_cell_count);
 592   }
 593 
 594   void set_count(int count) {
 595     set_int_at(count_off, count);
 596   }
 597 
 598   void print_data_on(outputStream* st, const char* extra = NULL) const;
 599 };
 600 
 601 // JumpData
 602 //
 603 // A JumpData is used to access profiling information for a direct
 604 // branch.  It is a counter, used for counting the number of branches,
 605 // plus a data displacement, used for realigning the data pointer to
 606 // the corresponding target bci.
 607 class JumpData : public ProfileData {
 608   friend class VMStructs;
 609   friend class JVMCIVMStructs;
 610 protected:
 611   enum {
 612     taken_off_set,
 613     displacement_off_set,
 614     jump_cell_count
 615   };
 616 
 617   void set_displacement(int displacement) {
 618     set_int_at(displacement_off_set, displacement);
 619   }
 620 
 621 public:
 622   JumpData(DataLayout* layout) : ProfileData(layout) {
 623     assert(layout->tag() == DataLayout::jump_data_tag ||
 624       layout->tag() == DataLayout::branch_data_tag ||
 625       layout->tag() == DataLayout::acmp_data_tag, "wrong type");
 626   }
 627 
 628   virtual bool is_JumpData() const { return true; }
 629 
 630   static int static_cell_count() {
 631     return jump_cell_count;
 632   }
 633 
 634   virtual int cell_count() const {
 635     return static_cell_count();
 636   }
 637 
 638   // Direct accessor
 639   uint taken() const {
 640     return uint_at(taken_off_set);
 641   }
 642 
 643   void set_taken(uint cnt) {
 644     set_uint_at(taken_off_set, cnt);
 645   }
 646 
 647   // Saturating counter
 648   uint inc_taken() {
 649     uint cnt = taken() + 1;
 650     // Did we wrap? Will compiler screw us??
 651     if (cnt == 0) cnt--;
 652     set_uint_at(taken_off_set, cnt);
 653     return cnt;
 654   }
 655 
 656   int displacement() const {
 657     return int_at(displacement_off_set);
 658   }
 659 
 660   // Code generation support
 661   static ByteSize taken_offset() {
 662     return cell_offset(taken_off_set);
 663   }
 664 
 665   static ByteSize displacement_offset() {
 666     return cell_offset(displacement_off_set);
 667   }
 668 
 669   // Specific initialization.
 670   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 671 
 672   void print_data_on(outputStream* st, const char* extra = NULL) const;
 673 };
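     // The displacement recorded above is the byte offset from this entry's data
     // to the profile data of the branch target, so on a taken branch the
     // interpreter can re-align its profile-data pointer with one addition.
     // A rough sketch (the real update is generated interpreter code):
     //
     //   JumpData* jd = ...;              // hypothetical entry for the branch bytecode
     //   address mdp = jd->dp();          // profile data pointer at the branch
     //   mdp += jd->displacement();       // now positioned at the target bci's data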
 674 
 675 // Entries in a ProfileData object to record types: the recorded type can be
 676 // none (no profile), unknown (conflicting profile data) or a klass if
 677 // a single one is seen. Whether a null reference was seen is also
 678 // recorded. No counter is associated with the type and a single type
 679 // is tracked (unlike VirtualCallData).
 680 class TypeEntries {
 681 
 682 public:
 683 
 684   // A single cell is used to record information for a type:
 685   // - the cell is initialized to 0
 686   // - when a type is discovered it is stored in the cell
 687   // - bit zero of the cell is used to record whether a null reference
 688   // was encountered or not
 689   // - bit 1 is set to record a conflict in the type information
 690 
 691   enum {
 692     null_seen = 1,
 693     type_mask = ~null_seen,
 694     type_unknown = 2,
 695     status_bits = null_seen | type_unknown,
 696     type_klass_mask = ~status_bits
 697   };
 698 
 699   // what to initialize a cell to
 700   static intptr_t type_none() {
 701     return 0;
 702   }
 703 
 704   // null seen = bit 0 set?
 705   static bool was_null_seen(intptr_t v) {
 706     return (v & null_seen) != 0;
 707   }
 708 
 709   // conflicting type information = bit 1 set?
 710   static bool is_type_unknown(intptr_t v) {
 711     return (v & type_unknown) != 0;
 712   }
 713 
 714   // no type information yet = all bits cleared, ignoring bit 0?
 715   static bool is_type_none(intptr_t v) {
 716     return (v & type_mask) == 0;
 717   }
 718 
 719   // recorded type: cell without bits 0 and 1
 720   static intptr_t klass_part(intptr_t v) {
 721     intptr_t r = v & type_klass_mask;
 722     return r;
 723   }
 724 
 725   // type recorded
 726   static Klass* valid_klass(intptr_t k) {
 727     if (!is_type_none(k) &&
 728         !is_type_unknown(k)) {
 729       Klass* res = (Klass*)klass_part(k);
 730       assert(res != NULL, "invalid");
 731       return res;
 732     } else {
 733       return NULL;
 734     }
 735   }
 736 
 737   static intptr_t with_status(intptr_t k, intptr_t in) {
 738     return k | (in & status_bits);
 739   }
 740 
 741   static intptr_t with_status(Klass* k, intptr_t in) {
 742     return with_status((intptr_t)k, in);
 743   }
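       // Worked example of this encoding (illustrative): Klass pointers are
       // sufficiently aligned that bits 0 and 1 of a recorded pointer are free
       // to carry the status bits, so for a hypothetical klass "k":
       //
       //   intptr_t cell = with_status(k, (intptr_t)null_seen);
       //   was_null_seen(cell);     // true  - bit 0 was carried over
       //   is_type_unknown(cell);   // false - bit 1 is clear
       //   valid_klass(cell);       // == k  - status bits are masked off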
 744 
 745   static void print_klass(outputStream* st, intptr_t k);
 746 
 747 protected:
 748   // ProfileData object these entries are part of
 749   ProfileData* _pd;
 750   // offset within the ProfileData object where the entries start
 751   const int _base_off;
 752 
 753   TypeEntries(int base_off)
 754     : _pd(NULL), _base_off(base_off) {}
 755 
 756   void set_intptr_at(int index, intptr_t value) {
 757     _pd->set_intptr_at(index, value);
 758   }
 759 
 760   intptr_t intptr_at(int index) const {
 761     return _pd->intptr_at(index);
 762   }
 763 
 764 public:
 765   void set_profile_data(ProfileData* pd) {
 766     _pd = pd;
 767   }
 768 };
 769 
 770 // Type entries used for arguments passed at a call and parameters on
 771 // method entry. 2 cells per entry: one for the type encoded as in
 772 // TypeEntries and one initialized with the stack slot where the
 773 // profiled object is to be found so that the interpreter can locate
 774 // it quickly.
 775 class TypeStackSlotEntries : public TypeEntries {
 776 
 777 private:
 778   enum {
 779     stack_slot_entry,
 780     type_entry,
 781     per_arg_cell_count
 782   };
 783 
 784   // offset of cell for stack slot for entry i within ProfileData object
 785   int stack_slot_offset(int i) const {
 786     return _base_off + stack_slot_local_offset(i);
 787   }
 788 
 789   const int _number_of_entries;
 790 
 791   // offset of cell for type for entry i within ProfileData object
 792   int type_offset_in_cells(int i) const {
 793     return _base_off + type_local_offset(i);
 794   }
 795 
 796 public:
 797 
 798   TypeStackSlotEntries(int base_off, int nb_entries)
 799     : TypeEntries(base_off), _number_of_entries(nb_entries) {}
 800 
 801   static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
 802 
 803   void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
 804 
 805   int number_of_entries() const { return _number_of_entries; }
 806 
 807   // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
 808   static int stack_slot_local_offset(int i) {
 809     return i * per_arg_cell_count + stack_slot_entry;
 810   }
 811 
 812   // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
 813   static int type_local_offset(int i) {
 814     return i * per_arg_cell_count + type_entry;
 815   }
 816 
 817   // stack slot for entry i
 818   uint stack_slot(int i) const {
 819     assert(i >= 0 && i < _number_of_entries, "oob");
 820     return _pd->uint_at(stack_slot_offset(i));
 821   }
 822 
 823   // set stack slot for entry i
 824   void set_stack_slot(int i, uint num) {
 825     assert(i >= 0 && i < _number_of_entries, "oob");
 826     _pd->set_uint_at(stack_slot_offset(i), num);
 827   }
 828 
 829   // type for entry i
 830   intptr_t type(int i) const {
 831     assert(i >= 0 && i < _number_of_entries, "oob");
 832     return _pd->intptr_at(type_offset_in_cells(i));
 833   }
 834 
 835   // set type for entry i
 836   void set_type(int i, intptr_t k) {
 837     assert(i >= 0 && i < _number_of_entries, "oob");
 838     _pd->set_intptr_at(type_offset_in_cells(i), k);
 839   }
 840 
 841   static ByteSize per_arg_size() {
 842     return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
 843   }
 844 
 845   static int per_arg_count() {
 846     return per_arg_cell_count;
 847   }
 848 
 849   ByteSize type_offset(int i) const {
 850     return DataLayout::cell_offset(type_offset_in_cells(i));
 851   }
 852 
 853   // GC support
 854   void clean_weak_klass_links(bool always_clean);
 855 
 856   void print_data_on(outputStream* st) const;
 857 };
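     // For example (illustrative), a TypeStackSlotEntries block describing two
     // profiled arguments occupies 2 * per_arg_count() == 4 cells, laid out as
     //
     //   [ stack_slot(0) | type(0) | stack_slot(1) | type(1) ]
     //
     // where each type cell uses the TypeEntries encoding above and each stack
     // slot cell tells the interpreter where the profiled object is to be found,
     // without re-parsing the call's signature.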
 858 
 859 // Type entry used for return from a call. A single cell to record the
 860 // type.
 861 class SingleTypeEntry : public TypeEntries {
 862 
 863 private:
 864   enum {
 865     cell_count = 1
 866   };
 867 
 868 public:
 869   SingleTypeEntry(int base_off)
 870     : TypeEntries(base_off) {}
 871 
 872   void post_initialize() {
 873     set_type(type_none());
 874   }
 875 
 876   intptr_t type() const {
 877     return _pd->intptr_at(_base_off);
 878   }
 879 
 880   void set_type(intptr_t k) {
 881     _pd->set_intptr_at(_base_off, k);
 882   }
 883 
 884   static int static_cell_count() {
 885     return cell_count;
 886   }
 887 
 888   static ByteSize size() {
 889     return in_ByteSize(cell_count * DataLayout::cell_size);
 890   }
 891 
 892   ByteSize type_offset() {
 893     return DataLayout::cell_offset(_base_off);
 894   }
 895 
 896   // GC support
 897   void clean_weak_klass_links(bool always_clean);
 898 
 899   void print_data_on(outputStream* st) const;
 900 };
 901 
 902 // Entries to collect type information at a call: contains arguments
 903 // (TypeStackSlotEntries), a return type (SingleTypeEntry) and a
 904 // number of cells. Because the number of cells for the return type is
 905 // smaller than the number of cells for the type of an argument, the
 906 // number of cells is used to tell how many arguments are profiled and
 907 // whether a return value is profiled. See has_arguments() and
 908 // has_return().
 909 class TypeEntriesAtCall {
 910 private:
 911   static int stack_slot_local_offset(int i) {
 912     return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
 913   }
 914 
 915   static int argument_type_local_offset(int i) {
 916     return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
 917   }
 918 
 919 public:
 920 
 921   static int header_cell_count() {
 922     return 1;
 923   }
 924 
 925   static int cell_count_local_offset() {
 926     return 0;
 927   }
 928 
 929   static int compute_cell_count(BytecodeStream* stream);
 930 
 931   static void initialize(DataLayout* dl, int base, int cell_count) {
 932     int off = base + cell_count_local_offset();
 933     dl->set_cell_at(off, cell_count - base - header_cell_count());
 934   }
 935 
 936   static bool arguments_profiling_enabled();
 937   static bool return_profiling_enabled();
 938 
 939   // Code generation support
 940   static ByteSize cell_count_offset() {
 941     return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
 942   }
 943 
 944   static ByteSize args_data_offset() {
 945     return in_ByteSize(header_cell_count() * DataLayout::cell_size);
 946   }
 947 
 948   static ByteSize stack_slot_offset(int i) {
 949     return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
 950   }
 951 
 952   static ByteSize argument_type_offset(int i) {
 953     return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
 954   }
 955 
 956   static ByteSize return_only_size() {
 957     return SingleTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
 958   }
 959 
 960 };
 961 
 962 // CallTypeData
 963 //
 964 // A CallTypeData is used to access profiling information about a
 965 // non-virtual call for which we collect type information about arguments
 966 // and return value.
 967 class CallTypeData : public CounterData {
 968 private:
 969   // entries for arguments if any
 970   TypeStackSlotEntries _args;
 971   // entry for return type if any
 972   SingleTypeEntry _ret;
 973 
 974   int cell_count_global_offset() const {
 975     return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
 976   }
 977 
 978   // number of cells not counting the header
 979   int cell_count_no_header() const {
 980     return uint_at(cell_count_global_offset());
 981   }
 982 
 983   void check_number_of_arguments(int total) {
 984     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
 985   }
 986 
 987 public:
 988   CallTypeData(DataLayout* layout) :
 989     CounterData(layout),
 990     _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
 991     _ret(cell_count() - SingleTypeEntry::static_cell_count())
 992   {
 993     assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
 994     // Some compilers (VC++) don't want this passed in member initialization list
 995     _args.set_profile_data(this);
 996     _ret.set_profile_data(this);
 997   }
 998 
 999   const TypeStackSlotEntries* args() const {
1000     assert(has_arguments(), "no profiling of arguments");
1001     return &_args;
1002   }
1003 
1004   const SingleTypeEntry* ret() const {
1005     assert(has_return(), "no profiling of return value");
1006     return &_ret;
1007   }
1008 
1009   virtual bool is_CallTypeData() const { return true; }
1010 
1011   static int static_cell_count() {
1012     return -1;
1013   }
1014 
1015   static int compute_cell_count(BytecodeStream* stream) {
1016     return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1017   }
1018 
1019   static void initialize(DataLayout* dl, int cell_count) {
1020     TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
1021   }
1022 
1023   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1024 
1025   virtual int cell_count() const {
1026     return CounterData::static_cell_count() +
1027       TypeEntriesAtCall::header_cell_count() +
1028       int_at_unchecked(cell_count_global_offset());
1029   }
1030 
1031   int number_of_arguments() const {
1032     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1033   }
1034 
1035   void set_argument_type(int i, Klass* k) {
1036     assert(has_arguments(), "no arguments!");
1037     intptr_t current = _args.type(i);
1038     _args.set_type(i, TypeEntries::with_status(k, current));
1039   }
1040 
1041   void set_return_type(Klass* k) {
1042     assert(has_return(), "no return!");
1043     intptr_t current = _ret.type();
1044     _ret.set_type(TypeEntries::with_status(k, current));
1045   }
1046 
1047   // An entry for a return value takes less space than an entry for an
1048   // argument, so if the number of cells exceeds the number of cells
1049   // needed for an argument, this object contains type information for
1050   // at least one argument.
1051   bool has_arguments() const {
1052     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1053     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1054     return res;
1055   }
1056 
1057   // An entry for a return value takes less space than an entry for an
1058   // argument, so if the remainder of the number of cells divided by
1059   // the number of cells for an argument is nonzero, a return value
1060   // is profiled in this object.
1061   bool has_return() const {
1062     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1063     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1064     return res;
1065   }
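       // Worked example (illustrative): with two profiled arguments and a
       // profiled return value, cell_count_no_header() is
       //   2 * TypeStackSlotEntries::per_arg_count() + SingleTypeEntry::static_cell_count()
       //     == 2 * 2 + 1 == 5,
       // so has_arguments() holds (5 >= 2) and has_return() holds (5 % 2 != 0).
       // With only a profiled return value the count is 1: no arguments
       // (1 < 2), but a return value (1 % 2 != 0).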
1066 
1067   // Code generation support
1068   static ByteSize args_data_offset() {
1069     return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1070   }
1071 
1072   ByteSize argument_type_offset(int i) {
1073     return _args.type_offset(i);
1074   }
1075 
1076   ByteSize return_type_offset() {
1077     return _ret.type_offset();
1078   }
1079 
1080   // GC support
1081   virtual void clean_weak_klass_links(bool always_clean) {
1082     if (has_arguments()) {
1083       _args.clean_weak_klass_links(always_clean);
1084     }
1085     if (has_return()) {
1086       _ret.clean_weak_klass_links(always_clean);
1087     }
1088   }
1089 
1090   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1091 };
1092 
1093 // ReceiverTypeData
1094 //
1095 // A ReceiverTypeData is used to access profiling information about a
1096 // dynamic type check.  It consists of a counter which counts the total times
1097 // that the check is reached, and a series of (Klass*, count) pairs
1098 // which are used to store a type profile for the receiver of the check.
1099 class ReceiverTypeData : public CounterData {
1100   friend class VMStructs;
1101   friend class JVMCIVMStructs;
1102 protected:
1103   enum {
1104 #if INCLUDE_JVMCI
1105     // Description of the different counters
1106     // ReceiverTypeData for instanceof/checkcast/aastore:
1107     //   count is decremented for failed type checks
1108     //   JVMCI only: nonprofiled_count is incremented on type overflow
1109     // VirtualCallData for invokevirtual/invokeinterface:
1110     //   count is incremented on type overflow
1111     //   JVMCI only: nonprofiled_count is incremented on method overflow
1112 
1113     // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
1114     nonprofiled_count_off_set = counter_cell_count,
1115     receiver0_offset,
1116 #else
1117     receiver0_offset = counter_cell_count,
1118 #endif
1119     count0_offset,
1120     receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1121   };
1122 
1123 public:
1124   ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
1125     assert(layout->tag() == DataLayout::receiver_type_data_tag ||
1126            layout->tag() == DataLayout::virtual_call_data_tag ||
1127            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1128   }
1129 
1130   virtual bool is_ReceiverTypeData() const { return true; }
1131 
1132   static int static_cell_count() {
1133     return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
1134   }
1135 
1136   virtual int cell_count() const {
1137     return static_cell_count();
1138   }
1139 
1140   // Direct accessors
1141   static uint row_limit() {
1142     return TypeProfileWidth;
1143   }
1144   static int receiver_cell_index(uint row) {
1145     return receiver0_offset + row * receiver_type_row_cell_count;
1146   }
1147   static int receiver_count_cell_index(uint row) {
1148     return count0_offset + row * receiver_type_row_cell_count;
1149   }
1150 
1151   Klass* receiver(uint row) const {
1152     assert(row < row_limit(), "oob");
1153 
1154     Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
1155     assert(recv == NULL || recv->is_klass(), "wrong type");
1156     return recv;
1157   }
1158 
1159   void set_receiver(uint row, Klass* k) {
1160     assert((uint)row < row_limit(), "oob");
1161     set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
1162   }
1163 
1164   uint receiver_count(uint row) const {
1165     assert(row < row_limit(), "oob");
1166     return uint_at(receiver_count_cell_index(row));
1167   }
1168 
1169   void set_receiver_count(uint row, uint count) {
1170     assert(row < row_limit(), "oob");
1171     set_uint_at(receiver_count_cell_index(row), count);
1172   }
1173 
1174   void clear_row(uint row) {
1175     assert(row < row_limit(), "oob");
1176     // Clear the total count - the indicator of a polymorphic call site.
1177     // The site may look monomorphic after that, but clearing the count
1178     // gives more accurate profiling information, because there has been
1179     // an execution phase change since the klasses were unloaded.
1180     // If the site is still polymorphic, the MDO will be updated to
1181     // reflect it.  But it could be that the site has become merely
1182     // bimorphic; then keeping a nonzero total count would be wrong.
1183     // Even if we compile the site as monomorphic (when it is not),
1184     // we will only take a trap, deoptimize, and recompile again with
1185     // an updated MDO after executing the method in the interpreter.
1186     // An additional receiver will be recorded in the cleaned row
1187     // during the next call execution.
1188     //
1189     // Note: our profiling logic works with empty rows in any slot.
1190     // We sort the profiling info (ciCallProfile) for compilation.
1191     //
1192     set_count(0);
1193     set_receiver(row, NULL);
1194     set_receiver_count(row, 0);
1195 #if INCLUDE_JVMCI
1196     if (!this->is_VirtualCallData()) {
1197       // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
1198       // must also be reset (see "Description of the different counters" above)
1199       set_nonprofiled_count(0);
1200     }
1201 #endif
1202   }
1203 
1204   // Code generation support
1205   static ByteSize receiver_offset(uint row) {
1206     return cell_offset(receiver_cell_index(row));
1207   }
1208   static ByteSize receiver_count_offset(uint row) {
1209     return cell_offset(receiver_count_cell_index(row));
1210   }
1211 #if INCLUDE_JVMCI
1212   static ByteSize nonprofiled_receiver_count_offset() {
1213     return cell_offset(nonprofiled_count_off_set);
1214   }
1215   uint nonprofiled_count() const {
1216     return uint_at(nonprofiled_count_off_set);
1217   }
1218   void set_nonprofiled_count(uint count) {
1219     set_uint_at(nonprofiled_count_off_set, count);
1220   }
1221 #endif // INCLUDE_JVMCI
1222   static ByteSize receiver_type_data_size() {
1223     return cell_offset(static_cell_count());
1224   }
1225 
1226   // GC support
1227   virtual void clean_weak_klass_links(bool always_clean);
1228 
1229   void print_receiver_data_on(outputStream* st) const;
1230   void print_data_on(outputStream* st, const char* extra = NULL) const;
1231 };
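     // A consumer of the receiver rows above typically scans every row, since
     // cleaning can leave gaps in any slot (see clear_row()).  A minimal sketch:
     //
     //   ReceiverTypeData* rtd = ...;   // hypothetical entry for a type check or call
     //   for (uint row = 0; row < ReceiverTypeData::row_limit(); row++) {
     //     Klass* k = rtd->receiver(row);
     //     if (k != NULL) {
     //       uint hits = rtd->receiver_count(row);   // occurrences recorded for this type
     //     }
     //   }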
1232 
1233 // VirtualCallData
1234 //
1235 // A VirtualCallData is used to access profiling information about a
1236 // virtual call.  For now, it has nothing more than a ReceiverTypeData.
1237 class VirtualCallData : public ReceiverTypeData {
1238 public:
1239   VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
1240     assert(layout->tag() == DataLayout::virtual_call_data_tag ||
1241            layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1242   }
1243 
1244   virtual bool is_VirtualCallData() const { return true; }
1245 
1246   static int static_cell_count() {
1247     // At this point we could add more profile state, e.g., for arguments.
1248     // But for now it's the same size as the base record type.
1249     return ReceiverTypeData::static_cell_count();
1250   }
1251 
1252   virtual int cell_count() const {
1253     return static_cell_count();
1254   }
1255 
1256   // Direct accessors
1257   static ByteSize virtual_call_data_size() {
1258     return cell_offset(static_cell_count());
1259   }
1260 
1261   void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
1262   void print_data_on(outputStream* st, const char* extra = NULL) const;
1263 };
1264 
1265 // VirtualCallTypeData
1266 //
1267 // A VirtualCallTypeData is used to access profiling information about
1268 // a virtual call for which we collect type information about
1269 // arguments and return value.
1270 class VirtualCallTypeData : public VirtualCallData {
1271 private:
1272   // entries for arguments if any
1273   TypeStackSlotEntries _args;
1274   // entry for return type if any
1275   SingleTypeEntry _ret;
1276 
1277   int cell_count_global_offset() const {
1278     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
1279   }
1280 
1281   // number of cells not counting the header
1282   int cell_count_no_header() const {
1283     return uint_at(cell_count_global_offset());
1284   }
1285 
1286   void check_number_of_arguments(int total) {
1287     assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
1288   }
1289 
1290 public:
1291   VirtualCallTypeData(DataLayout* layout) :
1292     VirtualCallData(layout),
1293     _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
1294     _ret(cell_count() - SingleTypeEntry::static_cell_count())
1295   {
1296     assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
1297     // Some compilers (VC++) don't want this passed in member initialization list
1298     _args.set_profile_data(this);
1299     _ret.set_profile_data(this);
1300   }
1301 
1302   const TypeStackSlotEntries* args() const {
1303     assert(has_arguments(), "no profiling of arguments");
1304     return &_args;
1305   }
1306 
1307   const SingleTypeEntry* ret() const {
1308     assert(has_return(), "no profiling of return value");
1309     return &_ret;
1310   }
1311 
1312   virtual bool is_VirtualCallTypeData() const { return true; }
1313 
1314   static int static_cell_count() {
1315     return -1;
1316   }
1317 
1318   static int compute_cell_count(BytecodeStream* stream) {
1319     return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
1320   }
1321 
1322   static void initialize(DataLayout* dl, int cell_count) {
1323     TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
1324   }
1325 
1326   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1327 
1328   virtual int cell_count() const {
1329     return VirtualCallData::static_cell_count() +
1330       TypeEntriesAtCall::header_cell_count() +
1331       int_at_unchecked(cell_count_global_offset());
1332   }
1333 
1334   int number_of_arguments() const {
1335     return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
1336   }
1337 
1338   void set_argument_type(int i, Klass* k) {
1339     assert(has_arguments(), "no arguments!");
1340     intptr_t current = _args.type(i);
1341     _args.set_type(i, TypeEntries::with_status(k, current));
1342   }
1343 
1344   void set_return_type(Klass* k) {
1345     assert(has_return(), "no return!");
1346     intptr_t current = _ret.type();
1347     _ret.set_type(TypeEntries::with_status(k, current));
1348   }
1349 
1350   // An entry for a return value takes less space than an entry for an
1351   // argument, so if the remainder of the number of cells divided by
1352   // the number of cells for an argument is nonzero, a return value
1353   // is profiled in this object.
1354   bool has_return() const {
1355     bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
1356     assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
1357     return res;
1358   }
1359 
1360   // An entry for a return value takes less space than an entry for an
1361   // argument, so if the number of cells exceeds the number of cells
1362   // needed for an argument, this object contains type information for
1363   // at least one argument.
1364   bool has_arguments() const {
1365     bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
1366     assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
1367     return res;
1368   }
1369 
1370   // Code generation support
1371   static ByteSize args_data_offset() {
1372     return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
1373   }
1374 
1375   ByteSize argument_type_offset(int i) {
1376     return _args.type_offset(i);
1377   }
1378 
1379   ByteSize return_type_offset() {
1380     return _ret.type_offset();
1381   }
1382 
1383   // GC support
1384   virtual void clean_weak_klass_links(bool always_clean) {
1385     ReceiverTypeData::clean_weak_klass_links(always_clean);
1386     if (has_arguments()) {
1387       _args.clean_weak_klass_links(always_clean);
1388     }
1389     if (has_return()) {
1390       _ret.clean_weak_klass_links(always_clean);
1391     }
1392   }
1393 
1394   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1395 };
1396 
1397 // RetData
1398 //
1399 // A RetData is used to access profiling information for a ret bytecode.
1400 // It is composed of a count of the number of times that the ret has
1401 // been executed, followed by a series of triples of the form
1402 // (bci, count, di) which count the number of times that some bci was the
1403 // target of the ret and cache a corresponding data displacement.
1404 class RetData : public CounterData {
1405 protected:
1406   enum {
1407     bci0_offset = counter_cell_count,
1408     count0_offset,
1409     displacement0_offset,
1410     ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1411   };
1412 
1413   void set_bci(uint row, int bci) {
1414     assert((uint)row < row_limit(), "oob");
1415     set_int_at(bci0_offset + row * ret_row_cell_count, bci);
1416   }
1417   void release_set_bci(uint row, int bci);
1418   void set_bci_count(uint row, uint count) {
1419     assert((uint)row < row_limit(), "oob");
1420     set_uint_at(count0_offset + row * ret_row_cell_count, count);
1421   }
1422   void set_bci_displacement(uint row, int disp) {
1423     set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
1424   }
1425 
1426 public:
1427   RetData(DataLayout* layout) : CounterData(layout) {
1428     assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
1429   }
1430 
1431   virtual bool is_RetData() const { return true; }
1432 
1433   enum {
1434     no_bci = -1 // value of bci when bci1/2 are not in use.
1435   };
1436 
1437   static int static_cell_count() {
1438     return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
1439   }
1440 
1441   virtual int cell_count() const {
1442     return static_cell_count();
1443   }
1444 
1445   static uint row_limit() {
1446     return BciProfileWidth;
1447   }
1448   static int bci_cell_index(uint row) {
1449     return bci0_offset + row * ret_row_cell_count;
1450   }
1451   static int bci_count_cell_index(uint row) {
1452     return count0_offset + row * ret_row_cell_count;
1453   }
1454   static int bci_displacement_cell_index(uint row) {
1455     return displacement0_offset + row * ret_row_cell_count;
1456   }
1457 
1458   // Direct accessors
1459   int bci(uint row) const {
1460     return int_at(bci_cell_index(row));
1461   }
1462   uint bci_count(uint row) const {
1463     return uint_at(bci_count_cell_index(row));
1464   }
1465   int bci_displacement(uint row) const {
1466     return int_at(bci_displacement_cell_index(row));
1467   }
1468 
1469   // Interpreter Runtime support
1470   address fixup_ret(int return_bci, MethodData* mdo);
1471 
1472   // Code generation support
1473   static ByteSize bci_offset(uint row) {
1474     return cell_offset(bci_cell_index(row));
1475   }
1476   static ByteSize bci_count_offset(uint row) {
1477     return cell_offset(bci_count_cell_index(row));
1478   }
1479   static ByteSize bci_displacement_offset(uint row) {
1480     return cell_offset(bci_displacement_cell_index(row));
1481   }
1482 
1483   // Specific initialization.
1484   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1485 
1486   void print_data_on(outputStream* st, const char* extra = NULL) const;
1487 };
1488 
1489 // BranchData
1490 //
1491 // A BranchData is used to access profiling data for a two-way branch.
1492 // It consists of taken and not_taken counts as well as a data displacement
1493 // for the taken case.
1494 class BranchData : public JumpData {
1495   friend class VMStructs;
1496   friend class JVMCIVMStructs;
1497 protected:
1498   enum {
1499     not_taken_off_set = jump_cell_count,
1500     branch_cell_count
1501   };
1502 
1503   void set_displacement(int displacement) {
1504     set_int_at(displacement_off_set, displacement);
1505   }
1506 
1507 public:
1508   BranchData(DataLayout* layout) : JumpData(layout) {
1509     assert(layout->tag() == DataLayout::branch_data_tag || layout->tag() == DataLayout::acmp_data_tag, "wrong type");
1510   }
1511 
1512   virtual bool is_BranchData() const { return true; }
1513 
1514   static int static_cell_count() {
1515     return branch_cell_count;
1516   }
1517 
1518   virtual int cell_count() const {
1519     return static_cell_count();
1520   }
1521 
1522   // Direct accessor
1523   uint not_taken() const {
1524     return uint_at(not_taken_off_set);
1525   }
1526 
1527   void set_not_taken(uint cnt) {
1528     set_uint_at(not_taken_off_set, cnt);
1529   }
1530 
1531   uint inc_not_taken() {
1532     uint cnt = not_taken() + 1;
1533     // Did we wrap? Will compiler screw us??
1534     if (cnt == 0) cnt--;
1535     set_uint_at(not_taken_off_set, cnt);
1536     return cnt;
1537   }
1538 
1539   // Code generation support
1540   static ByteSize not_taken_offset() {
1541     return cell_offset(not_taken_off_set);
1542   }
1543   static ByteSize branch_data_size() {
1544     return cell_offset(branch_cell_count);
1545   }
1546 
1547   // Specific initialization.
1548   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1549 
1550   void print_data_on(outputStream* st, const char* extra = NULL) const;
1551 };
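     // Together with the inherited taken() count, not_taken() lets a compiler
     // estimate a branch probability.  An illustrative sketch of the arithmetic
     // (real heuristics also guard against immature or saturated counts):
     //
     //   BranchData* bd = ...;   // hypothetical entry for an if-bytecode
     //   uint taken = bd->taken(), not_taken = bd->not_taken();
     //   float taken_prob = (taken + not_taken == 0)
     //       ? 0.5f
     //       : (float)taken / (float)(taken + not_taken);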
1552 
1553 // ArrayData
1554 //
1555 // An ArrayData is a base class for accessing profiling data which does
1556 // not have a statically known size.  It consists of an array length
1557 // and an array start.
1558 class ArrayData : public ProfileData {
1559   friend class VMStructs;
1560   friend class JVMCIVMStructs;
1561 protected:
1562   friend class DataLayout;
1563 
1564   enum {
1565     array_len_off_set,
1566     array_start_off_set
1567   };
1568 
1569   uint array_uint_at(int index) const {
1570     int aindex = index + array_start_off_set;
1571     return uint_at(aindex);
1572   }
1573   int array_int_at(int index) const {
1574     int aindex = index + array_start_off_set;
1575     return int_at(aindex);
1576   }
1577   oop array_oop_at(int index) const {
1578     int aindex = index + array_start_off_set;
1579     return oop_at(aindex);
1580   }
1581   void array_set_int_at(int index, int value) {
1582     int aindex = index + array_start_off_set;
1583     set_int_at(aindex, value);
1584   }
1585 
1586   // Code generation support for subclasses.
1587   static ByteSize array_element_offset(int index) {
1588     return cell_offset(array_start_off_set + index);
1589   }
1590 
1591 public:
1592   ArrayData(DataLayout* layout) : ProfileData(layout) {}
1593 
1594   virtual bool is_ArrayData() const { return true; }
1595 
1596   static int static_cell_count() {
1597     return -1;
1598   }
1599 
1600   int array_len() const {
1601     return int_at_unchecked(array_len_off_set);
1602   }
1603 
1604   virtual int cell_count() const {
1605     return array_len() + 1;
1606   }
1607 
1608   // Code generation support
1609   static ByteSize array_len_offset() {
1610     return cell_offset(array_len_off_set);
1611   }
1612   static ByteSize array_start_offset() {
1613     return cell_offset(array_start_off_set);
1614   }
1615 };
1616 
1617 // MultiBranchData
1618 //
1619 // A MultiBranchData is used to access profiling information for
1620 // a multi-way branch (*switch bytecodes).  It consists of a series
1621 // of (count, displacement) pairs, which count the number of times each
1622 // case was taken and specify the data displacement for each branch target.
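//
// For example, the cells of a MultiBranchData for a switch with two explicit
// cases are laid out (after the ArrayData length cell) roughly as:
//
//   default_count, default_displacement,
//   count_0, displacement_0,
//   count_1, displacement_1
//
// so number_of_cases() == (array_len() - 2) / per_case_cell_count.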
1623 class MultiBranchData : public ArrayData {
1624   friend class VMStructs;
1625   friend class JVMCIVMStructs;
1626 protected:
1627   enum {
1628     default_count_off_set,
1629     default_disaplacement_off_set,
1630     case_array_start
1631   };
1632   enum {
1633     relative_count_off_set,
1634     relative_displacement_off_set,
1635     per_case_cell_count
1636   };
1637 
1638   void set_default_displacement(int displacement) {
1639     array_set_int_at(default_disaplacement_off_set, displacement);
1640   }
1641   void set_displacement_at(int index, int displacement) {
1642     array_set_int_at(case_array_start +
1643                      index * per_case_cell_count +
1644                      relative_displacement_off_set,
1645                      displacement);
1646   }
1647 
1648 public:
1649   MultiBranchData(DataLayout* layout) : ArrayData(layout) {
1650     assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
1651   }
1652 
1653   virtual bool is_MultiBranchData() const { return true; }
1654 
1655   static int compute_cell_count(BytecodeStream* stream);
1656 
1657   int number_of_cases() const {
1658     int alen = array_len() - 2; // exclude the two cells of the default case
1659     assert(alen % per_case_cell_count == 0, "must be even");
1660     return (alen / per_case_cell_count);
1661   }
1662 
1663   uint default_count() const {
1664     return array_uint_at(default_count_off_set);
1665   }
1666   int default_displacement() const {
1667     return array_int_at(default_disaplacement_off_set);
1668   }
1669 
1670   uint count_at(int index) const {
1671     return array_uint_at(case_array_start +
1672                          index * per_case_cell_count +
1673                          relative_count_off_set);
1674   }
1675   int displacement_at(int index) const {
1676     return array_int_at(case_array_start +
1677                         index * per_case_cell_count +
1678                         relative_displacement_off_set);
1679   }
1680 
1681   // Code generation support
1682   static ByteSize default_count_offset() {
1683     return array_element_offset(default_count_off_set);
1684   }
1685   static ByteSize default_displacement_offset() {
1686     return array_element_offset(default_disaplacement_off_set);
1687   }
1688   static ByteSize case_count_offset(int index) {
1689     return case_array_offset() +
1690            (per_case_size() * index) +
1691            relative_count_offset();
1692   }
1693   static ByteSize case_array_offset() {
1694     return array_element_offset(case_array_start);
1695   }
1696   static ByteSize per_case_size() {
1697     return in_ByteSize(per_case_cell_count) * cell_size;
1698   }
1699   static ByteSize relative_count_offset() {
1700     return in_ByteSize(relative_count_off_set) * cell_size;
1701   }
1702   static ByteSize relative_displacement_offset() {
1703     return in_ByteSize(relative_displacement_off_set) * cell_size;
1704   }
1705 
1706   // Specific initialization.
1707   void post_initialize(BytecodeStream* stream, MethodData* mdo);
1708 
1709   void print_data_on(outputStream* st, const char* extra = NULL) const;
1710 };
1711 
1712 class ArgInfoData : public ArrayData {
1713 
1714 public:
1715   ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1716     assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1717   }
1718 
1719   virtual bool is_ArgInfoData() const { return true; }
1720 
1721 
1722   int number_of_args() const {
1723     return array_len();
1724   }
1725 
1726   uint arg_modified(int arg) const {
1727     return array_uint_at(arg);
1728   }
1729 
1730   void set_arg_modified(int arg, uint val) {
1731     array_set_int_at(arg, val);
1732   }
1733 
1734   void print_data_on(outputStream* st, const char* extra = NULL) const;
1735 };
1736 
1737 // ParametersTypeData
1738 //
1739 // A ParametersTypeData is used to access profiling information about
1740 // the types of parameters to a method.
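//
// Each profiled parameter occupies TypeStackSlotEntries::per_arg_count()
// cells of the array: a stack slot cell plus a type cell, so
// number_of_parameters() == array_len() / TypeStackSlotEntries::per_arg_count().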
1741 class ParametersTypeData : public ArrayData {
1742 
1743 private:
1744   TypeStackSlotEntries _parameters;
1745 
1746   static int stack_slot_local_offset(int i) {
1747     assert_profiling_enabled();
1748     return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
1749   }
1750 
1751   static int type_local_offset(int i) {
1752     assert_profiling_enabled();
1753     return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
1754   }
1755 
1756   static bool profiling_enabled();
1757   static void assert_profiling_enabled() {
1758     assert(profiling_enabled(), "method parameters profiling should be on");
1759   }
1760 
1761 public:
1762   ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
1763     assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
1764     // Some compilers (VC++) don't want this passed in member initialization list
1765     _parameters.set_profile_data(this);
1766   }
1767 
1768   static int compute_cell_count(Method* m);
1769 
1770   virtual bool is_ParametersTypeData() const { return true; }
1771 
1772   virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
1773 
1774   int number_of_parameters() const {
1775     return array_len() / TypeStackSlotEntries::per_arg_count();
1776   }
1777 
1778   const TypeStackSlotEntries* parameters() const { return &_parameters; }
1779 
1780   uint stack_slot(int i) const {
1781     return _parameters.stack_slot(i);
1782   }
1783 
1784   void set_type(int i, Klass* k) {
1785     intptr_t current = _parameters.type(i);
1786     _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
1787   }
1788 
1789   virtual void clean_weak_klass_links(bool always_clean) {
1790     _parameters.clean_weak_klass_links(always_clean);
1791   }
1792 
1793   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1794 
1795   static ByteSize stack_slot_offset(int i) {
1796     return cell_offset(stack_slot_local_offset(i));
1797   }
1798 
1799   static ByteSize type_offset(int i) {
1800     return cell_offset(type_local_offset(i));
1801   }
1802 };
1803 
1804 // SpeculativeTrapData
1805 //
1806 // A SpeculativeTrapData is used to record traps due to type
1807 // speculation. It records the root method of the compilation: the fact
1808 // that a type speculation is wrong in the context of one compilation
1809 // (for method1) doesn't mean it's wrong in the context of another one
1810 // (for method2). Type speculation could have more/different data in the
1811 // context of the compilation of method2, so it's worthwhile to retry an
1812 // optimization that failed for the compilation of method1 in the
1813 // context of the compilation of method2.
1814 // Space for SpeculativeTrapData entries is allocated from the extra
1815 // data space in the MDO. If we run out of space, the trap data for
1816 // the ProfileData at that bci is updated.
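// Such an entry is typically obtained through
// MethodData::allocate_bci_to_data(bci, m) (declared later in this file),
// which falls back to the regular ProfileData at that bci when no extra
// space is left.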
1817 class SpeculativeTrapData : public ProfileData {
1818 protected:
1819   enum {
1820     speculative_trap_method,
1821 #ifndef _LP64
1822     // The size of the area for traps is a multiple of the header
1823     // size, 2 cells on 32 bits. Packed at the end of this area are
1824     // argument info entries (with tag
1825     // DataLayout::arg_info_data_tag). The logic in
1826     // MethodData::bci_to_extra_data() that guarantees traps don't
1827     // overflow over argument info entries assumes the size of a
1828     // SpeculativeTrapData is twice the header size. On 32 bits, a
1829     // SpeculativeTrapData must be 4 cells.
1830     padding,
1831 #endif
1832     speculative_trap_cell_count
1833   };
1834 public:
1835   SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
1836     assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
1837   }
1838 
1839   virtual bool is_SpeculativeTrapData() const { return true; }
1840 
1841   static int static_cell_count() {
1842     return speculative_trap_cell_count;
1843   }
1844 
1845   virtual int cell_count() const {
1846     return static_cell_count();
1847   }
1848 
1849   // Direct accessor
1850   Method* method() const {
1851     return (Method*)intptr_at(speculative_trap_method);
1852   }
1853 
1854   void set_method(Method* m) {
1855     assert(!m->is_old(), "cannot add old methods");
1856     set_intptr_at(speculative_trap_method, (intptr_t)m);
1857   }
1858 
1859   static ByteSize method_offset() {
1860     return cell_offset(speculative_trap_method);
1861   }
1862 
1863   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1864 };
1865 
1866 class ArrayLoadStoreData : public ProfileData {
1867 private:
1868   enum {
1869     flat_array_flag = DataLayout::first_flag,
1870     null_free_array_flag = flat_array_flag + 1,
1871   };
1872 
1873   SingleTypeEntry _array;
1874   SingleTypeEntry _element;
1875 
1876 public:
1877   ArrayLoadStoreData(DataLayout* layout) :
1878     ProfileData(layout),
1879     _array(0),
1880     _element(SingleTypeEntry::static_cell_count()) {
1881     assert(layout->tag() == DataLayout::array_load_store_data_tag, "wrong type");
1882     _array.set_profile_data(this);
1883     _element.set_profile_data(this);
1884   }
1885 
1886   const SingleTypeEntry* array() const {
1887     return &_array;
1888   }
1889 
1890   const SingleTypeEntry* element() const {
1891     return &_element;
1892   }
1893 
1894   virtual bool is_ArrayLoadStoreData() const { return true; }
1895 
1896   static int static_cell_count() {
1897     return SingleTypeEntry::static_cell_count() * 2;
1898   }
1899 
1900   virtual int cell_count() const {
1901     return static_cell_count();
1902   }
1903 
1904   void set_flat_array() { set_flag_at(flat_array_flag); }
1905   bool flat_array() const { return flag_at(flat_array_flag); }
1906 
1907   void set_null_free_array() { set_flag_at(null_free_array_flag); }
1908   bool null_free_array() const { return flag_at(null_free_array_flag); }
1909 
1910   // Code generation support
1911   static int flat_array_byte_constant() {
1912     return flag_number_to_constant(flat_array_flag);
1913   }
1914 
1915   static int null_free_array_byte_constant() {
1916     return flag_number_to_constant(null_free_array_flag);
1917   }
1918 
1919   static ByteSize array_offset() {
1920     return cell_offset(0);
1921   }
1922 
1923   static ByteSize element_offset() {
1924     return cell_offset(SingleTypeEntry::static_cell_count());
1925   }
1926 
1927   virtual void clean_weak_klass_links(bool always_clean) {
1928     _array.clean_weak_klass_links(always_clean);
1929     _element.clean_weak_klass_links(always_clean);
1930   }
1931 
1932   static ByteSize array_load_store_data_size() {
1933     return cell_offset(static_cell_count());
1934   }
1935 
1936   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
1937 };
1938 
1939 class ACmpData : public BranchData {
1940 private:
1941   enum {
1942     left_inline_type_flag = DataLayout::first_flag,
1943     right_inline_type_flag
1944   };
1945 
1946   SingleTypeEntry _left;
1947   SingleTypeEntry _right;
1948 
1949 public:
1950   ACmpData(DataLayout* layout) :
1951     BranchData(layout),
1952     _left(BranchData::static_cell_count()),
1953     _right(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count()) {
1954     assert(layout->tag() == DataLayout::acmp_data_tag, "wrong type");
1955     _left.set_profile_data(this);
1956     _right.set_profile_data(this);
1957   }
1958 
1959   const SingleTypeEntry* left() const {
1960     return &_left;
1961   }
1962 
1963   const SingleTypeEntry* right() const {
1964     return &_right;
1965   }
1966 
1967   virtual bool is_ACmpData() const { return true; }
1968 
1969   static int static_cell_count() {
1970     return BranchData::static_cell_count() + SingleTypeEntry::static_cell_count() * 2;
1971   }
1972 
1973   virtual int cell_count() const {
1974     return static_cell_count();
1975   }
1976 
1977   void set_left_inline_type() { set_flag_at(left_inline_type_flag); }
1978   bool left_inline_type() const { return flag_at(left_inline_type_flag); }
1979 
1980   void set_right_inline_type() { set_flag_at(right_inline_type_flag); }
1981   bool right_inline_type() const { return flag_at(right_inline_type_flag); }
1982 
1983   // Code generation support
1984   static int left_inline_type_byte_constant() {
1985     return flag_number_to_constant(left_inline_type_flag);
1986   }
1987 
1988   static int right_inline_type_byte_constant() {
1989     return flag_number_to_constant(right_inline_type_flag);
1990   }
1991 
1992   static ByteSize left_offset() {
1993     return cell_offset(BranchData::static_cell_count());
1994   }
1995 
1996   static ByteSize right_offset() {
1997     return cell_offset(BranchData::static_cell_count() + SingleTypeEntry::static_cell_count());
1998   }
1999 
2000   virtual void clean_weak_klass_links(bool always_clean) {
2001     _left.clean_weak_klass_links(always_clean);
2002     _right.clean_weak_klass_links(always_clean);
2003   }
2004 
2005   static ByteSize acmp_data_size() {
2006     return cell_offset(static_cell_count());
2007   }
2008 
2009   virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
2010 };
2011 
2012 // MethodData*
2013 //
2014 // A MethodData* holds information which has been collected about
2015 // a method.  Its layout looks like this:
2016 //
2017 // -----------------------------
2018 // | header                    |
2019 // | klass                     |
2020 // -----------------------------
2021 // | method                    |
2022 // | size of the MethodData*   |
2023 // -----------------------------
2024 // | Data entries...           |
2025 // |   (variable size)         |
2026 // |                           |
2027 // .                           .
2028 // .                           .
2029 // .                           .
2030 // |                           |
2031 // -----------------------------
2032 //
2033 // The data entry area is a heterogeneous array of DataLayouts. Each
2034 // DataLayout in the array corresponds to a specific bytecode in the
2035 // method.  The entries in the array are sorted by the corresponding
2036 // bytecode.  Access to the data is via resource-allocated ProfileData
2037 // objects, which point to the underlying blocks of DataLayout structures.
2038 //
2039 // During interpretation, if profiling is enabled, the interpreter
2040 // maintains a method data pointer (mdp), which points at the entry
2041 // in the array corresponding to the current bci.  In the course of
2042 // interpretation, when a bytecode is encountered that has profile data
2043 // associated with it, the entry pointed to by mdp is updated, then the
2044 // mdp is adjusted to point to the next appropriate DataLayout.  If mdp
2045 // is NULL to begin with, the interpreter assumes that the current method
2046 // is not (yet) being profiled.
2047 //
2048 // In MethodData* parlance, "dp" is a "data pointer", the actual address
2049 // of a DataLayout element.  A "di" is a "data index", the offset in bytes
2050 // from the base of the data entry array.  A "displacement" is a byte offset,
2051 // stored in certain ProfileData objects, that indicates the amount by which
2052 // the mdp must be adjusted in the event of a change in control flow.
2053 //
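// For example (a sketch only, not the actual interpreter update code),
// after counting a taken branch the mdp is advanced by the displacement
// recorded in that branch's profile entry:
//
//   BranchData* data = ...;        // profile entry at the branch bci
//   data->inc_taken();             // taken-count and displacement accessors
//   mdp += data->displacement();   // assumed from JumpData, declared earlier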
2054 
2055 class CleanExtraDataClosure : public StackObj {
2056 public:
2057   virtual bool is_live(Method* m) = 0;
2058 };
2059 
2060 
2061 #if INCLUDE_JVMCI
2062 // Encapsulates an encoded speculation reason. These are linked together in
2063 // a list that is atomically appended to during deoptimization. Entries are
2064 // never removed from the list.
2065 // @see jdk.vm.ci.hotspot.HotSpotSpeculationLog.HotSpotSpeculationEncoding
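//
// A minimal traversal sketch (assuming the list head has already been
// populated via add_failed_speculation()):
//
//   for (FailedSpeculation* fs = *failed_speculations_address;
//        fs != NULL; fs = fs->next()) {
//     // fs->data() / fs->data_len() give the encoded speculation bytes
//   }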
2066 class FailedSpeculation: public CHeapObj<mtCompiler> {
2067  private:
2068   // The length of HotSpotSpeculationEncoding.toByteArray(). The data itself
2069   // is an array embedded at the end of this object.
2070   int   _data_len;
2071 
2072   // Next entry in a linked list.
2073   FailedSpeculation* _next;
2074 
2075   FailedSpeculation(address data, int data_len);
2076 
2077   FailedSpeculation** next_adr() { return &_next; }
2078 
2079   // Placement new operator for inlining the speculation data into
2080   // the FailedSpeculation object.
2081   void* operator new(size_t size, size_t fs_size) throw();
2082 
2083  public:
2084   char* data()         { return (char*)(((address) this) + sizeof(FailedSpeculation)); }
2085   int data_len() const { return _data_len; }
2086   FailedSpeculation* next() const { return _next; }
2087 
2088   // Atomically appends a speculation from nm to the list whose head is at (*failed_speculations_address).
2089   // Returns false if the FailedSpeculation object could not be allocated.
2090   static bool add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len);
2091 
2092   // Frees all entries in the linked list whose head is at (*failed_speculations_address).
2093   static void free_failed_speculations(FailedSpeculation** failed_speculations_address);
2094 };
2095 #endif
2096 
2097 class ciMethodData;
2098 
2099 class MethodData : public Metadata {
2100   friend class VMStructs;
2101   friend class JVMCIVMStructs;
2102 private:
2103   friend class ProfileData;
2104   friend class TypeEntriesAtCall;
2105   friend class ciMethodData;
2106 
2107   // If you add a new field that points to any metaspace object, you
2108   // must add this field to MethodData::metaspace_pointers_do().
2109 
2110   // Back pointer to the Method*
2111   Method* _method;
2112 
2113   // Size of this MethodData object in bytes
2114   int _size;
2115 
2116   // Cached hint for bci_to_dp and bci_to_data
2117   int _hint_di;
2118 
2119   Mutex _extra_data_lock;
2120 
2121   MethodData(const methodHandle& method);
2122 public:
2123   static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2124 
2125   virtual bool is_methodData() const { return true; }
2126   void initialize();
2127 
2128   // Whole-method sticky bits and flags
2129   enum {
2130     _trap_hist_limit    = Deoptimization::Reason_TRAP_HISTORY_LENGTH,
2131     _trap_hist_mask     = max_jubyte,
2132     _extra_data_count   = 4     // extra DataLayout headers, for trap history
2133   }; // Public flag values
2134 
2135   // Compiler-related counters.
2136   class CompilerCounters {
2137     friend class VMStructs;
2138     friend class JVMCIVMStructs;
2139 
2140     uint _nof_decompiles;             // count of all nmethod removals
2141     uint _nof_overflow_recompiles;    // recompile count, excluding recomp. bits
2142     uint _nof_overflow_traps;         // trap count, excluding _trap_hist
2143     union {
2144       intptr_t _align;
2145       // JVMCI separates trap history for OSR compilations from normal compilations
2146       u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
2147     } _trap_hist;
2148 
2149   public:
2150     CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0) {
2151 #ifndef ZERO
2152       // Some Zero platforms do not have expected alignment, and do not use
2153       // this code. static_assert would still fire and fail for them.
2154       static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
2155 #endif
2156       uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
2157       Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
2158     }
2159 
2160     // Return (uint)-1 for overflow.
2161     uint trap_count(int reason) const {
2162       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
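      // A saturated counter byte (0xFF) maps to (uint)-1 below:
      // ((0xFF + 1) & _trap_hist_mask) - 1 == -1, the documented overflow value.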
2163       return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
2164     }
2165 
2166     uint inc_trap_count(int reason) {
2167       // Count another trap, anywhere in this method.
2168       assert(reason >= 0, "must be single trap");
2169       assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2170       uint cnt1 = 1 + _trap_hist._array[reason];
2171       if ((cnt1 & _trap_hist_mask) != 0) {  // if no counter overflow...
2172         _trap_hist._array[reason] = cnt1;
2173         return cnt1;
2174       } else {
2175         return _trap_hist_mask + (++_nof_overflow_traps);
2176       }
2177     }
2178 
2179     uint overflow_trap_count() const {
2180       return _nof_overflow_traps;
2181     }
2182     uint overflow_recompile_count() const {
2183       return _nof_overflow_recompiles;
2184     }
2185     uint inc_overflow_recompile_count() {
2186       return ++_nof_overflow_recompiles;
2187     }
2188     uint decompile_count() const {
2189       return _nof_decompiles;
2190     }
2191     uint inc_decompile_count() {
2192       return ++_nof_decompiles;
2193     }
2194 
2195     // Support for code generation
2196     static ByteSize trap_history_offset() {
2197       return byte_offset_of(CompilerCounters, _trap_hist._array);
2198     }
2199   };
2200 
2201 private:
2202   CompilerCounters _compiler_counters;
2203 
2204   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2205   intx              _eflags;          // flags on escape information
2206   intx              _arg_local;       // bit set of non-escaping arguments
2207   intx              _arg_stack;       // bit set of stack-allocatable arguments
2208   intx              _arg_returned;    // bit set of returned arguments
2209 
2210   int               _creation_mileage; // method mileage at MDO creation
2211 
2212   // How many invocations has this MDO seen?
2213   // These counters are used to determine the exact age of the MDO.
2214   // We need them because with tiered compilation a method can be
2215   // concurrently executed at different levels.
2216   InvocationCounter _invocation_counter;
2217   // Same for backedges.
2218   InvocationCounter _backedge_counter;
2219   // Counter values at the time profiling started.
2220   int               _invocation_counter_start;
2221   int               _backedge_counter_start;
2222   uint              _tenure_traps;
2223   int               _invoke_mask;      // per-method Tier0InvokeNotifyFreqLog
2224   int               _backedge_mask;    // per-method Tier0BackedgeNotifyFreqLog
2225 
2226 #if INCLUDE_RTM_OPT
2227   // State of RTM code generation during compilation of the method
2228   int               _rtm_state;
2229 #endif
2230 
2231   // Number of loops and blocks is computed when compiling the first
2232   // time with C1. It is used to determine if the method is trivial.
2233   short             _num_loops;
2234   short             _num_blocks;
2235   // Does this method contain anything worth profiling?
2236   enum WouldProfile {unknown, no_profile, profile};
2237   WouldProfile      _would_profile;
2238 
2239 #if INCLUDE_JVMCI
2240   // Support for HotSpotMethodData.setCompiledIRSize(int)
2241   int                _jvmci_ir_size;
2242   FailedSpeculation* _failed_speculations;
2243 #endif
2244 
2245   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
2246   int _data_size;
2247 
2248   // Data index for the area dedicated to parameters: parameters_uninitialized
2249   // (-1) until it is computed, no_parameters (-2) if parameters are not profiled.
2250   enum { no_parameters = -2, parameters_uninitialized = -1 };
2251   int _parameters_type_data_di;
2252 
2253   // Beginning of the data entries
2254   intptr_t _data[1];
2255 
2256   // Helper for size computation
2257   static int compute_data_size(BytecodeStream* stream);
2258   static int bytecode_cell_count(Bytecodes::Code code);
2259   static bool is_speculative_trap_bytecode(Bytecodes::Code code);
2260   enum { no_profile_data = -1, variable_cell_count = -2 };
2261 
2262   // Helper for initialization
2263   DataLayout* data_layout_at(int data_index) const {
2264     assert(data_index % sizeof(intptr_t) == 0, "unaligned");
2265     return (DataLayout*) (((address)_data) + data_index);
2266   }
2267 
2268   // Initialize an individual data segment.  Returns the size of
2269   // the segment in bytes.
2270   int initialize_data(BytecodeStream* stream, int data_index);
2271 
2272   // Helper for data_at
2273   DataLayout* limit_data_position() const {
2274     return data_layout_at(_data_size);
2275   }
2276   bool out_of_bounds(int data_index) const {
2277     return data_index >= data_size();
2278   }
2279 
2280   // Give each of the data entries a chance to perform specific
2281   // data initialization.
2282   void post_initialize(BytecodeStream* stream);
2283 
2284   // hint accessors
2285   int      hint_di() const  { return _hint_di; }
2286   void set_hint_di(int di)  {
2287     assert(!out_of_bounds(di), "hint_di out of bounds");
2288     _hint_di = di;
2289   }
2290 
2291   DataLayout* data_layout_before(int bci) {
2292     // avoid SEGV on this edge case
2293     if (data_size() == 0)
2294       return NULL;
2295     DataLayout* layout = data_layout_at(hint_di());
2296     if (layout->bci() <= bci)
2297       return layout;
2298     return data_layout_at(first_di());
2299   }
2300 
2301   // What is the index of the first data entry?
2302   int first_di() const { return 0; }
2303 
2304   ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
2305   // Find or create an extra ProfileData:
2306   ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
2307 
2308   // return the argument info cell
2309   ArgInfoData *arg_info();
2310 
2311   enum {
2312     no_type_profile = 0,
2313     type_profile_jsr292 = 1,
2314     type_profile_all = 2
2315   };
2316 
2317   static bool profile_jsr292(const methodHandle& m, int bci);
2318   static bool profile_unsafe(const methodHandle& m, int bci);
2319   static bool profile_memory_access(const methodHandle& m, int bci);
2320   static int profile_arguments_flag();
2321   static bool profile_all_arguments();
2322   static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
2323   static int profile_return_flag();
2324   static bool profile_all_return();
2325   static bool profile_return_for_invoke(const methodHandle& m, int bci);
2326   static int profile_parameters_flag();
2327   static bool profile_parameters_jsr292_only();
2328   static bool profile_all_parameters();
2329 
2330   void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
2331   void verify_extra_data_clean(CleanExtraDataClosure* cl);
2332 
2333 public:
2334   void clean_extra_data(CleanExtraDataClosure* cl);
2335 
2336   static int header_size() {
2337     return sizeof(MethodData)/wordSize;
2338   }
2339 
2340   // Compute the size of a MethodData* before it is created.
2341   static int compute_allocation_size_in_bytes(const methodHandle& method);
2342   static int compute_allocation_size_in_words(const methodHandle& method);
2343   static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
2344 
2345   // Determine if a given bytecode can have profile information.
2346   static bool bytecode_has_profile(Bytecodes::Code code) {
2347     return bytecode_cell_count(code) != no_profile_data;
2348   }
2349 
2350   // reset into original state
2351   void init();
2352 
2353   // My size
2354   int size_in_bytes() const { return _size; }
2355   int size() const    { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
2356 
2357   int      creation_mileage() const { return _creation_mileage; }
2358   void set_creation_mileage(int x)  { _creation_mileage = x; }
2359 
2360   int invocation_count() {
2361     if (invocation_counter()->carry()) {
2362       return InvocationCounter::count_limit;
2363     }
2364     return invocation_counter()->count();
2365   }
2366   int backedge_count() {
2367     if (backedge_counter()->carry()) {
2368       return InvocationCounter::count_limit;
2369     }
2370     return backedge_counter()->count();
2371   }
2372 
2373   int invocation_count_start() {
2374     if (invocation_counter()->carry()) {
2375       return 0;
2376     }
2377     return _invocation_counter_start;
2378   }
2379 
2380   int backedge_count_start() {
2381     if (backedge_counter()->carry()) {
2382       return 0;
2383     }
2384     return _backedge_counter_start;
2385   }
2386 
2387   int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
2388   int backedge_count_delta()   { return backedge_count()   - backedge_count_start();   }
2389 
2390   void reset_start_counters() {
2391     _invocation_counter_start = invocation_count();
2392     _backedge_counter_start = backedge_count();
2393   }
2394 
2395   InvocationCounter* invocation_counter()     { return &_invocation_counter; }
2396   InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
2397 
2398 #if INCLUDE_JVMCI
2399   FailedSpeculation** get_failed_speculations_address() {
2400     return &_failed_speculations;
2401   }
2402 #endif
2403 
2404 #if INCLUDE_RTM_OPT
2405   int rtm_state() const {
2406     return _rtm_state;
2407   }
2408   void set_rtm_state(RTMState rstate) {
2409     _rtm_state = (int)rstate;
2410   }
2411   void atomic_set_rtm_state(RTMState rstate) {
2412     Atomic::store(&_rtm_state, (int)rstate);
2413   }
2414 
2415   static int rtm_state_offset_in_bytes() {
2416     return offset_of(MethodData, _rtm_state);
2417   }
2418 #endif
2419 
2420   void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
2421   bool would_profile() const                  { return _would_profile != no_profile; }
2422 
2423   int num_loops() const                       { return _num_loops;  }
2424   void set_num_loops(int n)                   { _num_loops = n;     }
2425   int num_blocks() const                      { return _num_blocks; }
2426   void set_num_blocks(int n)                  { _num_blocks = n;    }
2427 
2428   bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
2429   static int mileage_of(Method* m);
2430 
2431   // Support for interprocedural escape analysis, from Thomas Kotzmann.
2432   enum EscapeFlag {
2433     estimated    = 1 << 0,
2434     return_local = 1 << 1,
2435     return_allocated = 1 << 2,
2436     allocated_escapes = 1 << 3,
2437     unknown_modified = 1 << 4
2438   };
2439 
2440   intx eflags()                                  { return _eflags; }
2441   intx arg_local()                               { return _arg_local; }
2442   intx arg_stack()                               { return _arg_stack; }
2443   intx arg_returned()                            { return _arg_returned; }
2444   uint arg_modified(int a)                       { ArgInfoData *aid = arg_info();
2445                                                    assert(aid != NULL, "arg_info must be not null");
2446                                                    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2447                                                    return aid->arg_modified(a); }
2448 
2449   void set_eflags(intx v)                        { _eflags = v; }
2450   void set_arg_local(intx v)                     { _arg_local = v; }
2451   void set_arg_stack(intx v)                     { _arg_stack = v; }
2452   void set_arg_returned(intx v)                  { _arg_returned = v; }
2453   void set_arg_modified(int a, uint v)           { ArgInfoData *aid = arg_info();
2454                                                    assert(aid != NULL, "arg_info must be not null");
2455                                                    assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
2456                                                    aid->set_arg_modified(a, v); }
2457 
2458   void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
2459 
2460   // Location and size of data area
2461   address data_base() const {
2462     return (address) _data;
2463   }
2464   int data_size() const {
2465     return _data_size;
2466   }
2467 
2468   int parameters_size_in_bytes() const {
2469     ParametersTypeData* param = parameters_type_data();
2470     return param == NULL ? 0 : param->size_in_bytes();
2471   }
2472 
2473   // Accessors
2474   Method* method() const { return _method; }
2475 
2476   // Get the data at an arbitrary (sort of) data index.
2477   ProfileData* data_at(int data_index) const;
2478 
2479   // Walk through the data in order.
2480   ProfileData* first_data() const { return data_at(first_di()); }
2481   ProfileData* next_data(ProfileData* current) const;
2482   DataLayout*  next_data_layout(DataLayout* current) const;
2483   bool is_valid(ProfileData* current) const { return current != NULL; }
2484   bool is_valid(DataLayout*  current) const { return current != NULL; }
2485 
2486   // Convert a dp (data pointer) to a di (data index).
2487   int dp_to_di(address dp) const {
2488     return dp - ((address)_data);
2489   }
2490 
2491   // bci to di/dp conversion.
2492   address bci_to_dp(int bci);
2493   int bci_to_di(int bci) {
2494     return dp_to_di(bci_to_dp(bci));
2495   }
2496 
2497   // Get the data at an arbitrary bci, or NULL if there is none.
2498   ProfileData* bci_to_data(int bci);
2499 
2500   // Same, but try to create an extra_data record if one is needed:
2501   ProfileData* allocate_bci_to_data(int bci, Method* m) {
2502     ProfileData* data = NULL;
2503     // If m is not NULL, try to allocate a SpeculativeTrapData entry first
2504     if (m == NULL) {
2505       data = bci_to_data(bci);
2506     }
2507     if (data != NULL) {
2508       return data;
2509     }
2510     data = bci_to_extra_data(bci, m, true);
2511     if (data != NULL) {
2512       return data;
2513     }
2514     // If SpeculativeTrapData allocation fails try to allocate a
2515     // regular entry
2516     data = bci_to_data(bci);
2517     if (data != NULL) {
2518       return data;
2519     }
2520     return bci_to_extra_data(bci, NULL, true);
2521   }
2522 
2523   // Add a handful of extra data records, for trap tracking.
2524   DataLayout* extra_data_base() const  { return limit_data_position(); }
2525   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
2526   DataLayout* args_data_limit() const  { return (DataLayout*)((address)this + size_in_bytes() -
2527                                                               parameters_size_in_bytes()); }
2528   int extra_data_size() const          { return (address)extra_data_limit() - (address)extra_data_base(); }
2529   static DataLayout* next_extra(DataLayout* dp);
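
  // A rough sketch of how the tail of this object is laid out (methodData.cpp
  // is authoritative):
  //
  //   data_base() ........ regular DataLayout entries (_data_size bytes)
  //   extra_data_base() .. extra trap entries (e.g. SpeculativeTrapData), with
  //                        the ArgInfoData record packed at the end of the area
  //   args_data_limit() .. start of the ParametersTypeData area, if any
  //   extra_data_limit() . (address)this + size_in_bytes()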
2530 
2531   // Return (uint)-1 for overflow.
2532   uint trap_count(int reason) const {
2533     return _compiler_counters.trap_count(reason);
2534   }
2535   // For loops:
2536   // Upper bounds for loops over trap reasons and counts:
2537   static uint trap_count_limit()  { return _trap_hist_mask; }
2538   uint inc_trap_count(int reason) {
2539     return _compiler_counters.inc_trap_count(reason);
2540   }
2541 
2542   uint overflow_trap_count() const {
2543     return _compiler_counters.overflow_trap_count();
2544   }
2545   uint overflow_recompile_count() const {
2546     return _compiler_counters.overflow_recompile_count();
2547   }
2548   uint inc_overflow_recompile_count() {
2549     return _compiler_counters.inc_overflow_recompile_count();
2550   }
2551   uint decompile_count() const {
2552     return _compiler_counters.decompile_count();
2553   }
2554   uint inc_decompile_count() {
2555     uint dec_count = _compiler_counters.inc_decompile_count();
2556     if (dec_count > (uint)PerMethodRecompilationCutoff) {
2557       method()->set_not_compilable("decompile_count > PerMethodRecompilationCutoff", CompLevel_full_optimization);
2558     }
2559     return dec_count;
2560   }
2561   uint tenure_traps() const {
2562     return _tenure_traps;
2563   }
2564   void inc_tenure_traps() {
2565     _tenure_traps += 1;
2566   }
2567 
2568   // Return pointer to area dedicated to parameters in MDO
2569   ParametersTypeData* parameters_type_data() const {
2570     assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
2571     return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
2572   }
2573 
2574   int parameters_type_data_di() const {
2575     assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
2576     return _parameters_type_data_di;
2577   }
2578 
2579   // Support for code generation
2580   static ByteSize data_offset() {
2581     return byte_offset_of(MethodData, _data[0]);
2582   }
2583 
2584   static ByteSize trap_history_offset() {
2585     return byte_offset_of(MethodData, _compiler_counters) + CompilerCounters::trap_history_offset();
2586   }
2587 
2588   static ByteSize invocation_counter_offset() {
2589     return byte_offset_of(MethodData, _invocation_counter);
2590   }
2591 
2592   static ByteSize backedge_counter_offset() {
2593     return byte_offset_of(MethodData, _backedge_counter);
2594   }
2595 
2596   static ByteSize invoke_mask_offset() {
2597     return byte_offset_of(MethodData, _invoke_mask);
2598   }
2599 
2600   static ByteSize backedge_mask_offset() {
2601     return byte_offset_of(MethodData, _backedge_mask);
2602   }
2603 
2604   static ByteSize parameters_type_data_di_offset() {
2605     return byte_offset_of(MethodData, _parameters_type_data_di);
2606   }
2607 
2608   virtual void metaspace_pointers_do(MetaspaceClosure* iter);
2609   virtual MetaspaceObj::Type type() const { return MethodDataType; }
2610 
2611   // Deallocation support - no metaspace pointer fields to deallocate
2612   void deallocate_contents(ClassLoaderData* loader_data) {}
2613 
2614   // GC support
2615   void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }
2616 
2617   // Printing
2618   void print_on      (outputStream* st) const;
2619   void print_value_on(outputStream* st) const;
2620 
2621   // printing support for method data
2622   void print_data_on(outputStream* st) const;
2623 
2624   const char* internal_name() const { return "{method data}"; }
2625 
2626   // verification
2627   void verify_on(outputStream* st);
2628   void verify_data_on(outputStream* st);
2629 
2630   static bool profile_parameters_for_method(const methodHandle& m);
2631   static bool profile_arguments();
2632   static bool profile_arguments_jsr292_only();
2633   static bool profile_return();
2634   static bool profile_parameters();
2635   static bool profile_return_jsr292_only();
2636 
2637   void clean_method_data(bool always_clean);
2638   void clean_weak_method_links();
2639   Mutex* extra_data_lock() { return &_extra_data_lock; }
2640 };
2641 
2642 #endif // SHARE_OOPS_METHODDATA_HPP