1 /*
   2  * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compilationPolicy.hpp"
  29 #include "compiler/compilerDefinitions.inline.hpp"
  30 #include "compiler/compilerOracle.hpp"
  31 #include "interpreter/bytecode.hpp"
  32 #include "interpreter/bytecodeStream.hpp"
  33 #include "interpreter/linkResolver.hpp"
  34 #include "memory/metaspaceClosure.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "oops/klass.inline.hpp"
  37 #include "oops/methodData.inline.hpp"
  38 #include "prims/jvmtiRedefineClasses.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/signature.hpp"
  45 #include "utilities/align.hpp"
  46 #include "utilities/checkedCast.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 // ==================================================================
  50 // DataLayout
  51 //
  52 // Overlay for generic profiling data.
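     //
     // Roughly, a DataLayout is one header word followed by a variable number
     // of pointer-sized cells (the exact field packing lives in methodData.hpp):
     //
     //   [ tag | flags | bci | ... ]   <- _header, packed into a single intptr_t
     //   [ cell 0 ]
     //   [ cell 1 ]
     //   ...
     //
     // The ProfileData classes below are thin accessors that interpret these
     // cells according to the tag.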
  53 
  54 // Some types of data layouts need a length field.
  55 bool DataLayout::needs_array_len(u1 tag) {
  56   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  57 }
  58 
  59 // Perform generic initialization of the data.  More specific
  60 // initialization occurs in overrides of ProfileData::post_initialize.
  61 void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  62   _header._bits = (intptr_t)0;
  63   _header._struct._tag = tag;
  64   _header._struct._bci = bci;
  65   for (int i = 0; i < cell_count; i++) {
  66     set_cell_at(i, (intptr_t)0);
  67   }
  68   if (needs_array_len(tag)) {
  69     set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  70   }
  71   if (tag == call_type_data_tag) {
  72     CallTypeData::initialize(this, cell_count);
  73   } else if (tag == virtual_call_type_data_tag) {
  74     VirtualCallTypeData::initialize(this, cell_count);
  75   }
  76 }
  77 
  78 void DataLayout::clean_weak_klass_links(bool always_clean) {
  79   ResourceMark m;
  80   data_in()->clean_weak_klass_links(always_clean);
  81 }
  82 
  83 
  84 // ==================================================================
  85 // ProfileData
  86 //
  87 // A ProfileData object is created to refer to a section of profiling
  88 // data in a structured way.
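     // A ProfileData is a thin (typically resource-allocated) wrapper around a
     // DataLayout*; DataLayout::data_in() below picks the ProfileData subclass
     // that matches the layout's tag.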
  89 
  90 // Constructor for invalid ProfileData.
  91 ProfileData::ProfileData() {
  92   _data = nullptr;
  93 }
  94 
  95 char* ProfileData::print_data_on_helper(const MethodData* md) const {
  96   DataLayout* dp  = md->extra_data_base();
  97   DataLayout* end = md->args_data_limit();
  98   stringStream ss;
  99   for (;; dp = MethodData::next_extra(dp)) {
 100     assert(dp < end, "moved past end of extra data");
 101     switch(dp->tag()) {
 102     case DataLayout::speculative_trap_data_tag:
 103       if (dp->bci() == bci()) {
 104         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
 105         int trap = data->trap_state();
 106         char buf[100];
 107         ss.print("trap/");
 108         data->method()->print_short_name(&ss);
 109         ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 110       }
 111       break;
 112     case DataLayout::bit_data_tag:
 113       break;
 114     case DataLayout::no_tag:
 115     case DataLayout::arg_info_data_tag:
 116       return ss.as_string();
 117       break;
 118     default:
 119       fatal("unexpected tag %d", dp->tag());
 120     }
 121   }
 122   return nullptr;
 123 }
 124 
 125 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
 126   print_data_on(st, print_data_on_helper(md));
 127 }
 128 
 129 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
 130   st->print("bci: %d ", bci());
 131   st->fill_to(tab_width_one + 1);
 132   st->print("%s", name);
 133   tab(st);
 134   int trap = trap_state();
 135   if (trap != 0) {
 136     char buf[100];
 137     st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 138   }
 139   if (extra != nullptr) {
 140     st->print("%s", extra);
 141   }
 142   int flags = data()->flags();
 143   if (flags != 0) {
 144     st->print("flags(%d) %p/%d", flags, data(), in_bytes(DataLayout::flags_offset()));
 145   }
 146 }
 147 
 148 void ProfileData::tab(outputStream* st, bool first) const {
 149   st->fill_to(first ? tab_width_one : tab_width_two);
 150 }
 151 
 152 // ==================================================================
 153 // BitData
 154 //
 155 // A BitData corresponds to a one-bit flag.  This is used to indicate
 156 // whether a checkcast bytecode has seen a null value.
 157 
 158 
 159 void BitData::print_data_on(outputStream* st, const char* extra) const {
 160   print_shared(st, "BitData", extra);
 161   st->cr();
 162 }
 163 
 164 // ==================================================================
 165 // CounterData
 166 //
 167 // A CounterData corresponds to a simple counter.
 168 
 169 void CounterData::print_data_on(outputStream* st, const char* extra) const {
 170   print_shared(st, "CounterData", extra);
 171   st->print_cr("count(%u)", count());
 172 }
 173 
 174 // ==================================================================
 175 // JumpData
 176 //
 177 // A JumpData is used to access profiling information for a direct
 178 // branch.  It is a counter, used for counting the number of branches,
 179 // plus a data displacement, used for realigning the data pointer to
 180 // the corresponding target bci.
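     //
     // For example, for a goto at bci 10 that targets bci 30, post_initialize
     // below stores bci_to_di(30) - dp_to_di(dp()); when the branch is taken,
     // the interpreter advances its mdp by that displacement to reach the
     // profile data of the target bci.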
 181 
 182 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 183   assert(stream->bci() == bci(), "wrong pos");
 184   int target;
 185   Bytecodes::Code c = stream->code();
 186   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 187     target = stream->dest_w();
 188   } else {
 189     target = stream->dest();
 190   }
 191   int my_di = mdo->dp_to_di(dp());
 192   int target_di = mdo->bci_to_di(target);
 193   int offset = target_di - my_di;
 194   set_displacement(offset);
 195 }
 196 
 197 void JumpData::print_data_on(outputStream* st, const char* extra) const {
 198   print_shared(st, "JumpData", extra);
 199   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 200 }
 201 
 202 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 203   // Parameter profiling includes the receiver
 204   int args_count = include_receiver ? 1 : 0;
 205   ResourceMark rm;
 206   ReferenceArgumentCount rac(signature);
 207   args_count += rac.count();
 208   args_count = MIN2(args_count, max);
 209   return args_count * per_arg_cell_count;
 210 }
 211 
 212 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
 213   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 214   assert(TypeStackSlotEntries::per_arg_count() > SingleTypeEntry::static_cell_count(), "code to test for arguments/results broken");
 215   const methodHandle m = stream->method();
 216   int bci = stream->bci();
 217   Bytecode_invoke inv(m, bci);
 218   int args_cell = 0;
 219   if (MethodData::profile_arguments_for_invoke(m, bci)) {
 220     args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
 221   }
 222   int ret_cell = 0;
 223   if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
 224     ret_cell = SingleTypeEntry::static_cell_count();
 225   }
 226   int header_cell = 0;
 227   if (args_cell + ret_cell > 0) {
 228     header_cell = header_cell_count();
 229   }
 230 
 231   return header_cell + args_cell + ret_cell;
 232 }
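
     // For instance, with argument and return profiling enabled, a call with
     // signature (Ljava/lang/String;I)Ljava/lang/Object; has one reference
     // argument and a reference return value, so compute_cell_count() above
     // yields header_cell_count() + per_arg_cell_count
     // + SingleTypeEntry::static_cell_count() cells.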
 233 
 234 class ArgumentOffsetComputer : public SignatureIterator {
 235 private:
 236   int _max;
 237   int _offset;
 238   GrowableArray<int> _offsets;
 239 
 240   friend class SignatureIterator;  // so do_parameters_on can call do_type
 241   void do_type(BasicType type) {
 242     if (is_reference_type(type) && _offsets.length() < _max) {
 243       _offsets.push(_offset);
 244     }
 245     _offset += parameter_type_word_count(type);
 246   }
 247 
 248  public:
 249   ArgumentOffsetComputer(Symbol* signature, int max)
 250     : SignatureIterator(signature),
 251       _max(max), _offset(0),
 252       _offsets(max) {
 253     do_parameters_on(this);  // non-virtual template execution
 254   }
 255 
 256   int off_at(int i) const { return _offsets.at(i); }
 257 };
 258 
 259 void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
 260   ResourceMark rm;
 261   int start = 0;
 262   // Parameter profiling includes the receiver
 263   if (include_receiver && has_receiver) {
 264     set_stack_slot(0, 0);
 265     set_type(0, type_none());
 266     start += 1;
 267   }
 268   ArgumentOffsetComputer aos(signature, _number_of_entries-start);
 269   for (int i = start; i < _number_of_entries; i++) {
 270     set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
 271     set_type(i, type_none());
 272   }
 273 }
 274 
 275 void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 276   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 277   Bytecode_invoke inv(stream->method(), stream->bci());
 278 
 279   if (has_arguments()) {
 280 #ifdef ASSERT
 281     ResourceMark rm;
 282     ReferenceArgumentCount rac(inv.signature());
 283     int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
 284     assert(count > 0, "room for args type but none found?");
 285     check_number_of_arguments(count);
 286 #endif
 287     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 288   }
 289 
 290   if (has_return()) {
 291     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 292     _ret.post_initialize();
 293   }
 294 }
 295 
 296 void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 297   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 298   Bytecode_invoke inv(stream->method(), stream->bci());
 299 
 300   if (has_arguments()) {
 301 #ifdef ASSERT
 302     ResourceMark rm;
 303     ReferenceArgumentCount rac(inv.signature());
 304     int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
 305     assert(count > 0, "room for args type but none found?");
 306     check_number_of_arguments(count);
 307 #endif
 308     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 309   }
 310 
 311   if (has_return()) {
 312     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 313     _ret.post_initialize();
 314   }
 315 }
 316 
 317 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 318   for (int i = 0; i < _number_of_entries; i++) {
 319     intptr_t p = type(i);
 320     Klass* k = (Klass*)klass_part(p);
 321     if (k != nullptr && (always_clean || !k->is_loader_alive())) {
 322       set_type(i, with_status((Klass*)nullptr, p));
 323     }
 324   }
 325 }
 326 
 327 void SingleTypeEntry::clean_weak_klass_links(bool always_clean) {
 328   intptr_t p = type();
 329   Klass* k = (Klass*)klass_part(p);
 330   if (k != nullptr && (always_clean || !k->is_loader_alive())) {
 331     set_type(with_status((Klass*)nullptr, p));
 332   }
 333 }
 334 
 335 bool TypeEntriesAtCall::return_profiling_enabled() {
 336   return MethodData::profile_return();
 337 }
 338 
 339 bool TypeEntriesAtCall::arguments_profiling_enabled() {
 340   return MethodData::profile_arguments();
 341 }
 342 
 343 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
 344   if (is_type_none(k)) {
 345     st->print("none");
 346   } else if (is_type_unknown(k)) {
 347     st->print("unknown");
 348   } else {
 349     valid_klass(k)->print_value_on(st);
 350   }
 351   if (was_null_seen(k)) {
 352     st->print(" (null seen)");
 353   }
 354 }
 355 
 356 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
 357   for (int i = 0; i < _number_of_entries; i++) {
 358     _pd->tab(st);
 359     st->print("%d: stack(%u) ", i, stack_slot(i));
 360     print_klass(st, type(i));
 361     st->cr();
 362   }
 363 }
 364 
 365 void SingleTypeEntry::print_data_on(outputStream* st) const {
 366   _pd->tab(st);
 367   print_klass(st, type());
 368   st->cr();
 369 }
 370 
 371 void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
 372   CounterData::print_data_on(st, extra);
 373   if (has_arguments()) {
 374     tab(st, true);
 375     st->print("argument types");
 376     _args.print_data_on(st);
 377   }
 378   if (has_return()) {
 379     tab(st, true);
 380     st->print("return type");
 381     _ret.print_data_on(st);
 382   }
 383 }
 384 
 385 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
 386   VirtualCallData::print_data_on(st, extra);
 387   if (has_arguments()) {
 388     tab(st, true);
 389     st->print("argument types");
 390     _args.print_data_on(st);
 391   }
 392   if (has_return()) {
 393     tab(st, true);
 394     st->print("return type");
 395     _ret.print_data_on(st);
 396   }
 397 }
 398 
 399 // ==================================================================
 400 // ReceiverTypeData
 401 //
 402 // A ReceiverTypeData is used to access profiling information about a
 403 // dynamic type check.  It consists of a counter which counts the total times
 404 // that the check is reached, and a series of (Klass*, count) pairs
 405 // which are used to store a type profile for the receiver of the check.
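     //
     // Schematically, the cells are:
     //
     //   [ count ]
     //   [ receiver(0) ] [ receiver_count(0) ]
     //   [ receiver(1) ] [ receiver_count(1) ]
     //   ...                                     (row_limit() rows)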
 406 
 407 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 408   for (uint row = 0; row < row_limit(); row++) {
 409     Klass* p = receiver(row);
 410     if (p != nullptr && (always_clean || !p->is_loader_alive())) {
 411       clear_row(row);
 412     }
 413   }
 414 }
 415 
 416 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 417   uint row;
 418   int entries = 0;
 419   for (row = 0; row < row_limit(); row++) {
 420     if (receiver(row) != nullptr)  entries++;
 421   }
 422   st->print_cr("count(%u) entries(%u)", count(), entries);
 423   int total = count();
 424   for (row = 0; row < row_limit(); row++) {
 425     if (receiver(row) != nullptr) {
 426       total += receiver_count(row);
 427     }
 428   }
 429   for (row = 0; row < row_limit(); row++) {
 430     if (receiver(row) != nullptr) {
 431       tab(st);
 432       receiver(row)->print_value_on(st);
 433       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 434     }
 435   }
 436 }
 437 void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
 438   print_shared(st, "ReceiverTypeData", extra);
 439   print_receiver_data_on(st);
 440 }
 441 
 442 void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
 443   print_shared(st, "VirtualCallData", extra);
 444   print_receiver_data_on(st);
 445 }
 446 
 447 // ==================================================================
 448 // RetData
 449 //
 450 // A RetData is used to access profiling information for a ret bytecode.
 451 // It is composed of a count of the number of times that the ret has
 452 // been executed, followed by a series of triples of the form
 453 // (bci, count, di) which count the number of times that some bci was the
 454 // target of the ret and cache a corresponding displacement.
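     //
     // Schematically:
     //
     //   [ count ]
     //   [ bci(0) ] [ bci_count(0) ] [ bci_displacement(0) ]
     //   [ bci(1) ] [ bci_count(1) ] [ bci_displacement(1) ]
     //   ...                    (row_limit() rows; unused rows hold no_bci)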
 455 
 456 void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 457   for (uint row = 0; row < row_limit(); row++) {
 458     set_bci_displacement(row, -1);
 459     set_bci(row, no_bci);
 460   }
 461   // release so other threads see a consistent state.  bci is used as
 462   // a valid flag for bci_displacement.
 463   OrderAccess::release();
 464 }
 465 
 466 // This routine needs to atomically update the RetData structure, so the
 467 // caller needs to hold the RetData_lock before it gets here.  Since taking
 468 // the lock can block (and allow GC) and since RetData is a ProfileData, which
 469 // is a wrapper around a derived oop, taking the lock in _this_ method will
 470 // basically cause the 'this' pointer's _data field to contain junk after the
 471 // lock.  We require the caller to take the lock before making the ProfileData
 472 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
 473 address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
 474   // First find the mdp which corresponds to the return bci.
 475   address mdp = h_mdo->bci_to_dp(return_bci);
 476 
 477   // Now check to see if any of the cache slots are open.
 478   for (uint row = 0; row < row_limit(); row++) {
 479     if (bci(row) == no_bci) {
 480       set_bci_displacement(row, checked_cast<int>(mdp - dp()));
 481       set_bci_count(row, DataLayout::counter_increment);
 482       // Barrier to ensure displacement is written before the bci; allows
 483       // the interpreter to read displacement without fear of race condition.
 484       release_set_bci(row, return_bci);
 485       break;
 486     }
 487   }
 488   return mdp;
 489 }
 490 
 491 void RetData::print_data_on(outputStream* st, const char* extra) const {
 492   print_shared(st, "RetData", extra);
 493   uint row;
 494   int entries = 0;
 495   for (row = 0; row < row_limit(); row++) {
 496     if (bci(row) != no_bci)  entries++;
 497   }
 498   st->print_cr("count(%u) entries(%u)", count(), entries);
 499   for (row = 0; row < row_limit(); row++) {
 500     if (bci(row) != no_bci) {
 501       tab(st);
 502       st->print_cr("bci(%d: count(%u) displacement(%d))",
 503                    bci(row), bci_count(row), bci_displacement(row));
 504     }
 505   }
 506 }
 507 
 508 // ==================================================================
 509 // BranchData
 510 //
 511 // A BranchData is used to access profiling data for a two-way branch.
 512 // It consists of taken and not_taken counts as well as a data displacement
 513 // for the taken case.
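     //
     // Only the taken case needs a displacement: on the not-taken path
     // execution falls through in bytecode order, so the interpreter simply
     // steps the mdp past this BranchData to the data that follows.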
 514 
 515 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 516   assert(stream->bci() == bci(), "wrong pos");
 517   int target = stream->dest();
 518   int my_di = mdo->dp_to_di(dp());
 519   int target_di = mdo->bci_to_di(target);
 520   int offset = target_di - my_di;
 521   set_displacement(offset);
 522 }
 523 
 524 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 525   print_shared(st, "BranchData", extra);
 526   if (data()->flags()) {
 527     tty->cr();
 528     tab(st);
 529   }
 530   st->print_cr("taken(%u) displacement(%d)",
 531                taken(), displacement());
 532   tab(st);
 533   st->print_cr("not taken(%u)", not_taken());
 534 }
 535 
 536 // ==================================================================
 537 // MultiBranchData
 538 //
 539 // A MultiBranchData is used to access profiling information for
 540 // a multi-way branch (*switch bytecodes).  It consists of a series
 541 // of (count, displacement) pairs, which count the number of times each
 542 // case was taken and specify the data displacement for each branch target.
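     //
     // For a tableswitch with N cases, compute_cell_count() below reserves
     // 1 + per_case_cell_count * (N + 1) cells: one cell for the array length,
     // plus a (count, displacement) pair for the default target and for each
     // of the N explicit cases.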
 543 
 544 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 545   int cell_count = 0;
 546   if (stream->code() == Bytecodes::_tableswitch) {
 547     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 548     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 549   } else {
 550     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 551     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 552   }
 553   return cell_count;
 554 }
 555 
 556 void MultiBranchData::post_initialize(BytecodeStream* stream,
 557                                       MethodData* mdo) {
 558   assert(stream->bci() == bci(), "wrong pos");
 559   int target;
 560   int my_di;
 561   int target_di;
 562   int offset;
 563   if (stream->code() == Bytecodes::_tableswitch) {
 564     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 565     int len = sw.length();
 566     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 567     for (int count = 0; count < len; count++) {
 568       target = sw.dest_offset_at(count) + bci();
 569       my_di = mdo->dp_to_di(dp());
 570       target_di = mdo->bci_to_di(target);
 571       offset = target_di - my_di;
 572       set_displacement_at(count, offset);
 573     }
 574     target = sw.default_offset() + bci();
 575     my_di = mdo->dp_to_di(dp());
 576     target_di = mdo->bci_to_di(target);
 577     offset = target_di - my_di;
 578     set_default_displacement(offset);
 579 
 580   } else {
 581     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 582     int npairs = sw.number_of_pairs();
 583     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 584     for (int count = 0; count < npairs; count++) {
 585       LookupswitchPair pair = sw.pair_at(count);
 586       target = pair.offset() + bci();
 587       my_di = mdo->dp_to_di(dp());
 588       target_di = mdo->bci_to_di(target);
 589       offset = target_di - my_di;
 590       set_displacement_at(count, offset);
 591     }
 592     target = sw.default_offset() + bci();
 593     my_di = mdo->dp_to_di(dp());
 594     target_di = mdo->bci_to_di(target);
 595     offset = target_di - my_di;
 596     set_default_displacement(offset);
 597   }
 598 }
 599 
 600 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
 601   print_shared(st, "MultiBranchData", extra);
 602   st->print_cr("default_count(%u) displacement(%d)",
 603                default_count(), default_displacement());
 604   int cases = number_of_cases();
 605   for (int i = 0; i < cases; i++) {
 606     tab(st);
 607     st->print_cr("count(%u) displacement(%d)",
 608                  count_at(i), displacement_at(i));
 609   }
 610 }
 611 
 612 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 613   print_shared(st, "ArgInfoData", extra);
 614   int nargs = number_of_args();
 615   for (int i = 0; i < nargs; i++) {
 616     st->print("  0x%x", arg_modified(i));
 617   }
 618   st->cr();
 619 }
 620 
 621 int ParametersTypeData::compute_cell_count(Method* m) {
 622   if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
 623     return 0;
 624   }
 625   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
 626   int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
 627   if (obj_args > 0) {
 628     return obj_args + 1; // 1 cell for array len
 629   }
 630   return 0;
 631 }
 632 
 633 void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 634   _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
 635 }
 636 
 637 bool ParametersTypeData::profiling_enabled() {
 638   return MethodData::profile_parameters();
 639 }
 640 
 641 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
 642   print_shared(st, "ParametersTypeData", extra);
 643   tab(st);
 644   _parameters.print_data_on(st);
 645   st->cr();
 646 }
 647 
 648 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
 649   print_shared(st, "SpeculativeTrapData", extra);
 650   tab(st);
 651   method()->print_short_name(st);
 652   st->cr();
 653 }
 654 
 655 void ArrayStoreData::print_data_on(outputStream* st, const char* extra) const {
 656   print_shared(st, "ArrayStore", extra);
 657   st->cr();
 658   tab(st, true);
 659   st->print("array");
 660   _array.print_data_on(st);
 661   tab(st, true);
 662   st->print("element");
 663   if (null_seen()) {
 664     st->print(" (null seen)");
 665   }
 666   tab(st);
 667   print_receiver_data_on(st);
 668 }
 669 
 670 void ArrayLoadData::print_data_on(outputStream* st, const char* extra) const {
 671   print_shared(st, "ArrayLoad", extra);
 672   st->cr();
 673   tab(st, true);
 674   st->print("array");
 675   _array.print_data_on(st);
 676   tab(st, true);
 677   st->print("element");
 678   _element.print_data_on(st);
 679 }
 680 
 681 void ACmpData::print_data_on(outputStream* st, const char* extra) const {
 682   BranchData::print_data_on(st, extra);
 683   tab(st, true);
 684   st->print("left");
 685   _left.print_data_on(st);
 686   tab(st, true);
 687   st->print("right");
 688   _right.print_data_on(st);
 689 }
 690 
 691 // ==================================================================
 692 // MethodData*
 693 //
 694 // A MethodData* holds information which has been collected about
 695 // a method.
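     //
     // Roughly, the variable-sized tail of a MethodData is laid out as follows
     // (see compute_allocation_size_in_bytes() and initialize() below):
     //
     //   [ per-bytecode ProfileData entries              ]
     //   [ extra data (traps / speculative traps)        ]
     //   [ ArgInfoData                                   ]
     //   [ parameter type profile (if enabled)           ]
     //   [ exception handler profile (if enabled)        ]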
 696 
 697 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
 698   assert(!THREAD->owns_locks(), "Should not own any locks");
 699   int size = MethodData::compute_allocation_size_in_words(method);
 700 
 701   return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
 702     MethodData(method);
 703 }
 704 
 705 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
 706   switch (code) {
 707   case Bytecodes::_checkcast:
 708   case Bytecodes::_instanceof:
 709     if (TypeProfileCasts) {
 710       return ReceiverTypeData::static_cell_count();
 711     } else {
 712       return BitData::static_cell_count();
 713     }
 714   case Bytecodes::_aaload:
 715     return ArrayLoadData::static_cell_count();
 716   case Bytecodes::_aastore:
 717     return ArrayStoreData::static_cell_count();
 718   case Bytecodes::_invokespecial:
 719   case Bytecodes::_invokestatic:
 720     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 721       return variable_cell_count;
 722     } else {
 723       return CounterData::static_cell_count();
 724     }
 725   case Bytecodes::_goto:
 726   case Bytecodes::_goto_w:
 727   case Bytecodes::_jsr:
 728   case Bytecodes::_jsr_w:
 729     return JumpData::static_cell_count();
 730   case Bytecodes::_invokevirtual:
 731   case Bytecodes::_invokeinterface:
 732     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 733       return variable_cell_count;
 734     } else {
 735       return VirtualCallData::static_cell_count();
 736     }
 737   case Bytecodes::_invokedynamic:
 738     if (MethodData::profile_arguments() || MethodData::profile_return()) {
 739       return variable_cell_count;
 740     } else {
 741       return CounterData::static_cell_count();
 742     }
 743   case Bytecodes::_ret:
 744     return RetData::static_cell_count();
 745   case Bytecodes::_ifeq:
 746   case Bytecodes::_ifne:
 747   case Bytecodes::_iflt:
 748   case Bytecodes::_ifge:
 749   case Bytecodes::_ifgt:
 750   case Bytecodes::_ifle:
 751   case Bytecodes::_if_icmpeq:
 752   case Bytecodes::_if_icmpne:
 753   case Bytecodes::_if_icmplt:
 754   case Bytecodes::_if_icmpge:
 755   case Bytecodes::_if_icmpgt:
 756   case Bytecodes::_if_icmple:
 757   case Bytecodes::_ifnull:
 758   case Bytecodes::_ifnonnull:
 759     return BranchData::static_cell_count();
 760   case Bytecodes::_if_acmpne:
 761   case Bytecodes::_if_acmpeq:
 762     return ACmpData::static_cell_count();
 763   case Bytecodes::_lookupswitch:
 764   case Bytecodes::_tableswitch:
 765     return variable_cell_count;
 766   default:
 767     return no_profile_data;
 768   }
 769 }
 770 
 771 // Compute the size of the profiling information corresponding to
 772 // the current bytecode.
 773 int MethodData::compute_data_size(BytecodeStream* stream) {
 774   int cell_count = bytecode_cell_count(stream->code());
 775   if (cell_count == no_profile_data) {
 776     return 0;
 777   }
 778   if (cell_count == variable_cell_count) {
 779     switch (stream->code()) {
 780     case Bytecodes::_lookupswitch:
 781     case Bytecodes::_tableswitch:
 782       cell_count = MultiBranchData::compute_cell_count(stream);
 783       break;
 784     case Bytecodes::_invokespecial:
 785     case Bytecodes::_invokestatic:
 786     case Bytecodes::_invokedynamic:
 787       assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
 788       if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
 789           profile_return_for_invoke(stream->method(), stream->bci())) {
 790         cell_count = CallTypeData::compute_cell_count(stream);
 791       } else {
 792         cell_count = CounterData::static_cell_count();
 793       }
 794       break;
 795     case Bytecodes::_invokevirtual:
 796     case Bytecodes::_invokeinterface: {
 797       assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
 798       if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
 799           profile_return_for_invoke(stream->method(), stream->bci())) {
 800         cell_count = VirtualCallTypeData::compute_cell_count(stream);
 801       } else {
 802         cell_count = VirtualCallData::static_cell_count();
 803       }
 804       break;
 805     }
 806     default:
 807       fatal("unexpected bytecode for var length profile data");
 808     }
 809   }
 810   // Note:  cell_count might be zero, meaning that there is just
 811   //        a DataLayout header, with no extra cells.
 812   assert(cell_count >= 0, "sanity");
 813   return DataLayout::compute_size_in_bytes(cell_count);
 814 }
 815 
 816 bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
 817   // Bytecodes for which we may use speculation
 818   switch (code) {
 819   case Bytecodes::_checkcast:
 820   case Bytecodes::_instanceof:
 821   case Bytecodes::_aaload:
 822   case Bytecodes::_aastore:
 823   case Bytecodes::_invokevirtual:
 824   case Bytecodes::_invokeinterface:
 825   case Bytecodes::_if_acmpeq:
 826   case Bytecodes::_if_acmpne:
 827   case Bytecodes::_ifnull:
 828   case Bytecodes::_ifnonnull:
 829   case Bytecodes::_invokestatic:
 830 #ifdef COMPILER2
 831     if (CompilerConfig::is_c2_enabled()) {
 832       return UseTypeSpeculation;
 833     }
 834 #endif
 835   default:
 836     return false;
 837   }
 838   return false;
 839 }
 840 
 841 #if INCLUDE_JVMCI
 842 
 843 void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
 844   return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
 845 }
 846 
 847 FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
 848   memcpy(data(), speculation, speculation_len);
 849 }
 850 
 851 // A heuristic check to detect nmethods that outlive a failed speculations list.
 852 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 853   jlong head = (jlong)(address) *failed_speculations_address;
 854   if ((head & 0x1) == 0x1) {
 855     stringStream st;
 856     if (nm != nullptr) {
 857       st.print("%d", nm->compile_id());
 858       Method* method = nm->method();
 859       st.print_raw("{");
 860       if (method != nullptr) {
 861         method->print_name(&st);
 862       } else {
 863         const char* jvmci_name = nm->jvmci_name();
 864         if (jvmci_name != nullptr) {
 865           st.print_raw(jvmci_name);
 866         }
 867       }
 868       st.print_raw("}");
 869     } else {
 870       st.print("<unknown>");
 871     }
 872     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 873   }
 874 }
 875 
 876 bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
 877   assert(failed_speculations_address != nullptr, "must be");
 878   size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
 879 
 880   guarantee_failed_speculations_alive(nm, failed_speculations_address);
 881 
 882   FailedSpeculation** cursor = failed_speculations_address;
 883   FailedSpeculation* fs = nullptr;
 884   do {
 885     if (*cursor == nullptr) {
 886       if (fs == nullptr) {
 887         // lazily allocate FailedSpeculation
 888         fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
 889         if (fs == nullptr) {
 890           // no memory -> ignore failed speculation
 891           return false;
 892         }
 893         guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
 894       }
 895       FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
 896       if (old_fs == nullptr) {
 897         // Successfully appended fs to end of the list
 898         return true;
 899       }
 900     }
 901     guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
 902     // check if the current entry matches this thread's failed speculation
 903     if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
 904       if (fs != nullptr) {
 905         delete fs;
 906       }
 907       return false;
 908     }
 909     cursor = (*cursor)->next_adr();
 910   } while (true);
 911 }
 912 
 913 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 914   assert(failed_speculations_address != nullptr, "must be");
 915   FailedSpeculation* fs = *failed_speculations_address;
 916   while (fs != nullptr) {
 917     FailedSpeculation* next = fs->next();
 918     delete fs;
 919     fs = next;
 920   }
 921 
 922   // Write an unaligned value to failed_speculations_address to denote
 923   // that it is no longer a valid pointer. This allows the check in
 924   // add_failed_speculation to detect adding to a freed failed
 925   // speculations list.
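       // A real FailedSpeculation* is pointer-aligned (see the guarantee in
       // add_failed_speculation), so a genuine list head never has its low bit
       // set; guarantee_failed_speculations_alive() keys off exactly that bit.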
 926   long* head = (long*) failed_speculations_address;
 927   (*head) = (*head) | 0x1;
 928 }
 929 #endif // INCLUDE_JVMCI
 930 
 931 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
 932 #if INCLUDE_JVMCI
 933   if (ProfileTraps) {
 934     // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
 935     int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 936 
 937     // Make sure we have a minimum number of extra data slots to
 938     // allocate SpeculativeTrapData entries. We would want to have one
 939     // entry per compilation that inlines this method and for which
 940     // some type speculation assumption fails. So the room we need for
 941     // the SpeculativeTrapData entries doesn't directly depend on the
 942     // size of the method. Because it's hard to estimate, we reserve
 943     // space for an arbitrary number of entries.
 944     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 945       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 946 
 947     return MAX2(extra_data_count, spec_data_count);
 948   } else {
 949     return 0;
 950   }
 951 #else // INCLUDE_JVMCI
 952   if (ProfileTraps) {
 953     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
 954     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
 955     // If the method is large, let the extra BCIs grow numerous (to ~1%).
 956     int one_percent_of_data
 957       = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
 958     if (extra_data_count < one_percent_of_data)
 959       extra_data_count = one_percent_of_data;
 960     if (extra_data_count > empty_bc_count)
 961       extra_data_count = empty_bc_count;  // no need for more
 962 
 963     // Make sure we have a minimum number of extra data slots to
 964     // allocate SpeculativeTrapData entries. We would want to have one
 965     // entry per compilation that inlines this method and for which
 966     // some type speculation assumption fails. So the room we need for
 967     // the SpeculativeTrapData entries doesn't directly depend on the
 968     // size of the method. Because it's hard to estimate, we reserve
 969     // space for an arbitrary number of entries.
 970     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 971       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 972 
 973     return MAX2(extra_data_count, spec_data_count);
 974   } else {
 975     return 0;
 976   }
 977 #endif // INCLUDE_JVMCI
 978 }
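
     // For instance, in a non-JVMCI build with ProfileTraps on and
     // empty_bc_count == 10, extra_data_count starts out as (10 * 3) / 128 + 1 == 1;
     // when needs_speculative_traps is set, the SpecTrapLimitExtraEntries-based
     // reservation typically dominates via the MAX2 above.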
 979 
 980 // Compute the size of the MethodData* necessary to store
 981 // profiling information about a given method.  Size is in bytes.
 982 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
 983   int data_size = 0;
 984   BytecodeStream stream(method);
 985   Bytecodes::Code c;
 986   int empty_bc_count = 0;  // number of bytecodes lacking data
 987   bool needs_speculative_traps = false;
 988   while ((c = stream.next()) >= 0) {
 989     int size_in_bytes = compute_data_size(&stream);
 990     data_size += size_in_bytes;
 991     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
 992     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
 993   }
 994   int object_size = in_bytes(data_offset()) + data_size;
 995 
 996   // Add some extra DataLayout cells (at least one) to track stray traps.
 997   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
 998   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
 999 
1000   // Add a cell to record information about modified arguments.
1001   int arg_size = method->size_of_parameters();
1002   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
1003 
1004   // Reserve room for an area of the MDO dedicated to profiling of
1005   // parameters
1006   int args_cell = ParametersTypeData::compute_cell_count(method());
1007   if (args_cell > 0) {
1008     object_size += DataLayout::compute_size_in_bytes(args_cell);
1009   }
1010 
1011   if (ProfileExceptionHandlers && method()->has_exception_handler()) {
1012     int num_exception_handlers = method()->exception_table_length();
1013     object_size += num_exception_handlers * single_exception_handler_data_size();
1014   }
1015 
1016   return object_size;
1017 }
1018 
1019 // Compute the size of the MethodData* necessary to store
1020 // profiling information about a given method.  Size is in words.
1021 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1022   int byte_size = compute_allocation_size_in_bytes(method);
1023   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1024   return align_metadata_size(word_size);
1025 }
1026 
1027 // Initialize an individual data segment.  Returns the size of
1028 // the segment in bytes.
1029 int MethodData::initialize_data(BytecodeStream* stream,
1030                                        int data_index) {
1031   int cell_count = -1;
1032   u1 tag = DataLayout::no_tag;
1033   DataLayout* data_layout = data_layout_at(data_index);
1034   Bytecodes::Code c = stream->code();
1035   switch (c) {
1036   case Bytecodes::_checkcast:
1037   case Bytecodes::_instanceof:
1038     if (TypeProfileCasts) {
1039       cell_count = ReceiverTypeData::static_cell_count();
1040       tag = DataLayout::receiver_type_data_tag;
1041     } else {
1042       cell_count = BitData::static_cell_count();
1043       tag = DataLayout::bit_data_tag;
1044     }
1045     break;
1046   case Bytecodes::_aaload:
1047     cell_count = ArrayLoadData::static_cell_count();
1048     tag = DataLayout::array_load_data_tag;
1049     break;
1050   case Bytecodes::_aastore:
1051     cell_count = ArrayStoreData::static_cell_count();
1052     tag = DataLayout::array_store_data_tag;
1053     break;
1054   case Bytecodes::_invokespecial:
1055   case Bytecodes::_invokestatic: {
1056     int counter_data_cell_count = CounterData::static_cell_count();
1057     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1058         profile_return_for_invoke(stream->method(), stream->bci())) {
1059       cell_count = CallTypeData::compute_cell_count(stream);
1060     } else {
1061       cell_count = counter_data_cell_count;
1062     }
1063     if (cell_count > counter_data_cell_count) {
1064       tag = DataLayout::call_type_data_tag;
1065     } else {
1066       tag = DataLayout::counter_data_tag;
1067     }
1068     break;
1069   }
1070   case Bytecodes::_goto:
1071   case Bytecodes::_goto_w:
1072   case Bytecodes::_jsr:
1073   case Bytecodes::_jsr_w:
1074     cell_count = JumpData::static_cell_count();
1075     tag = DataLayout::jump_data_tag;
1076     break;
1077   case Bytecodes::_invokevirtual:
1078   case Bytecodes::_invokeinterface: {
1079     int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
1080     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1081         profile_return_for_invoke(stream->method(), stream->bci())) {
1082       cell_count = VirtualCallTypeData::compute_cell_count(stream);
1083     } else {
1084       cell_count = virtual_call_data_cell_count;
1085     }
1086     if (cell_count > virtual_call_data_cell_count) {
1087       tag = DataLayout::virtual_call_type_data_tag;
1088     } else {
1089       tag = DataLayout::virtual_call_data_tag;
1090     }
1091     break;
1092   }
1093   case Bytecodes::_invokedynamic: {
1094     // %%% should make a type profile for any invokedynamic that takes a ref argument
1095     int counter_data_cell_count = CounterData::static_cell_count();
1096     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1097         profile_return_for_invoke(stream->method(), stream->bci())) {
1098       cell_count = CallTypeData::compute_cell_count(stream);
1099     } else {
1100       cell_count = counter_data_cell_count;
1101     }
1102     if (cell_count > counter_data_cell_count) {
1103       tag = DataLayout::call_type_data_tag;
1104     } else {
1105       tag = DataLayout::counter_data_tag;
1106     }
1107     break;
1108   }
1109   case Bytecodes::_ret:
1110     cell_count = RetData::static_cell_count();
1111     tag = DataLayout::ret_data_tag;
1112     break;
1113   case Bytecodes::_ifeq:
1114   case Bytecodes::_ifne:
1115   case Bytecodes::_iflt:
1116   case Bytecodes::_ifge:
1117   case Bytecodes::_ifgt:
1118   case Bytecodes::_ifle:
1119   case Bytecodes::_if_icmpeq:
1120   case Bytecodes::_if_icmpne:
1121   case Bytecodes::_if_icmplt:
1122   case Bytecodes::_if_icmpge:
1123   case Bytecodes::_if_icmpgt:
1124   case Bytecodes::_if_icmple:
1125   case Bytecodes::_ifnull:
1126   case Bytecodes::_ifnonnull:
1127     cell_count = BranchData::static_cell_count();
1128     tag = DataLayout::branch_data_tag;
1129     break;
1130   case Bytecodes::_if_acmpeq:
1131   case Bytecodes::_if_acmpne:
1132     cell_count = ACmpData::static_cell_count();
1133     tag = DataLayout::acmp_data_tag;
1134     break;
1135   case Bytecodes::_lookupswitch:
1136   case Bytecodes::_tableswitch:
1137     cell_count = MultiBranchData::compute_cell_count(stream);
1138     tag = DataLayout::multi_branch_data_tag;
1139     break;
1140   default:
1141     break;
1142   }
1143   assert(tag == DataLayout::multi_branch_data_tag ||
1144          ((MethodData::profile_arguments() || MethodData::profile_return()) &&
1145           (tag == DataLayout::call_type_data_tag ||
1146            tag == DataLayout::counter_data_tag ||
1147            tag == DataLayout::virtual_call_type_data_tag ||
1148            tag == DataLayout::virtual_call_data_tag)) ||
1149          cell_count == bytecode_cell_count(c), "cell counts must agree");
1150   if (cell_count >= 0) {
1151     assert(tag != DataLayout::no_tag, "bad tag");
1152     assert(bytecode_has_profile(c), "agree w/ BHP");
1153     data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
1154     return DataLayout::compute_size_in_bytes(cell_count);
1155   } else {
1156     assert(!bytecode_has_profile(c), "agree w/ !BHP");
1157     return 0;
1158   }
1159 }
1160 
1161 // Get the data at an arbitrary (sort of) data index.
1162 ProfileData* MethodData::data_at(int data_index) const {
1163   if (out_of_bounds(data_index)) {
1164     return nullptr;
1165   }
1166   DataLayout* data_layout = data_layout_at(data_index);
1167   return data_layout->data_in();
1168 }
1169 
1170 int DataLayout::cell_count() {
1171   switch (tag()) {
1172   case DataLayout::no_tag:
1173   default:
1174     ShouldNotReachHere();
1175     return 0;
1176   case DataLayout::bit_data_tag:
1177     return BitData::static_cell_count();
1178   case DataLayout::counter_data_tag:
1179     return CounterData::static_cell_count();
1180   case DataLayout::jump_data_tag:
1181     return JumpData::static_cell_count();
1182   case DataLayout::receiver_type_data_tag:
1183     return ReceiverTypeData::static_cell_count();
1184   case DataLayout::virtual_call_data_tag:
1185     return VirtualCallData::static_cell_count();
1186   case DataLayout::ret_data_tag:
1187     return RetData::static_cell_count();
1188   case DataLayout::branch_data_tag:
1189     return BranchData::static_cell_count();
1190   case DataLayout::multi_branch_data_tag:
1191     return ((new MultiBranchData(this))->cell_count());
1192   case DataLayout::arg_info_data_tag:
1193     return ((new ArgInfoData(this))->cell_count());
1194   case DataLayout::call_type_data_tag:
1195     return ((new CallTypeData(this))->cell_count());
1196   case DataLayout::virtual_call_type_data_tag:
1197     return ((new VirtualCallTypeData(this))->cell_count());
1198   case DataLayout::parameters_type_data_tag:
1199     return ((new ParametersTypeData(this))->cell_count());
1200   case DataLayout::speculative_trap_data_tag:
1201     return SpeculativeTrapData::static_cell_count();
1202   case DataLayout::array_store_data_tag:
1203     return ((new ArrayStoreData(this))->cell_count());
1204   case DataLayout::array_load_data_tag:
1205     return ((new ArrayLoadData(this))->cell_count());
1206   case DataLayout::acmp_data_tag:
1207     return ((new ACmpData(this))->cell_count());
1208   }
1209 }
1210 ProfileData* DataLayout::data_in() {
1211   switch (tag()) {
1212   case DataLayout::no_tag:
1213   default:
1214     ShouldNotReachHere();
1215     return nullptr;
1216   case DataLayout::bit_data_tag:
1217     return new BitData(this);
1218   case DataLayout::counter_data_tag:
1219     return new CounterData(this);
1220   case DataLayout::jump_data_tag:
1221     return new JumpData(this);
1222   case DataLayout::receiver_type_data_tag:
1223     return new ReceiverTypeData(this);
1224   case DataLayout::virtual_call_data_tag:
1225     return new VirtualCallData(this);
1226   case DataLayout::ret_data_tag:
1227     return new RetData(this);
1228   case DataLayout::branch_data_tag:
1229     return new BranchData(this);
1230   case DataLayout::multi_branch_data_tag:
1231     return new MultiBranchData(this);
1232   case DataLayout::arg_info_data_tag:
1233     return new ArgInfoData(this);
1234   case DataLayout::call_type_data_tag:
1235     return new CallTypeData(this);
1236   case DataLayout::virtual_call_type_data_tag:
1237     return new VirtualCallTypeData(this);
1238   case DataLayout::parameters_type_data_tag:
1239     return new ParametersTypeData(this);
1240   case DataLayout::speculative_trap_data_tag:
1241     return new SpeculativeTrapData(this);
1242   case DataLayout::array_store_data_tag:
1243     return new ArrayStoreData(this);
1244   case DataLayout::array_load_data_tag:
1245     return new ArrayLoadData(this);
1246   case DataLayout::acmp_data_tag:
1247     return new ACmpData(this);
1248   }
1249 }
1250 
1251 // Iteration over data.
1252 ProfileData* MethodData::next_data(ProfileData* current) const {
1253   int current_index = dp_to_di(current->dp());
1254   int next_index = current_index + current->size_in_bytes();
1255   ProfileData* next = data_at(next_index);
1256   return next;
1257 }
1258 
1259 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1260   int current_index = dp_to_di((address)current);
1261   int next_index = current_index + current->size_in_bytes();
1262   if (out_of_bounds(next_index)) {
1263     return nullptr;
1264   }
1265   DataLayout* next = data_layout_at(next_index);
1266   return next;
1267 }
1268 
1269 // Give each of the data entries a chance to perform specific
1270 // data initialization.
1271 void MethodData::post_initialize(BytecodeStream* stream) {
1272   ResourceMark rm;
1273   ProfileData* data;
1274   for (data = first_data(); is_valid(data); data = next_data(data)) {
1275     stream->set_start(data->bci());
1276     stream->next();
1277     data->post_initialize(stream, this);
1278   }
1279   if (_parameters_type_data_di != no_parameters) {
1280     parameters_type_data()->post_initialize(nullptr, this);
1281   }
1282 }
1283 
1284 // Initialize the MethodData* corresponding to a given method.
1285 MethodData::MethodData(const methodHandle& method)
1286   : _method(method()),
1287     // Holds Compile_lock
1288     _extra_data_lock(Mutex::safepoint-2, "MDOExtraData_lock"),
1289     _compiler_counters(),
1290     _parameters_type_data_di(parameters_uninitialized) {
1291   initialize();
1292 }
1293 
1294 void MethodData::initialize() {
1295   Thread* thread = Thread::current();
1296   NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
1297   ResourceMark rm(thread);
1298 
1299   init();
1300   set_creation_mileage(mileage_of(method()));
1301 
1302   // Go through the bytecodes and allocate and initialize the
1303   // corresponding data cells.
1304   int data_size = 0;
1305   int empty_bc_count = 0;  // number of bytecodes lacking data
1306   _data[0] = 0;  // apparently not set below.
1307   BytecodeStream stream(methodHandle(thread, method()));
1308   Bytecodes::Code c;
1309   bool needs_speculative_traps = false;
1310   while ((c = stream.next()) >= 0) {
1311     int size_in_bytes = initialize_data(&stream, data_size);
1312     data_size += size_in_bytes;
1313     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
1314     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
1315   }
1316   _data_size = data_size;
1317   int object_size = in_bytes(data_offset()) + data_size;
1318 
1319   // Add some extra DataLayout cells (at least one) to track stray traps.
1320   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1321   int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
1322 
1323   // Let's zero the space for the extra data
1324   if (extra_size > 0) {
1325     Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
1326   }
1327 
1328   // Add a cell to record information about modified arguments.
1329   // Set up _args_modified array after traps cells so that
1330   // the code for traps cells works.
1331   DataLayout *dp = data_layout_at(data_size + extra_size);
1332 
1333   int arg_size = method()->size_of_parameters();
1334   dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
1335 
1336   int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
1337   object_size += extra_size + arg_data_size;
1338 
1339   int parms_cell = ParametersTypeData::compute_cell_count(method());
1340   // If we are profiling parameters, we reserved an area near the end
1341   // of the MDO after the slots for bytecodes (because there's no bci
1342   // for method entry so they don't fit with the framework for the
1343   // profiling of bytecodes). We store the offset within the MDO of
1344   // this area (or -1 if no parameter is profiled)
1345   int parm_data_size = 0;
1346   if (parms_cell > 0) {
1347     parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
1348     object_size += parm_data_size;
1349     _parameters_type_data_di = data_size + extra_size + arg_data_size;
1350     DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
1351     dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
1352   } else {
1353     _parameters_type_data_di = no_parameters;
1354   }
1355 
1356   _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
1357   if (ProfileExceptionHandlers && method()->has_exception_handler()) {
1358     int num_exception_handlers = method()->exception_table_length();
1359     object_size += num_exception_handlers * single_exception_handler_data_size();
1360     ExceptionTableElement* exception_handlers = method()->exception_table_start();
1361     for (int i = 0; i < num_exception_handlers; i++) {
1362       DataLayout *dp = exception_handler_data_at(i);
1363       dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
1364     }
1365   }
1366 
1367   // Set an initial hint. Don't use set_hint_di() because
1368   // first_di() may be out of bounds if data_size is 0.
1369   // In that situation, _hint_di is never used, but at
1370   // least well-defined.
1371   _hint_di = first_di();
1372 
1373   post_initialize(&stream);
1374 
1375   assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
1376   set_size(object_size);
1377 }
1378 
1379 void MethodData::init() {
1380   _compiler_counters = CompilerCounters(); // reset compiler counters
1381   _invocation_counter.init();
1382   _backedge_counter.init();
1383   _invocation_counter_start = 0;
1384   _backedge_counter_start = 0;
1385 
1386   // Set per-method invoke- and backedge mask.
1387   double scale = 1.0;
1388   methodHandle mh(Thread::current(), _method);
1389   CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale);
1390   _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1391   _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1392 
1393   _tenure_traps = 0;
1394   _num_loops = 0;
1395   _num_blocks = 0;
1396   _would_profile = unknown;
1397 
1398 #if INCLUDE_JVMCI
1399   _jvmci_ir_size = 0;
1400   _failed_speculations = nullptr;
1401 #endif
1402 
1403 #if INCLUDE_RTM_OPT
1404   _rtm_state = NoRTM; // No RTM lock eliding by default
1405   if (UseRTMLocking &&
1406       !CompilerOracle::has_option(mh, CompileCommand::NoRTMLockEliding)) {
1407     if (CompilerOracle::has_option(mh, CompileCommand::UseRTMLockEliding) || !UseRTMDeopt) {
1408       // Generate RTM lock eliding code without abort ratio calculation code.
1409       _rtm_state = UseRTM;
1410     } else if (UseRTMDeopt) {
1411       // Generate RTM lock eliding code and include abort ratio calculation
1412       // code if UseRTMDeopt is on.
1413       _rtm_state = ProfileRTM;
1414     }
1415   }
1416 #endif
1417 
1418   // Initialize escape flags.
1419   clear_escape_info();
1420 }
1421 
1422 // Get a measure of how much mileage the method has on it.
1423 int MethodData::mileage_of(Method* method) {
1424   return MAX2(method->invocation_count(), method->backedge_count());
1425 }
1426 
1427 bool MethodData::is_mature() const {
1428   return CompilationPolicy::is_mature(_method);
1429 }
1430 
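// The two lookups below start from data_layout_before(bci) and cache the
// result via set_hint_di(), so a sequence of mostly-ascending bci queries
// avoids rescanning the profile from the beginning each time.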
1431 // Translate a bci to its corresponding data pointer (dp).
1432 address MethodData::bci_to_dp(int bci) {
1433   ResourceMark rm;
1434   DataLayout* data = data_layout_before(bci);
1435   DataLayout* prev = nullptr;
1436   for ( ; is_valid(data); data = next_data_layout(data)) {
1437     if (data->bci() >= bci) {
1438       if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
1439       else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
1440       return (address)data;
1441     }
1442     prev = data;
1443   }
1444   return (address)limit_data_position();
1445 }
1446 
1447 // Translate a bci to its corresponding data, or null.
1448 ProfileData* MethodData::bci_to_data(int bci) {
1449   DataLayout* data = data_layout_before(bci);
1450   for ( ; is_valid(data); data = next_data_layout(data)) {
1451     if (data->bci() == bci) {
1452       set_hint_di(dp_to_di((address)data));
1453       return data->data_in();
1454     } else if (data->bci() > bci) {
1455       break;
1456     }
1457   }
1458   return bci_to_extra_data(bci, nullptr, false);
1459 }
1460 
1461 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1462   assert(ProfileExceptionHandlers, "not profiling");
1463   for (int i = 0; i < num_exception_handler_data(); i++) {
1464     DataLayout* exception_handler_data = exception_handler_data_at(i);
1465     if (exception_handler_data->bci() == bci) {
1466       return exception_handler_data;
1467     }
1468   }
1469   return nullptr;
1470 }
1471 
1472 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1473   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1474   return data != nullptr ? new BitData(data) : nullptr;
1475 }
1476 
1477 BitData MethodData::exception_handler_bci_to_data(int bci) {
1478   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1479   assert(data != nullptr, "invalid bci");
1480   return BitData(data);
1481 }
1482 
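// Advance to the next entry in the extra data section. Entries there are
// variable-sized, so the step is computed from the current entry's tag:
// no_tag/bit_data entries and speculative_trap entries have different cell
// counts.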
1483 DataLayout* MethodData::next_extra(DataLayout* dp) {
1484   int nb_cells = 0;
1485   switch(dp->tag()) {
1486   case DataLayout::bit_data_tag:
1487   case DataLayout::no_tag:
1488     nb_cells = BitData::static_cell_count();
1489     break;
1490   case DataLayout::speculative_trap_data_tag:
1491     nb_cells = SpeculativeTrapData::static_cell_count();
1492     break;
1493   default:
1494     fatal("unexpected tag %d", dp->tag());
1495   }
1496   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1497 }
1498 
1499 ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
1500   DataLayout* end = args_data_limit();
1501 
1502   for (;; dp = next_extra(dp)) {
1503     assert(dp < end, "moved past end of extra data");
1504     // No need for "Atomic::load_acquire" ops,
1505     // since the data structure is monotonic.
1506     switch(dp->tag()) {
1507     case DataLayout::no_tag:
1508       return nullptr;
1509     case DataLayout::arg_info_data_tag:
1510       dp = end;
1511       return nullptr; // ArgInfoData is at the end of the extra data section.
1512     case DataLayout::bit_data_tag:
1513       if (m == nullptr && dp->bci() == bci) {
1514         return new BitData(dp);
1515       }
1516       break;
1517     case DataLayout::speculative_trap_data_tag:
1518       if (m != nullptr) {
1519         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1520         // data->method() may be null in case of a concurrent
1521         // allocation: another thread has reserved this entry but not
1522         // yet set its method. Only a complete, matching entry is reused.
1523         if (dp->bci() == bci) {
1524           if (data->method() == nullptr) {
1525             assert(concurrent, "impossible because no concurrent allocation");
1526             return nullptr;
1527           } else if (data->method() == m) {
1528             return data;
1529           }
1530         }
1531       }
1532       break;
1533     default:
1534       fatal("unexpected tag %d", dp->tag());
1535     }
1536   }
1537   return nullptr;
1538 }
1539 
1540 
1541 // Translate a bci to its corresponding extra data, or null.
1542 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
1543   // This code assumes an entry for a SpeculativeTrapData is 2 cells
1544   assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
1545          DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
1546          "code needs to be adjusted");
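  // The factor of two matters below: allocating a SpeculativeTrapData entry
  // also requires the slot after dp to be free (see the "Make sure we have
  // room" check).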
1547 
1548   // Do not create one of these if method has been redefined.
1549   if (m != nullptr && m->is_old()) {
1550     return nullptr;
1551   }
1552 
1553   DataLayout* dp  = extra_data_base();
1554   DataLayout* end = args_data_limit();
1555 
1556   // Allocation in the extra data space has to be atomic because not
1557   // all entries have the same size and non-atomic concurrent
1558   // allocation would result in a corrupted extra data space.
1559   ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
1560   if (result != nullptr) {
1561     return result;
1562   }
1563 
1564   if (create_if_missing && dp < end) {
1565     MutexLocker ml(&_extra_data_lock);
1566     // Check again now that we have the lock. Another thread may
1567     // have added extra data entries.
1568     ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
1569     if (result != nullptr || dp >= end) {
1570       return result;
1571     }
1572 
1573     assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
1574     assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
1575     u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
1576     // SpeculativeTrapData is 2 slots. Make sure we have room.
1577     if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
1578       return nullptr;
1579     }
1580     DataLayout temp;
1581     temp.initialize(tag, checked_cast<u2>(bci), 0);
1582 
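    // Publish the new entry by storing the complete header (tag + bci) in a
    // single word, so a lock-free reader sees either no_tag or a fully
    // initialized header, never a partially written one.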
1583     dp->set_header(temp.header());
1584     assert(dp->tag() == tag, "sane");
1585     assert(dp->bci() == bci, "no concurrent allocation");
1586     if (tag == DataLayout::bit_data_tag) {
1587       return new BitData(dp);
1588     } else {
1589       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1590       data->set_method(m);
1591       return data;
1592     }
1593   }
1594   return nullptr;
1595 }
1596 
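// ArgInfoData, if present, is the last entry of the extra data section (see
// bci_to_extra_data_helper() and print_data_on()), so this linear scan finds
// it just before args_data_limit().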
1597 ArgInfoData *MethodData::arg_info() {
1598   DataLayout* dp    = extra_data_base();
1599   DataLayout* end   = args_data_limit();
1600   for (; dp < end; dp = next_extra(dp)) {
1601     if (dp->tag() == DataLayout::arg_info_data_tag)
1602       return new ArgInfoData(dp);
1603   }
1604   return nullptr;
1605 }
1606 
1607 // Printing
1608 
1609 void MethodData::print_on(outputStream* st) const {
1610   assert(is_methodData(), "should be method data");
1611   st->print("method data for ");
1612   method()->print_value_on(st);
1613   st->cr();
1614   print_data_on(st);
1615 }
1616 
1617 void MethodData::print_value_on(outputStream* st) const {
1618   assert(is_methodData(), "should be method data");
1619   st->print("method data for ");
1620   method()->print_value_on(st);
1621 }
1622 
1623 void MethodData::print_data_on(outputStream* st) const {
1624   ResourceMark rm;
1625   ProfileData* data = first_data();
1626   if (_parameters_type_data_di != no_parameters) {
1627     parameters_type_data()->print_data_on(st);
1628   }
1629   for ( ; is_valid(data); data = next_data(data)) {
1630     st->print("%d", dp_to_di(data->dp()));
1631     st->fill_to(6);
1632     data->print_data_on(st, this);
1633   }
1634   st->print_cr("--- Extra data:");
1635   DataLayout* dp    = extra_data_base();
1636   DataLayout* end   = args_data_limit();
1637   for (;; dp = next_extra(dp)) {
1638     assert(dp < end, "moved past end of extra data");
1639     // No need for "Atomic::load_acquire" ops,
1640     // since the data structure is monotonic.
1641     switch(dp->tag()) {
1642     case DataLayout::no_tag:
1643       continue;
1644     case DataLayout::bit_data_tag:
1645       data = new BitData(dp);
1646       break;
1647     case DataLayout::speculative_trap_data_tag:
1648       data = new SpeculativeTrapData(dp);
1649       break;
1650     case DataLayout::arg_info_data_tag:
1651       data = new ArgInfoData(dp);
1652       dp = end; // ArgInfoData is at the end of the extra data section.
1653       break;
1654     default:
1655       fatal("unexpected tag %d", dp->tag());
1656     }
1657     st->print("%d", dp_to_di(data->dp()));
1658     st->fill_to(6);
1659     data->print_data_on(st);
1660     if (dp >= end) return;
1661   }
1662 }
1663 
1664 // Verification
1665 
1666 void MethodData::verify_on(outputStream* st) {
1667   guarantee(is_methodData(), "object must be method data");
1669   this->verify_data_on(st);
1670 }
1671 
1672 void MethodData::verify_data_on(outputStream* st) {
1673   NEEDS_CLEANUP;
1674   // not yet implemented.
1675 }
1676 
1677 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1678   if (m->is_compiled_lambda_form()) {
1679     return true;
1680   }
1681 
1682   Bytecode_invoke inv(m, bci);
1683   return inv.is_invokedynamic() || inv.is_invokehandle();
1684 }
1685 
1686 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1687   Bytecode_invoke inv(m, bci);
1688   if (inv.is_invokevirtual()) {
1689     Symbol* klass = inv.klass();
1690     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1691         klass == vmSymbols::sun_misc_Unsafe() ||
1692         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1693       Symbol* name = inv.name();
1694       if (name->starts_with("get") || name->starts_with("put")) {
1695         return true;
1696       }
1697     }
1698   }
1699   return false;
1700 }
1701 
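// TypeProfileLevel is decoded digit by digit: the ones digit controls argument
// profiling, the tens digit return-value profiling, and the hundreds digit
// parameter profiling (see profile_return_flag() and profile_parameters_flag()
// below). For each digit, no_type_profile disables the profiling,
// type_profile_jsr292 restricts it to JSR292-related code, and
// type_profile_all enables it for all methods; e.g., assuming the usual 0/1/2
// encoding of those constants, TypeProfileLevel == 222 enables all three
// everywhere.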
1702 int MethodData::profile_arguments_flag() {
1703   return TypeProfileLevel % 10;
1704 }
1705 
1706 bool MethodData::profile_arguments() {
1707   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1708 }
1709 
1710 bool MethodData::profile_arguments_jsr292_only() {
1711   return profile_arguments_flag() == type_profile_jsr292;
1712 }
1713 
1714 bool MethodData::profile_all_arguments() {
1715   return profile_arguments_flag() == type_profile_all;
1716 }
1717 
1718 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1719   if (!profile_arguments()) {
1720     return false;
1721   }
1722 
1723   if (profile_all_arguments()) {
1724     return true;
1725   }
1726 
1727   if (profile_unsafe(m, bci)) {
1728     return true;
1729   }
1730 
1731   assert(profile_arguments_jsr292_only(), "inconsistent");
1732   return profile_jsr292(m, bci);
1733 }
1734 
1735 int MethodData::profile_return_flag() {
1736   return (TypeProfileLevel % 100) / 10;
1737 }
1738 
1739 bool MethodData::profile_return() {
1740   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1741 }
1742 
1743 bool MethodData::profile_return_jsr292_only() {
1744   return profile_return_flag() == type_profile_jsr292;
1745 }
1746 
1747 bool MethodData::profile_all_return() {
1748   return profile_return_flag() == type_profile_all;
1749 }
1750 
1751 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1752   if (!profile_return()) {
1753     return false;
1754   }
1755 
1756   if (profile_all_return()) {
1757     return true;
1758   }
1759 
1760   assert(profile_return_jsr292_only(), "inconsistent");
1761   return profile_jsr292(m, bci);
1762 }
1763 
1764 int MethodData::profile_parameters_flag() {
1765   return TypeProfileLevel / 100;
1766 }
1767 
1768 bool MethodData::profile_parameters() {
1769   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1770 }
1771 
1772 bool MethodData::profile_parameters_jsr292_only() {
1773   return profile_parameters_flag() == type_profile_jsr292;
1774 }
1775 
1776 bool MethodData::profile_all_parameters() {
1777   return profile_parameters_flag() == type_profile_all;
1778 }
1779 
1780 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1781   if (!profile_parameters()) {
1782     return false;
1783   }
1784 
1785   if (profile_all_parameters()) {
1786     return true;
1787   }
1788 
1789   assert(profile_parameters_jsr292_only(), "inconsistent");
1790   return m->is_compiled_lambda_form();
1791 }
1792 
1793 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1794   log_trace(cds)("Iter(MethodData): %p", this);
1795   it->push(&_method);
1796 }
1797 
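// Helper for clean_extra_data(): when reset is false, slide the entry at dp
// left by "shift" cells to close the gap left by dead SpeculativeTrapData
// entries seen so far; when reset is true, zero the "shift" stale cells that
// end at dp so they read as no_tag again.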
1798 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1799   if (shift == 0) {
1800     return;
1801   }
1802   if (!reset) {
1803     // Move all cells of trap entry at dp left by "shift" cells
1804     intptr_t* start = (intptr_t*)dp;
1805     intptr_t* end = (intptr_t*)next_extra(dp);
1806     for (intptr_t* ptr = start; ptr < end; ptr++) {
1807       *(ptr-shift) = *ptr;
1808     }
1809   } else {
1810     // Reset "shift" cells stopping at dp
1811     intptr_t* start = ((intptr_t*)dp) - shift;
1812     intptr_t* end = (intptr_t*)dp;
1813     for (intptr_t* ptr = start; ptr < end; ptr++) {
1814       *ptr = 0;
1815     }
1816   }
1817 }
1818 
1819 // Check for entries that reference an unloaded method
1820 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1821   bool _always_clean;
1822 public:
1823   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1824   bool is_live(Method* m) {
1825     return !(_always_clean) && m->method_holder()->is_loader_alive();
1826   }
1827 };
1828 
1829 // Check for entries that reference a redefined method
1830 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1831 public:
1832   CleanExtraDataMethodClosure() {}
1833   bool is_live(Method* m) { return !m->is_old(); }
1834 };
1835 
1836 
1837 // Remove SpeculativeTrapData entries that reference an unloaded or
1838 // redefined method
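// For example, if the section holds [trap A (dead), bit B, trap C (live)],
// B and C are each copied left over A's two cells, and the two stale cells
// left in front of the terminating no_tag/ArgInfoData entry are then reset
// to no_tag.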
1839 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1840   DataLayout* dp  = extra_data_base();
1841   DataLayout* end = args_data_limit();
1842 
1843   int shift = 0;
1844   for (; dp < end; dp = next_extra(dp)) {
1845     switch(dp->tag()) {
1846     case DataLayout::speculative_trap_data_tag: {
1847       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1848       Method* m = data->method();
1849       assert(m != nullptr, "should have a method");
1850       if (!cl->is_live(m)) {
1851         // "shift" accumulates the number of cells for dead
1852         // SpeculativeTrapData entries that have been seen so
1853         // far. Following entries must be shifted left by that many
1854         // cells to remove the dead SpeculativeTrapData entries.
1855         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1856       } else {
1857         // Shift this entry left if it follows dead
1858         // SpeculativeTrapData entries
1859         clean_extra_data_helper(dp, shift);
1860       }
1861       break;
1862     }
1863     case DataLayout::bit_data_tag:
1864       // Shift this entry left if it follows dead SpeculativeTrapData
1865       // entries
1866       clean_extra_data_helper(dp, shift);
1867       continue;
1868     case DataLayout::no_tag:
1869     case DataLayout::arg_info_data_tag:
1870       // We are at end of the live trap entries. The previous "shift"
1871       // cells contain entries that are either dead or were shifted
1872       // left. They need to be reset to no_tag
1873       clean_extra_data_helper(dp, shift, true);
1874       return;
1875     default:
1876       fatal("unexpected tag %d", dp->tag());
1877     }
1878   }
1879 }
1880 
1881 // Verify there's no unloaded or redefined method referenced by a
1882 // SpeculativeTrapData entry
1883 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
1884 #ifdef ASSERT
1885   DataLayout* dp  = extra_data_base();
1886   DataLayout* end = args_data_limit();
1887 
1888   for (; dp < end; dp = next_extra(dp)) {
1889     switch(dp->tag()) {
1890     case DataLayout::speculative_trap_data_tag: {
1891       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1892       Method* m = data->method();
1893       assert(m != nullptr && cl->is_live(m), "Method should exist");
1894       break;
1895     }
1896     case DataLayout::bit_data_tag:
1897       continue;
1898     case DataLayout::no_tag:
1899     case DataLayout::arg_info_data_tag:
1900       return;
1901     default:
1902       fatal("unexpected tag %d", dp->tag());
1903     }
1904   }
1905 #endif
1906 }
1907 
1908 void MethodData::clean_method_data(bool always_clean) {
1909   ResourceMark rm;
1910   for (ProfileData* data = first_data();
1911        is_valid(data);
1912        data = next_data(data)) {
1913     data->clean_weak_klass_links(always_clean);
1914   }
1915   ParametersTypeData* parameters = parameters_type_data();
1916   if (parameters != nullptr) {
1917     parameters->clean_weak_klass_links(always_clean);
1918   }
1919 
1920   CleanExtraDataKlassClosure cl(always_clean);
1921   clean_extra_data(&cl);
1922   verify_extra_data_clean(&cl);
1923 }
1924 
1925 // This is called on each MethodData during redefinition to clean out
1926 // extra-data entries that reference "old" (redefined) methods.
1927 void MethodData::clean_weak_method_links() {
1928   ResourceMark rm;
1929   CleanExtraDataMethodClosure cl;
1930   clean_extra_data(&cl);
1931   verify_extra_data_clean(&cl);
1932 }
1933 
1934 void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
1935   release_C_heap_structures();
1936 }
1937 
1938 void MethodData::release_C_heap_structures() {
1939 #if INCLUDE_JVMCI
1940   FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
1941 #endif
1942 }