1 /*
   2  * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/vmSymbols.hpp"
  28 #include "compiler/compilationPolicy.hpp"
  29 #include "compiler/compilerDefinitions.inline.hpp"
  30 #include "compiler/compilerOracle.hpp"
  31 #include "interpreter/bytecode.hpp"
  32 #include "interpreter/bytecodeStream.hpp"
  33 #include "interpreter/linkResolver.hpp"
  34 #include "memory/metaspaceClosure.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "oops/klass.inline.hpp"
  37 #include "oops/methodData.inline.hpp"
  38 #include "prims/jvmtiRedefineClasses.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/signature.hpp"
  45 #include "utilities/align.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 // ==================================================================
  49 // DataLayout
  50 //
  51 // Overlay for generic profiling data.
  52 
  53 // Some types of data layouts need a length field.
  54 bool DataLayout::needs_array_len(u1 tag) {
  55   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  56 }
  57 
// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Clear the whole header word first, then set tag and bci fields.
  _header._bits = (intptr_t)0;
  _header._struct._tag = tag;
  _header._struct._bci = bci;
  // Zero all payload cells.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    // The stored length excludes the length cell itself.
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // These two tags carry extra per-type layout that must be set up eagerly.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}
  76 
// Dispatch weak-klass cleaning to the typed ProfileData view of this layout.
// The ResourceMark bounds the temporary ProfileData allocated by data_in().
void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}
  81 
  82 
  83 // ==================================================================
  84 // ProfileData
  85 //
  86 // A ProfileData object is created to refer to a section of profiling
  87 // data in a structured way.
  88 
  89 // Constructor for invalid ProfileData.
  90 ProfileData::ProfileData() {
  91   _data = nullptr;
  92 }
  93 
// Scan the MethodData's extra-data section for speculative trap entries
// matching this data's bci, and render them into a resource-allocated
// string.  Returns the string once the end of the scanned entries is
// reached (no_tag or arg_info terminates the extra-data section).
char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      // Only traps recorded at this data's bci are relevant.
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      // Skip plain bit data entries.
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the extra-data entries: hand back what was accumulated.
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}
 123 
// Print this data, appending any matching speculative-trap info from md's
// extra-data section as the "extra" suffix.
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}
 127 
// Print the fields common to all ProfileData kinds: bci, type name,
// trap state, caller-supplied extra text, and flags.
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    // Also dumps the raw layout address and the flags byte offset.
    st->print("flags(%d) %p/%d", flags, data(), in_bytes(DataLayout::flags_offset()));
  }
}
 146 
 147 void ProfileData::tab(outputStream* st, bool first) const {
 148   st->fill_to(first ? tab_width_one : tab_width_two);
 149 }
 150 
 151 // ==================================================================
 152 // BitData
 153 //
 154 // A BitData corresponds to a one-bit flag.  This is used to indicate
 155 // whether a checkcast bytecode has seen a null value.
 156 
 157 
// BitData carries no payload beyond the shared header fields.
void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}
 162 
 163 // ==================================================================
 164 // CounterData
 165 //
 166 // A CounterData corresponds to a simple counter.
 167 
 168 void CounterData::print_data_on(outputStream* st, const char* extra) const {
 169   print_shared(st, "CounterData", extra);
 170   st->print_cr("count(%u)", count());
 171 }
 172 
 173 // ==================================================================
 174 // JumpData
 175 //
 176 // A JumpData is used to access profiling information for a direct
 177 // branch.  It is a counter, used for counting the number of branches,
 178 // plus a data displacement, used for realigning the data pointer to
 179 // the corresponding target bci.
 180 
 181 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 182   assert(stream->bci() == bci(), "wrong pos");
 183   int target;
 184   Bytecodes::Code c = stream->code();
 185   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 186     target = stream->dest_w();
 187   } else {
 188     target = stream->dest();
 189   }
 190   int my_di = mdo->dp_to_di(dp());
 191   int target_di = mdo->bci_to_di(target);
 192   int offset = target_di - my_di;
 193   set_displacement(offset);
 194 }
 195 
// Print the taken count and the data displacement to the jump target.
void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
 200 
 201 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 202   // Parameter profiling include the receiver
 203   int args_count = include_receiver ? 1 : 0;
 204   ResourceMark rm;
 205   ReferenceArgumentCount rac(signature);
 206   args_count += rac.count();
 207   args_count = MIN2(args_count, max);
 208   return args_count * per_arg_cell_count;
 209 }
 210 
// Number of extra cells needed at this invoke for argument/return type
// profiling.  Zero when neither arguments nor the return are profiled here.
int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > SingleTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    // Receiver excluded here: at a call site it is profiled separately.
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = SingleTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    // A header is only needed if any type information is recorded.
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}
 232 
// Walks a method signature and records the stack-slot offsets of the
// first '_max' reference-typed parameters, for use when mapping profile
// entries to argument positions.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                    // max number of reference args to record
  int _offset;                 // running stack-slot offset
  GrowableArray<int> _offsets; // offsets of reference args, in order

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  // Called once per parameter; records the offset of reference types and
  // advances by the parameter's word count (longs/doubles take two).
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Stack-slot offset of the i-th recorded reference argument.
  int off_at(int i) const { return _offsets.at(i); }
};
 257 
// Fill in the stack-slot indices for each profiled entry; types start as
// type_none.  For parameter profiling (include_receiver) the receiver, if
// present, occupies entry 0 at slot 0 and shifts the remaining entries.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    // Offsets from the signature exclude the receiver slot; shift by one
    // when a receiver is on the stack.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}
 273 
// Initialize argument and return type entries for this invoke site.
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Cross-check that the allocated entry count matches the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // Receiver excluded: at a call site it is profiled separately.
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 294 
// Initialize argument and return type entries for this virtual invoke
// site.  Mirrors CallTypeData::post_initialize.
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Cross-check that the allocated entry count matches the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // Receiver excluded: at a call site it is profiled separately.
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 315 
 316 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 317   for (int i = 0; i < _number_of_entries; i++) {
 318     intptr_t p = type(i);
 319     Klass* k = (Klass*)klass_part(p);
 320     if (k != nullptr && (always_clean || !k->is_loader_alive())) {
 321       set_type(i, with_status((Klass*)nullptr, p));
 322     }
 323   }
 324 }
 325 
 326 void SingleTypeEntry::clean_weak_klass_links(bool always_clean) {
 327   intptr_t p = type();
 328   Klass* k = (Klass*)klass_part(p);
 329   if (k != nullptr && (always_clean || !k->is_loader_alive())) {
 330     set_type(with_status((Klass*)nullptr, p));
 331   }
 332 }
 333 
// Whether return-type profiling is globally enabled.
bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}
 337 
// Whether argument-type profiling is globally enabled.
bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}
 341 
// Print the klass component of a type-profile entry ("none"/"unknown"
// or the klass value), followed by a marker if null was ever seen.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
 354 
// Print one line per profiled entry: index, stack slot, and klass info.
void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}
 363 
// Print this entry's klass info on its own line.
void SingleTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}
 369 
// Print counter data plus any argument/return type profile sections.
void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}
 383 
// Print virtual-call receiver data plus any argument/return type sections.
void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}
 397 
 398 // ==================================================================
 399 // ReceiverTypeData
 400 //
 401 // A ReceiverTypeData is used to access profiling information about a
 402 // dynamic type check.  It consists of a counter which counts the total times
 403 // that the check is reached, and a series of (Klass*, count) pairs
 404 // which are used to store a type profile for the receiver of the check.
 405 
 406 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 407     for (uint row = 0; row < row_limit(); row++) {
 408     Klass* p = receiver(row);
 409     if (p != nullptr && (always_clean || !p->is_loader_alive())) {
 410       clear_row(row);
 411     }
 412   }
 413 }
 414 
// Print the counts and the per-receiver rows, including each receiver's
// fraction of the total observed count.
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  // Count occupied rows first.
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr)  entries++;
  }
#if INCLUDE_JVMCI
  st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
#else
  st->print_cr("count(%u) entries(%u)", count(), entries);
#endif
  // Total = generic count plus all per-receiver counts; used as the
  // denominator for the printed fractions.
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
// Print the shared header followed by the receiver rows.
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}
 444 
// Same layout as ReceiverTypeData; only the printed type name differs.
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}
 449 
 450 // ==================================================================
 451 // RetData
 452 //
 453 // A RetData is used to access profiling information for a ret bytecode.
 454 // It is composed of a count of the number of times that the ret has
 455 // been executed, followed by a series of triples of the form
 456 // (bci, count, di) which count the number of times that some bci was the
 457 // target of the ret and cache a corresponding displacement.
 458 
// Mark every cache row empty (bci == no_bci, displacement == -1).
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}
 468 
 469 // This routine needs to atomically update the RetData structure, so the
 470 // caller needs to hold the RetData_lock before it gets here.  Since taking
 471 // the lock can block (and allow GC) and since RetData is a ProfileData is a
 472 // wrapper around a derived oop, taking the lock in _this_ method will
 473 // basically cause the 'this' pointer's _data field to contain junk after the
 474 // lock.  We require the caller to take the lock before making the ProfileData
 475 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
// Claim a free cache row for 'return_bci' (if any remain) and return the
// mdp for that bci.  Caller must hold RetData_lock (see comment above).
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, mdp - dp());
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  // If all rows were taken, no row is updated; the mdp is still returned.
  return mdp;
}
 493 
// Print the overall count and one line per occupied (bci, count,
// displacement) cache row.
void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  // Count occupied rows first.
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci)  entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}
 510 
 511 // ==================================================================
 512 // BranchData
 513 //
 514 // A BranchData is used to access profiling data for a two-way branch.
 515 // It consists of taken and not_taken counts as well as a data displacement
 516 // for the taken case.
 517 
 518 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 519   assert(stream->bci() == bci(), "wrong pos");
 520   int target = stream->dest();
 521   int my_di = mdo->dp_to_di(dp());
 522   int target_di = mdo->bci_to_di(target);
 523   int offset = target_di - my_di;
 524   set_displacement(offset);
 525 }
 526 
 527 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 528   print_shared(st, "BranchData", extra);
 529   if (data()->flags()) {
 530     tty->cr();
 531     tab(st);
 532   }
 533   st->print_cr("taken(%u) displacement(%d)",
 534                taken(), displacement());
 535   tab(st);
 536   st->print_cr("not taken(%u)", not_taken());
 537 }
 538 
 539 // ==================================================================
 540 // MultiBranchData
 541 //
 542 // A MultiBranchData is used to access profiling information for
 543 // a multi-way branch (*switch bytecodes).  It consists of a series
 544 // of (count, displacement) pairs, which count the number of times each
 545 // case was taken and specify the data displacement for each branch target.
 546 
 547 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 548   int cell_count = 0;
 549   if (stream->code() == Bytecodes::_tableswitch) {
 550     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 551     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 552   } else {
 553     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 554     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 555   }
 556   return cell_count;
 557 }
 558 
 559 void MultiBranchData::post_initialize(BytecodeStream* stream,
 560                                       MethodData* mdo) {
 561   assert(stream->bci() == bci(), "wrong pos");
 562   int target;
 563   int my_di;
 564   int target_di;
 565   int offset;
 566   if (stream->code() == Bytecodes::_tableswitch) {
 567     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 568     int len = sw.length();
 569     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 570     for (int count = 0; count < len; count++) {
 571       target = sw.dest_offset_at(count) + bci();
 572       my_di = mdo->dp_to_di(dp());
 573       target_di = mdo->bci_to_di(target);
 574       offset = target_di - my_di;
 575       set_displacement_at(count, offset);
 576     }
 577     target = sw.default_offset() + bci();
 578     my_di = mdo->dp_to_di(dp());
 579     target_di = mdo->bci_to_di(target);
 580     offset = target_di - my_di;
 581     set_default_displacement(offset);
 582 
 583   } else {
 584     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 585     int npairs = sw.number_of_pairs();
 586     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 587     for (int count = 0; count < npairs; count++) {
 588       LookupswitchPair pair = sw.pair_at(count);
 589       target = pair.offset() + bci();
 590       my_di = mdo->dp_to_di(dp());
 591       target_di = mdo->bci_to_di(target);
 592       offset = target_di - my_di;
 593       set_displacement_at(count, offset);
 594     }
 595     target = sw.default_offset() + bci();
 596     my_di = mdo->dp_to_di(dp());
 597     target_di = mdo->bci_to_di(target);
 598     offset = target_di - my_di;
 599     set_default_displacement(offset);
 600   }
 601 }
 602 
// Print the default target's count/displacement, then one line per case.
void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}
 614 
 615 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 616   print_shared(st, "ArgInfoData", extra);
 617   int nargs = number_of_args();
 618   for (int i = 0; i < nargs; i++) {
 619     st->print("  0x%x", arg_modified(i));
 620   }
 621   st->cr();
 622 }
 623 
// Cells needed to profile this method's reference parameters (including
// the receiver for non-static methods), or 0 if parameter profiling is
// off for this method or there is nothing to profile.
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means "no limit".
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}
 635 
// Set up stack slots for the method's parameters; receiver included
// for non-static methods (third argument 'true' = parameter profiling).
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
 639 
// Whether parameter-type profiling is globally enabled.
bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}
 643 
// Print the shared header followed by the per-parameter type entries.
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
 650 
// Print the shared header and the method this speculative trap refers to.
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
 657 
// Print the array-type and element-type profile sections.
void ArrayLoadStoreData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArrayLoadStore", extra);
  st->cr();
  tab(st, true);
  st->print("array");
  _array.print_data_on(st);
  tab(st, true);
  st->print("element");
  _element.print_data_on(st);
}
 668 
// Print branch counts plus the left and right operand type profiles.
void ACmpData::print_data_on(outputStream* st, const char* extra) const {
  BranchData::print_data_on(st, extra);
  tab(st, true);
  st->print("left");
  _left.print_data_on(st);
  tab(st, true);
  st->print("right");
  _right.print_data_on(st);
}
 678 
 679 // ==================================================================
 680 // MethodData*
 681 //
 682 // A MethodData* holds information which has been collected about
 683 // a method.
 684 
// Allocate a MethodData for 'method' in the loader's metaspace.  May
// safepoint/GC via the metaspace allocation, hence the no-locks assert.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
 692 
// Number of profile cells for a given bytecode, or no_profile_data if the
// bytecode is not profiled, or variable_cell_count if the size depends on
// the specific bytecode instance (switches, type-profiled invokes).
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  // C1-only without interpreter profiling needs no MDO slots at all.
  if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
    return no_profile_data;
  }
  switch (code) {
  // Type checks: full receiver-type rows when cast profiling is on,
  // otherwise just the null-seen bit.
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  // Array accesses profile array and element types.
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
    return ArrayLoadStoreData::static_cell_count();
  // Non-virtual invokes: size varies if argument/return profiling is on.
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  // Unconditional jumps record taken count + displacement.
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  // Virtual invokes: receiver rows, plus type profiling if enabled.
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  // ret caches (bci, count, displacement) rows.
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  // Two-way branches record taken/not-taken counts.
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  // Reference comparisons additionally profile operand types.
  case Bytecodes::_if_acmpne:
  case Bytecodes::_if_acmpeq:
    return ACmpData::static_cell_count();
  // Switch size depends on the number of cases.
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
 760 
 761 // Compute the size of the profiling information corresponding to
 762 // the current bytecode.
// Size in bytes of the profile entry for the current bytecode, resolving
// variable_cell_count cases by inspecting the specific instruction.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      // Only this particular call site may warrant type profiling.
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}
 805 
// Whether extra-data slots for speculative traps should be reserved for
// this bytecode (only relevant when C2 type speculation is in use).
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
    // Intentional fallthrough: without C2 these bytecodes get no
    // speculative-trap slots either.
  default:
    return false;
  }
  return false;
}
 830 
 831 #if INCLUDE_JVMCI
 832 
// Placement-size new: allocates fs_size bytes (header plus trailing
// speculation data) on the C heap; returns nullptr on failure.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
 836 
// Copy the speculation bytes into the trailing data area allocated by
// operator new(size_t, size_t).
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}
 840 
 841 // A heuristic check to detect nmethods that outlive a failed speculations list.
 842 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 843   jlong head = (jlong)(address) *failed_speculations_address;
 844   if ((head & 0x1) == 0x1) {
 845     stringStream st;
 846     if (nm != nullptr) {
 847       st.print("%d", nm->compile_id());
 848       Method* method = nm->method();
 849       st.print_raw("{");
 850       if (method != nullptr) {
 851         method->print_name(&st);
 852       } else {
 853         const char* jvmci_name = nm->jvmci_name();
 854         if (jvmci_name != nullptr) {
 855           st.print_raw(jvmci_name);
 856         }
 857       }
 858       st.print_raw("}");
 859     } else {
 860       st.print("<unknown>");
 861     }
 862     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 863   }
 864 }
 865 
// Records a failed speculation in the linked list rooted at
// *failed_speculations_address. Lock-free: walks to the terminating null
// slot and CASes the new node in. Returns true if a new entry was appended,
// false if the speculation was already present (or allocation failed).
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  // Detect use of a list that has already been freed (low bit set on head).
  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        // Alignment matters: a set low bit on the head is how a freed list
        // is marked (see guarantee_failed_speculations_alive).
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
      // Lost the CAS to a concurrent append: fall through and examine the
      // entry that won before advancing.
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        // Duplicate found: discard the speculatively allocated node.
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}
 902 
 903 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 904   assert(failed_speculations_address != nullptr, "must be");
 905   FailedSpeculation* fs = *failed_speculations_address;
 906   while (fs != nullptr) {
 907     FailedSpeculation* next = fs->next();
 908     delete fs;
 909     fs = next;
 910   }
 911 
 912   // Write an unaligned value to failed_speculations_address to denote
 913   // that it is no longer a valid pointer. This is allows for the check
 914   // in add_failed_speculation against adding to a freed failed
 915   // speculations list.
 916   long* head = (long*) failed_speculations_address;
 917   (*head) = (*head) | 0x1;
 918 }
 919 #endif // INCLUDE_JVMCI
 920 
// Heuristically sizes the extra-data region: slots for traps at BCIs that
// got no regular profile entry (empty_bc_count), plus reserved room for
// SpeculativeTrapData entries when speculation is in use. The JVMCI build
// uses a more generous estimate because JVMCI compilers record more traps.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more
    // extra_data_count is never negative here: empty_bc_count >= 0 and the
    // unsigned arithmetic above only produces small non-negative values.

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}
 969 
// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
// NOTE: this must stay in lock-step with MethodData::initialize(), which
// asserts that the size it lays out equals the size computed here.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    // JVMCI builds only count a bytecode as "empty" if it can actually trap.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }
  return object_size;
}
1002 
1003 // Compute the size of the MethodData* necessary to store
1004 // profiling information about a given method.  Size is in words
1005 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1006   int byte_size = compute_allocation_size_in_bytes(method);
1007   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1008   return align_metadata_size(word_size);
1009 }
1010 
// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
// Selects the DataLayout tag and cell count for the bytecode at the
// stream's current position and initializes the layout at data_index.
// Returns 0 for bytecodes that get no profile entry.
int MethodData::initialize_data(BytecodeStream* stream,
                                       int data_index) {
  // With C1-simple-only compilation and no interpreter profiling there is
  // no consumer for per-bytecode profiles, so lay out nothing.
  if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
    return 0;
  }
  int cell_count = -1;  // -1 means: no profile slot for this bytecode
  int tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      // Without cast profiling only a bit cell (e.g. for traps) is kept.
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
    cell_count = ArrayLoadStoreData::static_cell_count();
    tag = DataLayout::array_load_store_data_tag;
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    // Upgrade from CounterData to CallTypeData when argument/return type
    // profiling is requested for this call site.
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // compute_cell_count may still yield the plain counter size (no types
    // worth profiling); pick the tag accordingly.
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    // Same upgrade logic as above, but based on VirtualCallData, which
    // additionally records receiver types.
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
    cell_count = ACmpData::static_cell_count();
    tag = DataLayout::acmp_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Variable-length: one slot per switch target.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // Bytecode is not profiled: cell_count stays -1 and we return 0 below.
    break;
  }
  // Cross-check against bytecode_cell_count(); variable-length kinds
  // (multi-branch, and call sites upgraded for type profiling) are exempt.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, stream->bci(), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}
1144 
1145 // Get the data at an arbitrary (sort of) data index.
1146 ProfileData* MethodData::data_at(int data_index) const {
1147   if (out_of_bounds(data_index)) {
1148     return nullptr;
1149   }
1150   DataLayout* data_layout = data_layout_at(data_index);
1151   return data_layout->data_in();
1152 }
1153 
// Number of value cells in this layout, dispatched on its tag. Fixed-size
// kinds answer statically; variable-length kinds consult a (resource-
// allocated) accessor object wrapped around this layout.
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  case DataLayout::array_load_store_data_tag:
    return ((new ArrayLoadStoreData(this))->cell_count());
  case DataLayout::acmp_data_tag:
    return ((new ACmpData(this))->cell_count());
  }
}
// Wraps this layout in the ProfileData accessor class matching its tag.
// The returned object is resource-allocated, so callers need an active
// ResourceMark scope.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  case DataLayout::array_load_store_data_tag:
    return new ArrayLoadStoreData(this);
  case DataLayout::acmp_data_tag:
    return new ACmpData(this);
  }
}
1230 
1231 // Iteration over data.
1232 ProfileData* MethodData::next_data(ProfileData* current) const {
1233   int current_index = dp_to_di(current->dp());
1234   int next_index = current_index + current->size_in_bytes();
1235   ProfileData* next = data_at(next_index);
1236   return next;
1237 }
1238 
1239 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1240   int current_index = dp_to_di((address)current);
1241   int next_index = current_index + current->size_in_bytes();
1242   if (out_of_bounds(next_index)) {
1243     return nullptr;
1244   }
1245   DataLayout* next = data_layout_at(next_index);
1246   return next;
1247 }
1248 
1249 // Give each of the data entries a chance to perform specific
1250 // data initialization.
1251 void MethodData::post_initialize(BytecodeStream* stream) {
1252   ResourceMark rm;
1253   ProfileData* data;
1254   for (data = first_data(); is_valid(data); data = next_data(data)) {
1255     stream->set_start(data->bci());
1256     stream->next();
1257     data->post_initialize(stream, this);
1258   }
1259   if (_parameters_type_data_di != no_parameters) {
1260     parameters_type_data()->post_initialize(nullptr, this);
1261   }
1262 }
1263 
// Initialize the MethodData* corresponding to a given method.
// The heavy lifting (bytecode walk and cell layout) is done by initialize().
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _extra_data_lock(Mutex::safepoint-2, "MDOExtraData_lock"),
    _compiler_counters(),
    // Marked uninitialized here; initialize() sets the real offset (or
    // no_parameters) once the layout is known.
    _parameters_type_data_di(parameters_uninitialized) {
  initialize();
}
1273 
// Lays out and initializes all profile sections of this MethodData:
// per-bytecode cells, the extra-data (trap) region, the arg-info cell,
// and the optional parameter-profiling area. The layout arithmetic must
// match compute_allocation_size_in_bytes() exactly (asserted at the end).
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    // JVMCI builds only count a bytecode as "empty" if it can actually trap.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  // One flag cell per parameter plus one (hence arg_size+1).
  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  if (parms_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(parms_cell);
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
1345 
// Resets the non-layout state of this MethodData: counters, notification
// masks, RTM/JVMCI state and escape flags. Called from initialize() and on
// reinitialization.
void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  // The notify frequency logs can be scaled per method via the
  // CompileThresholdScaling compile command; the masks derived here
  // gate how often the interpreter notifies the compilation policy.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale);
  _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM; // No RTM lock eliding by default
  if (UseRTMLocking &&
      !CompilerOracle::has_option(mh, CompileCommand::NoRTMLockEliding)) {
    if (CompilerOracle::has_option(mh, CompileCommand::UseRTMLockEliding) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize escape flags.
  clear_escape_info();
}
1388 
1389 // Get a measure of how much mileage the method has on it.
1390 int MethodData::mileage_of(Method* method) {
1391   return MAX2(method->invocation_count(), method->backedge_count());
1392 }
1393 
// A profile is "mature" when the compilation policy considers it
// populated enough to be trusted by the compilers.
bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(_method);
}
1397 
// Translate a bci to its corresponding data index (di).
// Scans forward from the cached hint position; returns the entry at bci
// (or the first entry past it) and updates the hint to speed up future
// nearby lookups. Returns the limit position if bci is beyond all entries.
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Cache the exact hit, or the predecessor when we overshot.
      if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}
1413 
1414 // Translate a bci to its corresponding data, or null.
1415 ProfileData* MethodData::bci_to_data(int bci) {
1416   DataLayout* data = data_layout_before(bci);
1417   for ( ; is_valid(data); data = next_data_layout(data)) {
1418     if (data->bci() == bci) {
1419       set_hint_di(dp_to_di((address)data));
1420       return data->data_in();
1421     } else if (data->bci() > bci) {
1422       break;
1423     }
1424   }
1425   return bci_to_extra_data(bci, nullptr, false);
1426 }
1427 
1428 DataLayout* MethodData::next_extra(DataLayout* dp) {
1429   int nb_cells = 0;
1430   switch(dp->tag()) {
1431   case DataLayout::bit_data_tag:
1432   case DataLayout::no_tag:
1433     nb_cells = BitData::static_cell_count();
1434     break;
1435   case DataLayout::speculative_trap_data_tag:
1436     nb_cells = SpeculativeTrapData::static_cell_count();
1437     break;
1438   default:
1439     fatal("unexpected tag %d", dp->tag());
1440   }
1441   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1442 }
1443 
// Scans the extra-data section for an entry matching bci (and, for
// speculative traps, the trapping method m). dp is in/out: on a miss it
// is left at the first free slot (or at the end), so the caller can
// allocate there. 'concurrent' states whether other threads may be
// allocating entries concurrently (affects an assert only).
ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First free slot: nothing at this bci yet.
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      // Plain trap entries match on bci only, and only for bci lookups
      // (m == nullptr).
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        // data->method() may be null in case of a concurrent
        // allocation. Maybe it's for the same method. Try to use that
        // entry in that case.
        if (dp->bci() == bci) {
          if (data->method() == nullptr) {
            assert(concurrent, "impossible because no concurrent allocation");
            return nullptr;
          } else if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;  // unreachable: the loop exits only via return
}
1484 
1485 
// Translate a bci to its corresponding extra data, or null.
// Optionally allocates the entry (a BitData when m == nullptr, otherwise a
// SpeculativeTrapData for method m) using a lock-free read followed by a
// locked re-check and allocation.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non atomic concurrent
  // allocation would result in a corrupted extra data space.
  // First pass: lock-free lookup; leaves dp at the first free slot on a miss.
  ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
  if (result != nullptr) {
    return result;
  }

  if (create_if_missing && dp < end) {
    MutexLocker ml(&_extra_data_lock);
    // Check again now that we have the lock. Another thread may
    // have added extra data entries.
    ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
    if (result != nullptr || dp >= end) {
      return result;
    }

    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Publish the header via a fully initialized temporary so readers never
    // see a partially written one.
    DataLayout temp;
    temp.initialize(tag, bci, 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}
1541 
1542 ArgInfoData *MethodData::arg_info() {
1543   DataLayout* dp    = extra_data_base();
1544   DataLayout* end   = args_data_limit();
1545   for (; dp < end; dp = next_extra(dp)) {
1546     if (dp->tag() == DataLayout::arg_info_data_tag)
1547       return new ArgInfoData(dp);
1548   }
1549   return nullptr;
1550 }
1551 
1552 // Printing
1553 
// Full dump: header line identifying the method, followed by all
// profile entries (see print_data_on).
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1561 
// Short one-line description, used wherever metadata is value-printed.
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1567 
// Prints the parameter-profiling area (if any), the regular per-bytecode
// entries, then the extra-data section, each entry prefixed by its data
// index.
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // Unclaimed slot: nothing to print, skip to the next entry.
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    // dp was set to end when ArgInfoData (the final entry) was printed.
    if (dp >= end) return;
  }
}
1608 
1609 // Verification
1610 
// Metadata verification entry point; delegates to verify_data_on.
void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}
1616 
// Placeholder: per-entry verification of the profile data has not been
// implemented yet (tracked via NEEDS_CLEANUP).
void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}
1621 
1622 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1623   if (m->is_compiled_lambda_form()) {
1624     return true;
1625   }
1626 
1627   Bytecode_invoke inv(m , bci);
1628   return inv.is_invokedynamic() || inv.is_invokehandle();
1629 }
1630 
1631 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1632   Bytecode_invoke inv(m , bci);
1633   if (inv.is_invokevirtual()) {
1634     Symbol* klass = inv.klass();
1635     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1636         klass == vmSymbols::sun_misc_Unsafe() ||
1637         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1638       Symbol* name = inv.name();
1639       if (name->starts_with("get") || name->starts_with("put")) {
1640         return true;
1641       }
1642     }
1643   }
1644   return false;
1645 }
1646 
1647 int MethodData::profile_arguments_flag() {
1648   return TypeProfileLevel % 10;
1649 }
1650 
1651 bool MethodData::profile_arguments() {
1652   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1653 }
1654 
1655 bool MethodData::profile_arguments_jsr292_only() {
1656   return profile_arguments_flag() == type_profile_jsr292;
1657 }
1658 
1659 bool MethodData::profile_all_arguments() {
1660   return profile_arguments_flag() == type_profile_all;
1661 }
1662 
1663 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1664   if (!profile_arguments()) {
1665     return false;
1666   }
1667 
1668   if (profile_all_arguments()) {
1669     return true;
1670   }
1671 
1672   if (profile_unsafe(m, bci)) {
1673     return true;
1674   }
1675 
1676   assert(profile_arguments_jsr292_only(), "inconsistent");
1677   return profile_jsr292(m, bci);
1678 }
1679 
1680 int MethodData::profile_return_flag() {
1681   return (TypeProfileLevel % 100) / 10;
1682 }
1683 
1684 bool MethodData::profile_return() {
1685   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1686 }
1687 
1688 bool MethodData::profile_return_jsr292_only() {
1689   return profile_return_flag() == type_profile_jsr292;
1690 }
1691 
1692 bool MethodData::profile_all_return() {
1693   return profile_return_flag() == type_profile_all;
1694 }
1695 
1696 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1697   if (!profile_return()) {
1698     return false;
1699   }
1700 
1701   if (profile_all_return()) {
1702     return true;
1703   }
1704 
1705   assert(profile_return_jsr292_only(), "inconsistent");
1706   return profile_jsr292(m, bci);
1707 }
1708 
1709 int MethodData::profile_parameters_flag() {
1710   return TypeProfileLevel / 100;
1711 }
1712 
1713 bool MethodData::profile_parameters() {
1714   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1715 }
1716 
1717 bool MethodData::profile_parameters_jsr292_only() {
1718   return profile_parameters_flag() == type_profile_jsr292;
1719 }
1720 
1721 bool MethodData::profile_all_parameters() {
1722   return profile_parameters_flag() == type_profile_all;
1723 }
1724 
1725 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1726   if (!profile_parameters()) {
1727     return false;
1728   }
1729 
1730   if (profile_all_parameters()) {
1731     return true;
1732   }
1733 
1734   assert(profile_parameters_jsr292_only(), "inconsistent");
1735   return m->is_compiled_lambda_form();
1736 }
1737 
// Visit the metaspace pointers embedded in this MethodData (only the
// back-pointer to the owning Method). Used by CDS archiving, hence the
// cds-tagged trace logging.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}
1742 
1743 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1744   if (shift == 0) {
1745     return;
1746   }
1747   if (!reset) {
1748     // Move all cells of trap entry at dp left by "shift" cells
1749     intptr_t* start = (intptr_t*)dp;
1750     intptr_t* end = (intptr_t*)next_extra(dp);
1751     for (intptr_t* ptr = start; ptr < end; ptr++) {
1752       *(ptr-shift) = *ptr;
1753     }
1754   } else {
1755     // Reset "shift" cells stopping at dp
1756     intptr_t* start = ((intptr_t*)dp) - shift;
1757     intptr_t* end = (intptr_t*)dp;
1758     for (intptr_t* ptr = start; ptr < end; ptr++) {
1759       *ptr = 0;
1760     }
1761   }
1762 }
1763 
1764 // Check for entries that reference an unloaded method
1765 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1766   bool _always_clean;
1767 public:
1768   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1769   bool is_live(Method* m) {
1770     return !(_always_clean) && m->method_holder()->is_loader_alive();
1771   }
1772 };
1773 
1774 // Check for entries that reference a redefined method
1775 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1776 public:
1777   CleanExtraDataMethodClosure() {}
1778   bool is_live(Method* m) { return !m->is_old(); }
1779 };
1780 
1781 
1782 // Remove SpeculativeTrapData entries that reference an unloaded or
1783 // redefined method
// Compact the extra data section in place, removing SpeculativeTrapData
// entries whose method the closure reports as dead. Surviving entries are
// slid left over the dead ones; the freed tail cells are reset to no_tag.
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0; // cells occupied by dead entries seen so far
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
1825 
1826 // Verify there's no unloaded or redefined method referenced by a
1827 // SpeculativeTrapData entry
// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry. Debug-only (no-op in product builds).
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      // Every surviving trap entry must reference a live method.
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the trap entries; nothing further to check.
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}
1852 
1853 void MethodData::clean_method_data(bool always_clean) {
1854   ResourceMark rm;
1855   for (ProfileData* data = first_data();
1856        is_valid(data);
1857        data = next_data(data)) {
1858     data->clean_weak_klass_links(always_clean);
1859   }
1860   ParametersTypeData* parameters = parameters_type_data();
1861   if (parameters != nullptr) {
1862     parameters->clean_weak_klass_links(always_clean);
1863   }
1864 
1865   CleanExtraDataKlassClosure cl(always_clean);
1866   clean_extra_data(&cl);
1867   verify_extra_data_clean(&cl);
1868 }
1869 
1870 // This is called during redefinition to clean all "old" redefined
1871 // methods out of MethodData for all methods.
1872 void MethodData::clean_weak_method_links() {
1873   ResourceMark rm;
1874   CleanExtraDataMethodClosure cl;
1875   clean_extra_data(&cl);
1876   verify_extra_data_clean(&cl);
1877 }
1878 
// Release everything owned by this MethodData before it is deallocated.
// loader_data is part of the deallocation interface but unused here.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
1882 
// Free C-heap storage hanging off this MethodData. Only the JVMCI failed
// speculations list lives in the C heap; without JVMCI this is a no-op.
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}