1 /*
   2  * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/cdsConfig.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/systemDictionaryShared.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compilationPolicy.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "interpreter/bytecode.hpp"
  33 #include "interpreter/bytecodeStream.hpp"
  34 #include "interpreter/linkResolver.hpp"
  35 #include "memory/metaspaceClosure.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "oops/klass.inline.hpp"
  38 #include "oops/method.inline.hpp"
  39 #include "oops/methodData.inline.hpp"
  40 #include "prims/jvmtiRedefineClasses.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/deoptimization.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/orderAccess.hpp"
  45 #include "runtime/safepointVerifiers.hpp"
  46 #include "runtime/signature.hpp"
  47 #include "utilities/align.hpp"
  48 #include "utilities/checkedCast.hpp"
  49 #include "utilities/copy.hpp"
  50 
  51 // ==================================================================
  52 // DataLayout
  53 //
  54 // Overlay for generic profiling data.
  55 
  56 // Some types of data layouts need a length field.
  57 bool DataLayout::needs_array_len(u1 tag) {
  58   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  59 }
  60 
// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Stage the header in a local so the published header is complete.
  DataLayout temp;
  temp._header._bits = (intptr_t)0;
  temp._header._struct._tag = tag;
  temp._header._struct._bci = bci;
  // Write the header using a single intptr_t write.  This ensures that if the layout is
  // reinitialized readers will never see the transient state where the header is 0.
  _header = temp._header;

  // Zero all payload cells.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  // Variable-length layouts record their own length (header excluded).
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Call-type layouts need extra setup of their argument/return sections.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}
  84 
void DataLayout::clean_weak_klass_links(bool always_clean) {
  // data_in() materializes a ProfileData wrapper in the resource area,
  // hence the ResourceMark; the wrapper does the tag-specific cleaning.
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}
  89 
  90 
  91 // ==================================================================
  92 // ProfileData
  93 //
  94 // A ProfileData object is created to refer to a section of profiling
  95 // data in a structured way.
  96 
  97 // Constructor for invalid ProfileData.
  98 ProfileData::ProfileData() {
  99   _data = nullptr;
 100 }
 101 
// Scan the MethodData's extra-data section for speculative-trap entries
// matching this data's bci and render them into a resource-allocated string
// (possibly empty).  The scan exits via the return inside the loop.
char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      // Only traps recorded at this bci are relevant.
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the extra-data entries we care about.
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr; // unreachable: the loop only exits via the return above
}
 131 
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  // Include any speculative-trap info recorded for this bci in the printout.
  print_data_on(st, print_data_on_helper(md));
}
 135 
// Print the parts common to every ProfileData: bci, the type name, any
// trap state, the caller-supplied extra string, and non-zero flags.
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    // Also dump the raw layout address and flags offset for debugging.
    st->print("flags(%d) %p/%d", flags, data(), in_bytes(DataLayout::flags_offset()));
  }
}
 154 
 155 void ProfileData::tab(outputStream* st, bool first) const {
 156   st->fill_to(first ? tab_width_one : tab_width_two);
 157 }
 158 
 159 // ==================================================================
 160 // BitData
 161 //
 162 // A BitData corresponds to a one-bit flag.  This is used to indicate
 163 // whether a checkcast bytecode has seen a null value.
 164 
 165 
 166 void BitData::print_data_on(outputStream* st, const char* extra) const {
 167   print_shared(st, "BitData", extra);
 168   st->cr();
 169 }
 170 
 171 // ==================================================================
 172 // CounterData
 173 //
 174 // A CounterData corresponds to a simple counter.
 175 
 176 void CounterData::print_data_on(outputStream* st, const char* extra) const {
 177   print_shared(st, "CounterData", extra);
 178   st->print_cr("count(%u)", count());
 179 }
 180 
 181 // ==================================================================
 182 // JumpData
 183 //
 184 // A JumpData is used to access profiling information for a direct
 185 // branch.  It is a counter, used for counting the number of branches,
 186 // plus a data displacement, used for realigning the data pointer to
 187 // the corresponding target bci.
 188 
 189 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 190   assert(stream->bci() == bci(), "wrong pos");
 191   int target;
 192   Bytecodes::Code c = stream->code();
 193   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 194     target = stream->dest_w();
 195   } else {
 196     target = stream->dest();
 197   }
 198   int my_di = mdo->dp_to_di(dp());
 199   int target_di = mdo->bci_to_di(target);
 200   int offset = target_di - my_di;
 201   set_displacement(offset);
 202 }
 203 
 204 void JumpData::print_data_on(outputStream* st, const char* extra) const {
 205   print_shared(st, "JumpData", extra);
 206   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 207 }
 208 
 209 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 210   // Parameter profiling include the receiver
 211   int args_count = include_receiver ? 1 : 0;
 212   ResourceMark rm;
 213   ReferenceArgumentCount rac(signature);
 214   args_count += rac.count();
 215   args_count = MIN2(args_count, max);
 216   return args_count * per_arg_cell_count;
 217 }
 218 
 219 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
 220   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 221   assert(TypeStackSlotEntries::per_arg_count() > SingleTypeEntry::static_cell_count(), "code to test for arguments/results broken");
 222   const methodHandle m = stream->method();
 223   int bci = stream->bci();
 224   Bytecode_invoke inv(m, bci);
 225   int args_cell = 0;
 226   if (MethodData::profile_arguments_for_invoke(m, bci)) {
 227     args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
 228   }
 229   int ret_cell = 0;
 230   if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
 231     ret_cell = SingleTypeEntry::static_cell_count();
 232   }
 233   int header_cell = 0;
 234   if (args_cell + ret_cell > 0) {
 235     header_cell = header_cell_count();
 236   }
 237 
 238   return header_cell + args_cell + ret_cell;
 239 }
 240 
// Walks a method signature and records the stack-slot offset of each of the
// first '_max' reference-typed arguments (receiver not included).
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                      // max number of reference args to record
  int _offset;                   // running stack-slot offset within the args
  GrowableArray<int> _offsets;   // offsets of the recorded reference args

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  // Called once per parameter type; records offsets of reference args.
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    // long/double occupy two slots, everything else one.
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Offset of the i-th recorded reference argument.
  int off_at(int i) const { return _offsets.at(i); }
};
 265 
// Fill in the stack-slot cells for each profiled argument and mark all type
// cells as "none".  'include_receiver' is true for parameter profiling.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  // Compute the slot of each remaining reference argument from the signature;
  // shift by one when a receiver occupies slot 0.
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}
 281 
// Initialize the argument and return type sections for this invoke site,
// based on the call's signature.
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Cross-check: the cell layout must agree with the signature's
    // reference-argument count (clamped to the profiling limit).
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 302 
// Initialize the argument and return type sections for this virtual invoke
// site, based on the call's signature.
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Cross-check: the cell layout must agree with the signature's
    // reference-argument count (clamped to the profiling limit).
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 323 
// During CDS/AOT archive dumping, decide whether a klass recorded in an MDO
// must be purged (unloaded or excluded from the archive).  Outside of a
// dump-at-safepoint this always answers false.
static bool is_excluded(Klass* k) {
#if INCLUDE_CDS
  if (SafepointSynchronize::is_at_safepoint() &&
      CDSConfig::is_dumping_archive() &&
      CDSConfig::current_thread_is_vm_or_dumper()) {
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
      log_debug(aot, training)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
      return true;
    } else {
      bool excluded = SystemDictionaryShared::should_be_excluded(k);
      if (excluded) {
        log_debug(aot, training)("Purged %s from MDO: excluded class", k->name()->as_C_string());
      }
      return excluded;
    }
  }
#endif
  return false;
}
 343 
// Clear type entries whose klass is dead (or all of them if always_clean),
// replacing the klass part while preserving the status bits.
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
        // Keep the status bits (null_seen etc.) but drop the klass.
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}
 358 
 359 void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
 360   for (int i = 0; i < _number_of_entries; i++) {
 361     Klass** k = (Klass**)type_adr(i); // tagged
 362     it->push(k);
 363   }
 364 }
 365 
// Clear this entry's klass if it is dead (or unconditionally if
// always_clean), preserving the status bits.
void SingleTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
      // Keep the status bits but drop the klass.
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}
 378 
 379 void SingleTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
 380   Klass** k = (Klass**)type_adr(); // tagged
 381   it->push(k);
 382 }
 383 
// True if return-type profiling at call sites is enabled globally.
bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}
 387 
// True if argument-type profiling at call sites is enabled globally.
bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}
 391 
 392 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
 393   if (is_type_none(k)) {
 394     st->print("none");
 395   } else if (is_type_unknown(k)) {
 396     st->print("unknown");
 397   } else {
 398     valid_klass(k)->print_value_on(st);
 399   }
 400   if (was_null_seen(k)) {
 401     st->print(" (null seen)");
 402   }
 403 }
 404 
 405 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
 406   for (int i = 0; i < _number_of_entries; i++) {
 407     _pd->tab(st);
 408     st->print("%d: stack(%u) ", i, stack_slot(i));
 409     print_klass(st, type(i));
 410     st->cr();
 411   }
 412 }
 413 
 414 void SingleTypeEntry::print_data_on(outputStream* st) const {
 415   _pd->tab(st);
 416   print_klass(st, type());
 417   st->cr();
 418 }
 419 
 420 void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
 421   CounterData::print_data_on(st, extra);
 422   if (has_arguments()) {
 423     tab(st, true);
 424     st->print("argument types");
 425     _args.print_data_on(st);
 426   }
 427   if (has_return()) {
 428     tab(st, true);
 429     st->print("return type");
 430     _ret.print_data_on(st);
 431   }
 432 }
 433 
 434 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
 435   VirtualCallData::print_data_on(st, extra);
 436   if (has_arguments()) {
 437     tab(st, true);
 438     st->print("argument types");
 439     _args.print_data_on(st);
 440   }
 441   if (has_return()) {
 442     tab(st, true);
 443     st->print("return type");
 444     _ret.print_data_on(st);
 445   }
 446 }
 447 
 448 // ==================================================================
 449 // ReceiverTypeData
 450 //
 451 // A ReceiverTypeData is used to access profiling information about a
 452 // dynamic type check.  It consists of a counter which counts the total times
 453 // that the check is reached, and a series of (Klass*, count) pairs
 454 // which are used to store a type profile for the receiver of the check.
 455 
 456 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 457     for (uint row = 0; row < row_limit(); row++) {
 458     Klass* p = receiver(row);
 459     if (p != nullptr) {
 460       if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
 461         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 462       }
 463       if (always_clean || !p->is_loader_present_and_alive() || is_excluded(p)) {
 464         clear_row(row);
 465       }
 466     }
 467   }
 468 }
 469 
 470 void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
 471   for (uint row = 0; row < row_limit(); row++) {
 472     Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
 473     it->push(recv);
 474   }
 475 }
 476 
 477 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 478   uint row;
 479   int entries = 0;
 480   for (row = 0; row < row_limit(); row++) {
 481     if (receiver(row) != nullptr)  entries++;
 482   }
 483   st->print_cr("count(%u) entries(%u)", count(), entries);
 484   int total = count();
 485   for (row = 0; row < row_limit(); row++) {
 486     if (receiver(row) != nullptr) {
 487       total += receiver_count(row);
 488     }
 489   }
 490   for (row = 0; row < row_limit(); row++) {
 491     if (receiver(row) != nullptr) {
 492       tab(st);
 493       receiver(row)->print_value_on(st);
 494       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 495     }
 496   }
 497 }
 498 void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
 499   print_shared(st, "ReceiverTypeData", extra);
 500   print_receiver_data_on(st);
 501 }
 502 
 503 void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
 504   print_shared(st, "VirtualCallData", extra);
 505   print_receiver_data_on(st);
 506 }
 507 
 508 // ==================================================================
 509 // RetData
 510 //
 511 // A RetData is used to access profiling information for a ret bytecode.
 512 // It is composed of a count of the number of times that the ret has
 513 // been executed, followed by a series of triples of the form
 514 // (bci, count, di) which count the number of times that some bci was the
 515 // target of the ret and cache a corresponding displacement.
 516 
// Mark every cache row empty (bci == no_bci, displacement invalid).
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}
 526 
 527 // This routine needs to atomically update the RetData structure, so the
 528 // caller needs to hold the RetData_lock before it gets here.  Since taking
 529 // the lock can block (and allow GC) and since RetData is a ProfileData is a
 530 // wrapper around a derived oop, taking the lock in _this_ method will
 531 // basically cause the 'this' pointer's _data field to contain junk after the
 532 // lock.  We require the caller to take the lock before making the ProfileData
 533 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
// Claim a free cache row (if any) for return_bci, recording its
// displacement, and return the mdp for that bci.  Caller holds RetData_lock
// (see the comment block above).
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}
 551 
 552 void RetData::print_data_on(outputStream* st, const char* extra) const {
 553   print_shared(st, "RetData", extra);
 554   uint row;
 555   int entries = 0;
 556   for (row = 0; row < row_limit(); row++) {
 557     if (bci(row) != no_bci)  entries++;
 558   }
 559   st->print_cr("count(%u) entries(%u)", count(), entries);
 560   for (row = 0; row < row_limit(); row++) {
 561     if (bci(row) != no_bci) {
 562       tab(st);
 563       st->print_cr("bci(%d: count(%u) displacement(%d))",
 564                    bci(row), bci_count(row), bci_displacement(row));
 565     }
 566   }
 567 }
 568 
 569 // ==================================================================
 570 // BranchData
 571 //
 572 // A BranchData is used to access profiling data for a two-way branch.
 573 // It consists of taken and not_taken counts as well as a data displacement
 574 // for the taken case.
 575 
 576 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 577   assert(stream->bci() == bci(), "wrong pos");
 578   int target = stream->dest();
 579   int my_di = mdo->dp_to_di(dp());
 580   int target_di = mdo->bci_to_di(target);
 581   int offset = target_di - my_di;
 582   set_displacement(offset);
 583 }
 584 
 585 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 586   print_shared(st, "BranchData", extra);
 587   if (data()->flags()) {
 588     st->cr();
 589     tab(st);
 590   }
 591   st->print_cr("taken(%u) displacement(%d)",
 592                taken(), displacement());
 593   tab(st);
 594   st->print_cr("not taken(%u)", not_taken());
 595 }
 596 
 597 // ==================================================================
 598 // MultiBranchData
 599 //
 600 // A MultiBranchData is used to access profiling information for
 601 // a multi-way branch (*switch bytecodes).  It consists of a series
 602 // of (count, displacement) pairs, which count the number of times each
 603 // case was taken and specify the data displacement for each branch target.
 604 
 605 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 606   int cell_count = 0;
 607   if (stream->code() == Bytecodes::_tableswitch) {
 608     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 609     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 610   } else {
 611     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 612     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 613   }
 614   return cell_count;
 615 }
 616 
 617 void MultiBranchData::post_initialize(BytecodeStream* stream,
 618                                       MethodData* mdo) {
 619   assert(stream->bci() == bci(), "wrong pos");
 620   int target;
 621   int my_di;
 622   int target_di;
 623   int offset;
 624   if (stream->code() == Bytecodes::_tableswitch) {
 625     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 626     int len = sw.length();
 627     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 628     for (int count = 0; count < len; count++) {
 629       target = sw.dest_offset_at(count) + bci();
 630       my_di = mdo->dp_to_di(dp());
 631       target_di = mdo->bci_to_di(target);
 632       offset = target_di - my_di;
 633       set_displacement_at(count, offset);
 634     }
 635     target = sw.default_offset() + bci();
 636     my_di = mdo->dp_to_di(dp());
 637     target_di = mdo->bci_to_di(target);
 638     offset = target_di - my_di;
 639     set_default_displacement(offset);
 640 
 641   } else {
 642     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 643     int npairs = sw.number_of_pairs();
 644     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 645     for (int count = 0; count < npairs; count++) {
 646       LookupswitchPair pair = sw.pair_at(count);
 647       target = pair.offset() + bci();
 648       my_di = mdo->dp_to_di(dp());
 649       target_di = mdo->bci_to_di(target);
 650       offset = target_di - my_di;
 651       set_displacement_at(count, offset);
 652     }
 653     target = sw.default_offset() + bci();
 654     my_di = mdo->dp_to_di(dp());
 655     target_di = mdo->bci_to_di(target);
 656     offset = target_di - my_di;
 657     set_default_displacement(offset);
 658   }
 659 }
 660 
 661 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
 662   print_shared(st, "MultiBranchData", extra);
 663   st->print_cr("default_count(%u) displacement(%d)",
 664                default_count(), default_displacement());
 665   int cases = number_of_cases();
 666   for (int i = 0; i < cases; i++) {
 667     tab(st);
 668     st->print_cr("count(%u) displacement(%d)",
 669                  count_at(i), displacement_at(i));
 670   }
 671 }
 672 
 673 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 674   print_shared(st, "ArgInfoData", extra);
 675   int nargs = number_of_args();
 676   for (int i = 0; i < nargs; i++) {
 677     st->print("  0x%x", arg_modified(i));
 678   }
 679   st->cr();
 680 }
 681 
 682 int ParametersTypeData::compute_cell_count(Method* m) {
 683   if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
 684     return 0;
 685   }
 686   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
 687   int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
 688   if (obj_args > 0) {
 689     return obj_args + 1; // 1 cell for array len
 690   }
 691   return 0;
 692 }
 693 
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  // Record the stack slots of the method's reference parameters, including
  // the receiver for non-static methods.
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
 697 
// True if parameter-type profiling is enabled globally.
bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}
 701 
 702 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
 703   print_shared(st, "ParametersTypeData", extra);
 704   tab(st);
 705   _parameters.print_data_on(st);
 706   st->cr();
 707 }
 708 
 709 void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
 710   Method** m = (Method**)intptr_at_adr(speculative_trap_method);
 711   it->push(m);
 712 }
 713 
 714 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
 715   print_shared(st, "SpeculativeTrapData", extra);
 716   tab(st);
 717   method()->print_short_name(st);
 718   st->cr();
 719 }
 720 
 721 void ArrayStoreData::print_data_on(outputStream* st, const char* extra) const {
 722   print_shared(st, "ArrayStore", extra);
 723   st->cr();
 724   tab(st, true);
 725   st->print("array");
 726   _array.print_data_on(st);
 727   tab(st, true);
 728   st->print("element");
 729   if (null_seen()) {
 730     st->print(" (null seen)");
 731   }
 732   tab(st);
 733   print_receiver_data_on(st);
 734 }
 735 
 736 void ArrayLoadData::print_data_on(outputStream* st, const char* extra) const {
 737   print_shared(st, "ArrayLoad", extra);
 738   st->cr();
 739   tab(st, true);
 740   st->print("array");
 741   _array.print_data_on(st);
 742   tab(st, true);
 743   st->print("element");
 744   _element.print_data_on(st);
 745 }
 746 
 747 void ACmpData::print_data_on(outputStream* st, const char* extra) const {
 748   BranchData::print_data_on(st, extra);
 749   tab(st, true);
 750   st->print("left");
 751   _left.print_data_on(st);
 752   tab(st, true);
 753   st->print("right");
 754   _right.print_data_on(st);
 755 }
 756 
 757 // ==================================================================
 758 // MethodData*
 759 //
 760 // A MethodData* holds information which has been collected about
 761 // a method.
 762 
// Allocate a MethodData for 'method' in the given loader's metaspace.
// Allocation may block/GC, hence the no-locks assertion.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
 770 
// Number of profiling cells a bytecode needs: a fixed count per profiled
// bytecode kind, variable_cell_count when it depends on the instruction's
// operands (switches, type-profiled invokes), or no_profile_data when the
// bytecode is not profiled at all.
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    // With TypeProfileCasts we record receiver types, else just a null bit.
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_aaload:
    return ArrayLoadData::static_cell_count();
  case Bytecodes::_aastore:
    return ArrayStoreData::static_cell_count();
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    // Size depends on the call's signature when type profiling is on.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    // Size depends on the call's signature when type profiling is on.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_if_acmpne:
  case Bytecodes::_if_acmpeq:
    return ACmpData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Size depends on the number of switch targets.
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
 836 
// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    // Bytecode is not profiled; it contributes no data.
    return 0;
  }
  if (cell_count == variable_cell_count) {
    // Size depends on the instruction's operands; resolve it here.
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      // Type profiling may or may not apply to this particular call site.
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}
 881 
// Returns true if 'code' may need a SpeculativeTrapData entry reserved
// for it (only when C2 with UseTypeSpeculation is in play).
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    // Only C2 consumes type-speculation data; without it the extra
    // SpeculativeTrapData entries would be wasted space.
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
    // Intentional fall through to default when C2 is absent/disabled.
  default:
    return false;
  }
  return false;  // unreachable; placates compilers that can't see it
}
 906 
 907 #if INCLUDE_JVMCI
 908 
// Sized placement-style allocator: 'fs_size' is the full allocation
// (sizeof(FailedSpeculation) plus the trailing speculation bytes), so it
// is used instead of the compiler-supplied 'size'. Returns null on OOM
// (std::nothrow) so callers can degrade gracefully.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
 912 
// Copy the speculation bytes into the trailing storage that follows this
// object (reserved by the sized operator new above).
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}
 916 
 917 // A heuristic check to detect nmethods that outlive a failed speculations list.
 918 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 919   jlong head = (jlong)(address) *failed_speculations_address;
 920   if ((head & 0x1) == 0x1) {
 921     stringStream st;
 922     if (nm != nullptr) {
 923       st.print("%d", nm->compile_id());
 924       Method* method = nm->method();
 925       st.print_raw("{");
 926       if (method != nullptr) {
 927         method->print_name(&st);
 928       } else {
 929         const char* jvmci_name = nm->jvmci_name();
 930         if (jvmci_name != nullptr) {
 931           st.print_raw(jvmci_name);
 932         }
 933       }
 934       st.print_raw("}");
 935     } else {
 936       st.print("<unknown>");
 937     }
 938     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 939   }
 940 }
 941 
// Append a new failed speculation to the list at *failed_speculations_address
// unless an identical speculation is already present. Lock-free: multiple
// threads may append concurrently via CAS on the tail. Returns true if the
// speculation was added, false if it was a duplicate or allocation failed.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  // Header plus trailing speculation bytes in a single allocation.
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        // Bit 0 of a list head doubles as the "freed" tag, so entries must
        // be pointer aligned for the tag to be unambiguous.
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // Race to install fs as the new tail.
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
      // CAS lost: another thread appended first; fall through and examine
      // the entry that won.
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        // Duplicate found after we had already allocated: discard ours.
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}
 978 
 979 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 980   assert(failed_speculations_address != nullptr, "must be");
 981   FailedSpeculation* fs = *failed_speculations_address;
 982   while (fs != nullptr) {
 983     FailedSpeculation* next = fs->next();
 984     delete fs;
 985     fs = next;
 986   }
 987 
 988   // Write an unaligned value to failed_speculations_address to denote
 989   // that it is no longer a valid pointer. This is allows for the check
 990   // in add_failed_speculation against adding to a freed failed
 991   // speculations list.
 992   long* head = (long*) failed_speculations_address;
 993   (*head) = (*head) | 0x1;
 994 }
 995 #endif // INCLUDE_JVMCI
 996 
// Compute the number of extra DataLayout entries to reserve at the end of
// the data section. These hold trap records for bcis that got no regular
// profile slot and (optionally) SpeculativeTrapData entries.
// 'data_size' is the byte size of the regular data section and
// 'empty_bc_count' counts bytecodes that received no profile slot.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}
1045 
// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
// Must stay in lock-step with the layout built by MethodData::initialize().
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    // With JVMCI, only bytecodes that can actually trap count as
    // candidates for extra (trap) data entries.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }

  // One fixed-size entry per exception handler (initialized with
  // bit_data_tag in MethodData::initialize()).
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
  }

  return object_size;
}
1084 
1085 // Compute the size of the MethodData* necessary to store
1086 // profiling information about a given method.  Size is in words
1087 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1088   int byte_size = compute_allocation_size_in_bytes(method);
1089   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1090   return align_metadata_size(word_size);
1091 }
1092 
// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
// Chooses the DataLayout tag and cell count for the bytecode at the
// stream's current position and initializes the layout at 'data_index'.
// Must agree with bytecode_cell_count()/compute_data_size().
int MethodData::initialize_data(BytecodeStream* stream,
                                       int data_index) {
  // cell_count stays -1 (and tag no_tag) for bytecodes with no profile.
  int cell_count = -1;
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      // Without cast profiling, just record that the bytecode executed.
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_aaload:
    cell_count = ArrayLoadData::static_cell_count();
    tag = DataLayout::array_load_data_tag;
    break;
  case Bytecodes::_aastore:
    cell_count = ArrayStoreData::static_cell_count();
    tag = DataLayout::array_store_data_tag;
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // A cell count larger than the plain counter size means type cells
    // were actually added for this call site.
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    // Same trick as for invokespecial/static: extra cells indicate that
    // type profiling applies at this call site.
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
    cell_count = ACmpData::static_cell_count();
    tag = DataLayout::acmp_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    break;
  }
  // Cross-check against bytecode_cell_count() except for the cases whose
  // size is inherently variable (switches, type-profiled call sites).
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}
1226 
1227 // Get the data at an arbitrary (sort of) data index.
1228 ProfileData* MethodData::data_at(int data_index) const {
1229   if (out_of_bounds(data_index)) {
1230     return nullptr;
1231   }
1232   DataLayout* data_layout = data_layout_at(data_index);
1233   return data_layout->data_in();
1234 }
1235 
// Number of data cells in this layout, derived from its tag. Fixed-size
// kinds answer statically; variable-size kinds materialize the matching
// ProfileData wrapper (resource-allocated via operator new) to read the
// stored count.
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  case DataLayout::array_store_data_tag:
    return ((new ArrayStoreData(this))->cell_count());
  case DataLayout::array_load_data_tag:
    return ((new ArrayLoadData(this))->cell_count());
  case DataLayout::acmp_data_tag:
    return ((new ACmpData(this))->cell_count());
  }
}
// Wrap this DataLayout in the ProfileData subclass matching its tag.
// Wrappers appear to be resource-allocated (callers such as
// MethodData::post_initialize hold a ResourceMark).
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  case DataLayout::array_store_data_tag:
    return new ArrayStoreData(this);
  case DataLayout::array_load_data_tag:
    return new ArrayLoadData(this);
  case DataLayout::acmp_data_tag:
    return new ACmpData(this);
  }
}
1316 
1317 // Iteration over data.
1318 ProfileData* MethodData::next_data(ProfileData* current) const {
1319   int current_index = dp_to_di(current->dp());
1320   int next_index = current_index + current->size_in_bytes();
1321   ProfileData* next = data_at(next_index);
1322   return next;
1323 }
1324 
1325 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1326   int current_index = dp_to_di((address)current);
1327   int next_index = current_index + current->size_in_bytes();
1328   if (out_of_bounds(next_index)) {
1329     return nullptr;
1330   }
1331   DataLayout* next = data_layout_at(next_index);
1332   return next;
1333 }
1334 
// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;  // next_data()/data_in() allocate ProfileData wrappers
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Reposition the stream at this entry's bytecode before initializing.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    // The parameter-profiling area has no associated bci: pass a null stream.
    parameters_type_data()->post_initialize(nullptr, this);
  }
}
1349 
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
    // No extra-data lock yet; NOTE(review): presumably created later,
    // outside this constructor — not visible in this file chunk.
    _extra_data_lock = nullptr;
    initialize();
}
1359 
1360 #if INCLUDE_CDS
MethodData::MethodData() {
  // Used by cppVtables.cpp only
  // Default constructor exists solely for CDS vtable patching; it performs
  // no initialization of the profiling data.
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
1365 #endif
1366 
// Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
// not being accessed while the contents are being rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;  // the MDO whose storage is rewritten in doit()
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const                         { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint.  This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  // Permit this operation to be requested from within another VM operation.
  bool allow_nested_vm_operations() const        { return true; }
};
1382 
1383 void MethodData::reinitialize() {
1384   VM_ReinitializeMDO op(this);
1385   VMThread::execute(&op);
1386 }
1387 
1388 
// Build the MDO layout: per-bytecode data, extra (trap) entries,
// arg-info, parameter-type area, and exception-handler entries — in that
// order. The resulting size must match compute_allocation_size_in_bytes().
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    // With JVMCI, only bytecodes that can trap count toward extra entries
    // (mirrors compute_allocation_size_in_bytes()).
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Exception-handler entries come last; each is a fixed-size BitData
  // keyed by the handler's pc.
  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
1472 
// Reset the dynamic (non-layout) state: invocation/backedge counters,
// compiler counters, per-method notification masks, and profiling flags.
void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  // A per-method CompileThresholdScaling compile command (if present)
  // adjusts how often this method notifies the compilation policy.
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

  // Initialize escape flags.
  clear_escape_info();
}
1500 
1501 bool MethodData::is_mature() const {
1502   return CompilationPolicy::is_mature(const_cast<MethodData*>(this));
1503 }
1504 
// Translate a bci to its corresponding data index (di).
// Returns the first entry whose bci is >= the requested bci, or the limit
// position when no such entry exists. Updates the search hint as it goes.
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Cache this position (or the one before it for an inexact match)
      // to speed up future lookups near this bci.
      if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}
1520 
// Translate a bci to its corresponding data, or null.
ProfileData* MethodData::bci_to_data(int bci) {
  // Caller must hold the extra-data lock: we may fall back to scanning
  // the extra data section below.
  check_extra_data_locked();

  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      // Entries are ordered by bci, so no exact match exists.
      break;
    }
  }
  // Not in the regular section; look in the extra data (without creating).
  return bci_to_extra_data(bci, nullptr, false);
}
1536 
1537 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1538   assert(ProfileExceptionHandlers, "not profiling");
1539   for (int i = 0; i < num_exception_handler_data(); i++) {
1540     DataLayout* exception_handler_data = exception_handler_data_at(i);
1541     if (exception_handler_data->bci() == bci) {
1542       return exception_handler_data;
1543     }
1544   }
1545   return nullptr;
1546 }
1547 
1548 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1549   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1550   return data != nullptr ? new BitData(data) : nullptr;
1551 }
1552 
1553 BitData MethodData::exception_handler_bci_to_data(int bci) {
1554   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1555   assert(data != nullptr, "invalid bci");
1556   return BitData(data);
1557 }
1558 
1559 DataLayout* MethodData::next_extra(DataLayout* dp) {
1560   int nb_cells = 0;
1561   switch(dp->tag()) {
1562   case DataLayout::bit_data_tag:
1563   case DataLayout::no_tag:
1564     nb_cells = BitData::static_cell_count();
1565     break;
1566   case DataLayout::speculative_trap_data_tag:
1567     nb_cells = SpeculativeTrapData::static_cell_count();
1568     break;
1569   default:
1570     fatal("unexpected tag %d", dp->tag());
1571   }
1572   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1573 }
1574 
// Linear scan of the extra data section for an entry matching 'bci' (and,
// for speculative traps, method 'm'). On a miss, 'dp' is left pointing at
// the first free slot (tag no_tag) or at the end of the section.
ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
  check_extra_data_locked();

  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First unused slot: the entry is not present.
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      // BitData (trap) entries match on bci only, and only when searching
      // without a method (m == nullptr).
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      // Speculative-trap entries must match both bci and method.
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        if (dp->bci() == bci) {
          assert(data->method() != nullptr, "method must be set");
          if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;  // unreachable: the loop exits only via return
}
1612 
1613 
// Translate a bci to its corresponding extra data, or null.
// m == nullptr searches/creates BitData (trap) entries; m != nullptr
// searches/creates SpeculativeTrapData entries keyed by (bci, method).
// When create_if_missing is set and no match exists, the first free slot
// is claimed.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    // Found, or the section is full (dp pushed to end by the scan).
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Build the header off to the side, then store it into the slot (see
    // the "monotonic" note in bci_to_extra_data_find: readers key off tag).
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}
1662 
1663 ArgInfoData *MethodData::arg_info() {
1664   DataLayout* dp    = extra_data_base();
1665   DataLayout* end   = args_data_limit();
1666   for (; dp < end; dp = next_extra(dp)) {
1667     if (dp->tag() == DataLayout::arg_info_data_tag)
1668       return new ArgInfoData(dp);
1669   }
1670   return nullptr;
1671 }
1672 
1673 // Printing
1674 
// Print a header line identifying the owning method, followed by the
// full profile contents (see print_data_on).
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1682 
// Short single-line form: identify the owning method only, no profile
// contents and no trailing newline.
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1688 
// Print the parameter, regular, and extra profile data, each entry
// prefixed with its data index. Acquires the extra data lock unless this
// thread already holds it, since the extra data section may be modified
// concurrently.
void MethodData::print_data_on(outputStream* st) const {
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }

  st->print_cr("--- Extra data:");
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // Free slot: nothing to print, keep scanning.
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    // dp was set to end above when the final (ArgInfoData) entry was
    // printed; return after printing it.
    if (dp >= end) return;
  }
}
1733 
1734 // Verification
1735 
// Metadata verification entry point; delegates to verify_data_on().
void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}
1741 
// Verification of the profile data contents is not implemented yet;
// this is a placeholder called from verify_on().
void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}
1746 
1747 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1748   if (m->is_compiled_lambda_form()) {
1749     return true;
1750   }
1751 
1752   Bytecode_invoke inv(m , bci);
1753   return inv.is_invokedynamic() || inv.is_invokehandle();
1754 }
1755 
1756 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1757   Bytecode_invoke inv(m , bci);
1758   if (inv.is_invokevirtual()) {
1759     Symbol* klass = inv.klass();
1760     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1761         klass == vmSymbols::sun_misc_Unsafe() ||
1762         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1763       Symbol* name = inv.name();
1764       if (name->starts_with("get") || name->starts_with("put")) {
1765         return true;
1766       }
1767     }
1768   }
1769   return false;
1770 }
1771 
1772 int MethodData::profile_arguments_flag() {
1773   return TypeProfileLevel % 10;
1774 }
1775 
1776 bool MethodData::profile_arguments() {
1777   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1778 }
1779 
1780 bool MethodData::profile_arguments_jsr292_only() {
1781   return profile_arguments_flag() == type_profile_jsr292;
1782 }
1783 
1784 bool MethodData::profile_all_arguments() {
1785   return profile_arguments_flag() == type_profile_all;
1786 }
1787 
1788 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1789   if (!profile_arguments()) {
1790     return false;
1791   }
1792 
1793   if (profile_all_arguments()) {
1794     return true;
1795   }
1796 
1797   if (profile_unsafe(m, bci)) {
1798     return true;
1799   }
1800 
1801   assert(profile_arguments_jsr292_only(), "inconsistent");
1802   return profile_jsr292(m, bci);
1803 }
1804 
1805 int MethodData::profile_return_flag() {
1806   return (TypeProfileLevel % 100) / 10;
1807 }
1808 
1809 bool MethodData::profile_return() {
1810   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1811 }
1812 
1813 bool MethodData::profile_return_jsr292_only() {
1814   return profile_return_flag() == type_profile_jsr292;
1815 }
1816 
1817 bool MethodData::profile_all_return() {
1818   return profile_return_flag() == type_profile_all;
1819 }
1820 
1821 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1822   if (!profile_return()) {
1823     return false;
1824   }
1825 
1826   if (profile_all_return()) {
1827     return true;
1828   }
1829 
1830   assert(profile_return_jsr292_only(), "inconsistent");
1831   return profile_jsr292(m, bci);
1832 }
1833 
1834 int MethodData::profile_parameters_flag() {
1835   return TypeProfileLevel / 100;
1836 }
1837 
1838 bool MethodData::profile_parameters() {
1839   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1840 }
1841 
1842 bool MethodData::profile_parameters_jsr292_only() {
1843   return profile_parameters_flag() == type_profile_jsr292;
1844 }
1845 
1846 bool MethodData::profile_all_parameters() {
1847   return profile_parameters_flag() == type_profile_all;
1848 }
1849 
1850 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1851   if (!profile_parameters()) {
1852     return false;
1853   }
1854 
1855   if (profile_all_parameters()) {
1856     return true;
1857   }
1858 
1859   assert(profile_parameters_jsr292_only(), "inconsistent");
1860   return m->is_compiled_lambda_form();
1861 }
1862 
// Visit all embedded metaspace pointers (used by CDS/AOT archiving):
// the owning method, pointers inside parameter and regular profile data,
// and the Method* stored in each SpeculativeTrapData extra entry.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(aot, training)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  // Extra data section: only SpeculativeTrapData entries hold metaspace
  // pointers. A no_tag (free) or arg_info entry ends the trap entries.
  for (DataLayout* dp = extra_data_base();
                   dp < extra_data_limit();
                   dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      break;
    }
  }
}
1885 
// Helper for clean_extra_data(): either slide a live entry left over the
// cells freed by dead entries (reset == false), or zero the "shift"
// cells immediately preceding dp that were vacated by the compaction
// (reset == true). A shift of 0 means no dead entries precede dp.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}
1908 
1909 // Check for entries that reference an unloaded method
1910 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1911   bool _always_clean;
1912 public:
1913   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1914   bool is_live(Method* m) {
1915     if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1916       return true; // TODO: treat as unloaded instead?
1917     }
1918     return !(_always_clean) && m->method_holder()->is_loader_alive();
1919   }
1920 };
1921 
1922 // Check for entries that reference a redefined method
1923 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1924 public:
1925   CleanExtraDataMethodClosure() {}
1926   bool is_live(Method* m) { return !m->is_old(); }
1927 };
1928 
// Return the lock guarding the extra data section, creating it lazily on
// first use. Several threads may race here: each builds a candidate lock
// and tries to publish it with a cmpxchg; losers delete their candidate
// and use the winner's.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint
    lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
1943 
1944 // Remove SpeculativeTrapData entries that reference an unloaded or
1945 // redefined method
// Compact the extra data section in place: entries whose method the
// closure reports dead are removed, live entries are shifted left over
// them, and the vacated tail cells are zeroed. Caller must hold the
// extra data lock.
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
1989 
1990 // Verify there's no unloaded or redefined method referenced by a
1991 // SpeculativeTrapData entry
// Debug-only check, run after clean_extra_data(): every remaining
// SpeculativeTrapData entry must reference a method the closure still
// considers live. Caller must hold the extra data lock.
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // First free or arg-info entry: no trap entries follow.
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}
2018 
2019 void MethodData::clean_method_data(bool always_clean) {
2020   ResourceMark rm;
2021   for (ProfileData* data = first_data();
2022        is_valid(data);
2023        data = next_data(data)) {
2024     data->clean_weak_klass_links(always_clean);
2025   }
2026   ParametersTypeData* parameters = parameters_type_data();
2027   if (parameters != nullptr) {
2028     parameters->clean_weak_klass_links(always_clean);
2029   }
2030 
2031   CleanExtraDataKlassClosure cl(always_clean);
2032 
2033   // Lock to modify extra data, and prevent Safepoint from breaking the lock
2034   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
2035 
2036   clean_extra_data(&cl);
2037   verify_extra_data_clean(&cl);
2038 }
2039 
2040 // This is called during redefinition to clean all "old" redefined
2041 // methods out of MethodData for all methods.
// Remove SpeculativeTrapData entries referencing redefined ("old")
// methods; the lock must be taken before modifying extra data.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  CleanExtraDataMethodClosure cl;

  // Lock to modify extra data, and prevent Safepoint from breaking the lock
  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);

  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}
2052 
// Called when this MDO is deallocated; loader_data is unused here, the
// only cleanup needed is releasing C-heap side structures.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
2056 
// Free C-heap data hanging off this MDO (currently only the JVMCI
// failed-speculation list).
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
2062 
2063 #if INCLUDE_CDS
// Strip process-specific state before this MDO is written to the CDS
// archive. The extra data lock is recreated lazily by extra_data_lock()
// at runtime.
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
#if INCLUDE_JVMCI
  // The failed-speculation list lives in C-heap and cannot be archived.
  _failed_speculations = nullptr;
#endif
}
2070 
// Nothing to restore eagerly after loading from the CDS archive: the
// extra data lock is recreated lazily on first use (see extra_data_lock()).
void MethodData::restore_unshareable_info(TRAPS) {
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
2074 #endif // INCLUDE_CDS
2075 
2076 #ifdef ASSERT
// Debug-only: assert that the current thread holds this MDO's extra data
// lock (or that we are dumping a CDS archive), and that a JavaThread
// cannot safepoint while holding it.
void MethodData::check_extra_data_locked() const {
    // Cast const away, just to be able to verify the lock
    // Usually we only want non-const accesses on the lock,
    // so this here is an exception.
    MethodData* self = (MethodData*)this;
    assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
    assert(!Thread::current()->is_Java_thread() ||
           JavaThread::current()->is_in_no_safepoint_scope(),
           "JavaThread must have NoSafepointVerifier inside lock scope");
}
2087 #endif