1 /*
   2  * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/cdsConfig.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/systemDictionaryShared.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compilationPolicy.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "interpreter/bytecode.hpp"
  33 #include "interpreter/bytecodeStream.hpp"
  34 #include "interpreter/linkResolver.hpp"
  35 #include "memory/metaspaceClosure.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "oops/klass.inline.hpp"
  38 #include "oops/method.inline.hpp"
  39 #include "oops/methodData.inline.hpp"
  40 #include "prims/jvmtiRedefineClasses.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/deoptimization.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/orderAccess.hpp"
  45 #include "runtime/safepointVerifiers.hpp"
  46 #include "runtime/signature.hpp"
  47 #include "utilities/align.hpp"
  48 #include "utilities/checkedCast.hpp"
  49 #include "utilities/copy.hpp"
  50 
  51 // ==================================================================
  52 // DataLayout
  53 //
  54 // Overlay for generic profiling data.
  55 
  56 // Some types of data layouts need a length field.
  57 bool DataLayout::needs_array_len(u1 tag) {
  58   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  59 }
  60 
// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Build the header in a local copy first so that it can be published
  // below with one store.
  DataLayout temp;
  temp._header._bits = (intptr_t)0;
  temp._header._struct._tag = tag;
  temp._header._struct._bci = bci;
  // Write the header using a single intptr_t write.  This ensures that if the layout is
  // reinitialized readers will never see the transient state where the header is 0.
  _header = temp._header;

  // Zero every payload cell.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  // Array-style layouts record their element count in a dedicated cell.
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Type-profiling layouts need extra, tag-specific setup.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}
  84 
// Delegate weak-klass cleaning to the typed ProfileData view of this layout.
// The ResourceMark bounds the resource allocation done by data_in().
void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}
  89 
  90 
  91 // ==================================================================
  92 // ProfileData
  93 //
  94 // A ProfileData object is created to refer to a section of profiling
  95 // data in a structured way.
  96 
  97 // Constructor for invalid ProfileData.
  98 ProfileData::ProfileData() {
  99   _data = nullptr;
 100 }
 101 
 102 char* ProfileData::print_data_on_helper(const MethodData* md) const {
 103   DataLayout* dp  = md->extra_data_base();
 104   DataLayout* end = md->args_data_limit();
 105   stringStream ss;
 106   for (;; dp = MethodData::next_extra(dp)) {
 107     assert(dp < end, "moved past end of extra data");
 108     switch(dp->tag()) {
 109     case DataLayout::speculative_trap_data_tag:
 110       if (dp->bci() == bci()) {
 111         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
 112         int trap = data->trap_state();
 113         char buf[100];
 114         ss.print("trap/");
 115         data->method()->print_short_name(&ss);
 116         ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 117       }
 118       break;
 119     case DataLayout::bit_data_tag:
 120       break;
 121     case DataLayout::no_tag:
 122     case DataLayout::arg_info_data_tag:
 123       return ss.as_string();
 124       break;
 125     default:
 126       fatal("unexpected tag %d", dp->tag());
 127     }
 128   }
 129   return nullptr;
 130 }
 131 
// Print this data entry, appending any extra-data description (speculative
// traps at the same bci) gathered from the owning MethodData.
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}
 135 
// Print the fields common to all ProfileData variants: bci, the type name,
// the decoded trap state (when set), optional extra text, and flags (when set).
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) ", flags);
  }
}
 154 
 155 void ProfileData::tab(outputStream* st, bool first) const {
 156   st->fill_to(first ? tab_width_one : tab_width_two);
 157 }
 158 
 159 // ==================================================================
 160 // BitData
 161 //
 162 // A BitData corresponds to a one-bit flag.  This is used to indicate
 163 // whether a checkcast bytecode has seen a null value.
 164 
 165 
// A BitData has no payload beyond the shared header fields.
void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}
 170 
 171 // ==================================================================
 172 // CounterData
 173 //
 174 // A CounterData corresponds to a simple counter.
 175 
 176 void CounterData::print_data_on(outputStream* st, const char* extra) const {
 177   print_shared(st, "CounterData", extra);
 178   st->print_cr("count(%u)", count());
 179 }
 180 
 181 // ==================================================================
 182 // JumpData
 183 //
 184 // A JumpData is used to access profiling information for a direct
 185 // branch.  It is a counter, used for counting the number of branches,
 186 // plus a data displacement, used for realigning the data pointer to
 187 // the corresponding target bci.
 188 
 189 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 190   assert(stream->bci() == bci(), "wrong pos");
 191   int target;
 192   Bytecodes::Code c = stream->code();
 193   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 194     target = stream->dest_w();
 195   } else {
 196     target = stream->dest();
 197   }
 198   int my_di = mdo->dp_to_di(dp());
 199   int target_di = mdo->bci_to_di(target);
 200   int offset = target_di - my_di;
 201   set_displacement(offset);
 202 }
 203 
 204 void JumpData::print_data_on(outputStream* st, const char* extra) const {
 205   print_shared(st, "JumpData", extra);
 206   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 207 }
 208 
 209 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 210   // Parameter profiling include the receiver
 211   int args_count = include_receiver ? 1 : 0;
 212   ResourceMark rm;
 213   ReferenceArgumentCount rac(signature);
 214   args_count += rac.count();
 215   args_count = MIN2(args_count, max);
 216   return args_count * per_arg_cell_count;
 217 }
 218 
 219 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
 220   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 221   assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
 222   const methodHandle m = stream->method();
 223   int bci = stream->bci();
 224   Bytecode_invoke inv(m, bci);
 225   int args_cell = 0;
 226   if (MethodData::profile_arguments_for_invoke(m, bci)) {
 227     args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
 228   }
 229   int ret_cell = 0;
 230   if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
 231     ret_cell = ReturnTypeEntry::static_cell_count();
 232   }
 233   int header_cell = 0;
 234   if (args_cell + ret_cell > 0) {
 235     header_cell = header_cell_count();
 236   }
 237 
 238   return header_cell + args_cell + ret_cell;
 239 }
 240 
// Walks a method signature and records the stack-slot offset of each
// reference-typed parameter, up to a maximum number of entries.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                      // maximum number of offsets to record
  int _offset;                   // running stack-slot offset
  GrowableArray<int> _offsets;   // offsets of reference parameters seen so far

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  // Called once per parameter type; records the offset for reference types
  // and advances the running offset by the type's word count.
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Offset of the i-th recorded reference parameter.
  int off_at(int i) const { return _offsets.at(i); }
};
 265 
 266 void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
 267   ResourceMark rm;
 268   int start = 0;
 269   // Parameter profiling include the receiver
 270   if (include_receiver && has_receiver) {
 271     set_stack_slot(0, 0);
 272     set_type(0, type_none());
 273     start += 1;
 274   }
 275   ArgumentOffsetComputer aos(signature, _number_of_entries-start);
 276   for (int i = start; i < _number_of_entries; i++) {
 277     set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
 278     set_type(i, type_none());
 279   }
 280 }
 281 
// Initialize the argument and return type entries for this invoke site.
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Cross-check that the layout really has room for at least one
    // profiled reference argument.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // include_receiver == false: the receiver is not part of the profiled
    // argument entries at a call site.
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 302 
// Initialize the argument and return type entries for this virtual invoke
// site (mirrors CallTypeData::post_initialize).
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Cross-check that the layout really has room for at least one
    // profiled reference argument.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // include_receiver == false: the receiver is profiled separately by the
    // receiver-type rows, not by the argument entries.
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 323 
// During CDS archive dumping, decide whether a Klass referenced from an MDO
// should be purged (unloaded, or excluded from the archive).  Outside of a
// dump at a safepoint this always answers false.
static bool is_excluded(Klass* k) {
#if INCLUDE_CDS
  if (SafepointSynchronize::is_at_safepoint() &&
      CDSConfig::is_dumping_archive() &&
      CDSConfig::current_thread_is_vm_or_dumper()) {
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
      log_debug(cds)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
      return true;
    } else if (CDSConfig::is_dumping_dynamic_archive() && k->is_shared()) {
      // Classes already in the static archive are kept when dumping a
      // dynamic archive.
      return false;
    } else {
      bool excluded = SystemDictionaryShared::should_be_excluded(k);
      if (excluded) {
        log_debug(cds)("Purged %s from MDO: excluded class", k->name()->as_C_string());
      }
      return excluded;
    }
  }
#endif
  return false;
}
 345 
 346 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 347   for (int i = 0; i < _number_of_entries; i++) {
 348     intptr_t p = type(i);
 349     Klass* k = (Klass*)klass_part(p);
 350     if (k != nullptr) {
 351       if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
 352         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 353       }
 354       if (always_clean || !k->is_loader_alive() || is_excluded(k)) {
 355         set_type(i, with_status((Klass*)nullptr, p));
 356       }
 357     }
 358   }
 359 }
 360 
// Visit the (tagged) Klass* stored in each type entry.
void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    Klass** k = (Klass**)type_adr(i); // tagged
    it->push(k);
  }
}
 367 
 368 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
 369   intptr_t p = type();
 370   Klass* k = (Klass*)klass_part(p);
 371   if (k != nullptr) {
 372     if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
 373       return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 374     }
 375     if (always_clean || !k->is_loader_alive() || is_excluded(k)) {
 376       set_type(with_status((Klass*)nullptr, p));
 377     }
 378   }
 379 }
 380 
// Visit the (tagged) Klass* stored in the return type entry.
void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass** k = (Klass**)type_adr(); // tagged
  it->push(k);
}
 385 
// True when return-type profiling is globally enabled.
bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}
 389 
// True when argument-type profiling is globally enabled.
bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}
 393 
// Print a tagged type entry: "none", "unknown", or the klass itself,
// followed by a null-seen marker when that bit is set.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
 406 
 407 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
 408   for (int i = 0; i < _number_of_entries; i++) {
 409     _pd->tab(st);
 410     st->print("%d: stack(%u) ", i, stack_slot(i));
 411     print_klass(st, type(i));
 412     st->cr();
 413   }
 414 }
 415 
// Print the recorded return type on its own line.
void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}
 421 
 422 void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
 423   CounterData::print_data_on(st, extra);
 424   if (has_arguments()) {
 425     tab(st, true);
 426     st->print("argument types");
 427     _args.print_data_on(st);
 428   }
 429   if (has_return()) {
 430     tab(st, true);
 431     st->print("return type");
 432     _ret.print_data_on(st);
 433   }
 434 }
 435 
 436 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
 437   VirtualCallData::print_data_on(st, extra);
 438   if (has_arguments()) {
 439     tab(st, true);
 440     st->print("argument types");
 441     _args.print_data_on(st);
 442   }
 443   if (has_return()) {
 444     tab(st, true);
 445     st->print("return type");
 446     _ret.print_data_on(st);
 447   }
 448 }
 449 
 450 // ==================================================================
 451 // ReceiverTypeData
 452 //
 453 // A ReceiverTypeData is used to access profiling information about a
 454 // dynamic type check.  It consists of a counter which counts the total times
 455 // that the check is reached, and a series of (Klass*, count) pairs
 456 // which are used to store a type profile for the receiver of the check.
 457 
 458 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 459     for (uint row = 0; row < row_limit(); row++) {
 460     Klass* p = receiver(row);
 461     if (p != nullptr) {
 462       if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
 463         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 464       }
 465       if (always_clean || !p->is_loader_alive() || is_excluded(p)) {
 466         clear_row(row);
 467       }
 468     }
 469   }
 470 }
 471 
// Visit the Klass* stored in each receiver row.
void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
    it->push(recv);
  }
}
 478 
 479 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 480   uint row;
 481   int entries = 0;
 482   for (row = 0; row < row_limit(); row++) {
 483     if (receiver(row) != nullptr)  entries++;
 484   }
 485   st->print_cr("count(%u) entries(%u)", count(), entries);
 486   int total = count();
 487   for (row = 0; row < row_limit(); row++) {
 488     if (receiver(row) != nullptr) {
 489       total += receiver_count(row);
 490     }
 491   }
 492   for (row = 0; row < row_limit(); row++) {
 493     if (receiver(row) != nullptr) {
 494       tab(st);
 495       receiver(row)->print_value_on(st);
 496       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 497     }
 498   }
 499 }
// Print the shared header followed by the receiver-type rows.
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}
 504 
// Print the shared header followed by the receiver-type rows.
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}
 509 
 510 // ==================================================================
 511 // RetData
 512 //
 513 // A RetData is used to access profiling information for a ret bytecode.
 514 // It is composed of a count of the number of times that the ret has
 515 // been executed, followed by a series of triples of the form
 516 // (bci, count, di) which count the number of times that some bci was the
 517 // target of the ret and cache a corresponding displacement.
 518 
// Mark every cache row empty: displacement -1 and bci == no_bci (no_bci is
// the "row free" sentinel checked by fixup_ret and the interpreter).
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}
 528 
// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock.  We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      // Claim this free row: record the displacement and seed the count.
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}
 553 
 554 void RetData::print_data_on(outputStream* st, const char* extra) const {
 555   print_shared(st, "RetData", extra);
 556   uint row;
 557   int entries = 0;
 558   for (row = 0; row < row_limit(); row++) {
 559     if (bci(row) != no_bci)  entries++;
 560   }
 561   st->print_cr("count(%u) entries(%u)", count(), entries);
 562   for (row = 0; row < row_limit(); row++) {
 563     if (bci(row) != no_bci) {
 564       tab(st);
 565       st->print_cr("bci(%d: count(%u) displacement(%d))",
 566                    bci(row), bci_count(row), bci_displacement(row));
 567     }
 568   }
 569 }
 570 
 571 // ==================================================================
 572 // BranchData
 573 //
 574 // A BranchData is used to access profiling data for a two-way branch.
 575 // It consists of taken and not_taken counts as well as a data displacement
 576 // for the taken case.
 577 
 578 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 579   assert(stream->bci() == bci(), "wrong pos");
 580   int target = stream->dest();
 581   int my_di = mdo->dp_to_di(dp());
 582   int target_di = mdo->bci_to_di(target);
 583   int offset = target_di - my_di;
 584   set_displacement(offset);
 585 }
 586 
 587 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 588   print_shared(st, "BranchData", extra);
 589   st->print_cr("taken(%u) displacement(%d)",
 590                taken(), displacement());
 591   tab(st);
 592   st->print_cr("not taken(%u)", not_taken());
 593 }
 594 
 595 // ==================================================================
 596 // MultiBranchData
 597 //
 598 // A MultiBranchData is used to access profiling information for
 599 // a multi-way branch (*switch bytecodes).  It consists of a series
 600 // of (count, displacement) pairs, which count the number of times each
 601 // case was taken and specify the data displacement for each branch target.
 602 
 603 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 604   int cell_count = 0;
 605   if (stream->code() == Bytecodes::_tableswitch) {
 606     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 607     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 608   } else {
 609     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 610     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 611   }
 612   return cell_count;
 613 }
 614 
 615 void MultiBranchData::post_initialize(BytecodeStream* stream,
 616                                       MethodData* mdo) {
 617   assert(stream->bci() == bci(), "wrong pos");
 618   int target;
 619   int my_di;
 620   int target_di;
 621   int offset;
 622   if (stream->code() == Bytecodes::_tableswitch) {
 623     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 624     int len = sw.length();
 625     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 626     for (int count = 0; count < len; count++) {
 627       target = sw.dest_offset_at(count) + bci();
 628       my_di = mdo->dp_to_di(dp());
 629       target_di = mdo->bci_to_di(target);
 630       offset = target_di - my_di;
 631       set_displacement_at(count, offset);
 632     }
 633     target = sw.default_offset() + bci();
 634     my_di = mdo->dp_to_di(dp());
 635     target_di = mdo->bci_to_di(target);
 636     offset = target_di - my_di;
 637     set_default_displacement(offset);
 638 
 639   } else {
 640     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 641     int npairs = sw.number_of_pairs();
 642     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 643     for (int count = 0; count < npairs; count++) {
 644       LookupswitchPair pair = sw.pair_at(count);
 645       target = pair.offset() + bci();
 646       my_di = mdo->dp_to_di(dp());
 647       target_di = mdo->bci_to_di(target);
 648       offset = target_di - my_di;
 649       set_displacement_at(count, offset);
 650     }
 651     target = sw.default_offset() + bci();
 652     my_di = mdo->dp_to_di(dp());
 653     target_di = mdo->bci_to_di(target);
 654     offset = target_di - my_di;
 655     set_default_displacement(offset);
 656   }
 657 }
 658 
 659 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
 660   print_shared(st, "MultiBranchData", extra);
 661   st->print_cr("default_count(%u) displacement(%d)",
 662                default_count(), default_displacement());
 663   int cases = number_of_cases();
 664   for (int i = 0; i < cases; i++) {
 665     tab(st);
 666     st->print_cr("count(%u) displacement(%d)",
 667                  count_at(i), displacement_at(i));
 668   }
 669 }
 670 
 671 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 672   print_shared(st, "ArgInfoData", extra);
 673   int nargs = number_of_args();
 674   for (int i = 0; i < nargs; i++) {
 675     st->print("  0x%x", arg_modified(i));
 676   }
 677   st->cr();
 678 }
 679 
 680 int ParametersTypeData::compute_cell_count(Method* m) {
 681   if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
 682     return 0;
 683   }
 684   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
 685   int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
 686   if (obj_args > 0) {
 687     return obj_args + 1; // 1 cell for array len
 688   }
 689   return 0;
 690 }
 691 
// Fill in the stack slots for the method's parameters; the receiver is
// included (include_receiver == true) for non-static methods.
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
 695 
// True when parameter-type profiling is globally enabled.
bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}
 699 
// Print the shared header followed by the per-parameter entries.
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
 706 
// Visit the Method* recorded for this speculative trap.
void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}
 711 
// Print the shared header followed by the trapping method's short name.
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
 718 
 719 // ==================================================================
 720 // MethodData*
 721 //
 722 // A MethodData* holds information which has been collected about
 723 // a method.
 724 
// Allocate a MethodData for `method` in the loader's metaspace.  Metaspace
// allocation can safepoint/throw, hence TRAPS and the no-locks assertion.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
 732 
// Number of profiling cells a bytecode needs: a fixed count per profile
// kind, variable_cell_count when the size depends on the actual bytecode
// operands or profiling flags, or no_profile_data when nothing is profiled.
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Type checks: full receiver-type profile only when TypeProfileCasts.
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    // Statically-bound calls: size varies when argument/return profiling is on.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    // Dynamically-bound calls: size varies when argument/return profiling is on.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Switches: size depends on the number of cases in the actual bytecode.
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
 794 
// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  // For variable-size entries, resolve the actual cell count from the
  // concrete bytecode and the per-call profiling decisions.
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}
 839 
 840 bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
 841   // Bytecodes for which we may use speculation
 842   switch (code) {
 843   case Bytecodes::_checkcast:
 844   case Bytecodes::_instanceof:
 845   case Bytecodes::_aastore:
 846   case Bytecodes::_invokevirtual:
 847   case Bytecodes::_invokeinterface:
 848   case Bytecodes::_if_acmpeq:
 849   case Bytecodes::_if_acmpne:
 850   case Bytecodes::_ifnull:
 851   case Bytecodes::_ifnonnull:
 852   case Bytecodes::_invokestatic:
 853 #ifdef COMPILER2
 854     if (CompilerConfig::is_c2_enabled()) {
 855       return UseTypeSpeculation;
 856     }
 857 #endif
 858   default:
 859     return false;
 860   }
 861   return false;
 862 }
 863 
 864 #if INCLUDE_JVMCI
 865 
// Allocate fs_size bytes (the FailedSpeculation header plus its variable
// length payload) from the C heap; the requested object size 'size' is
// ignored in favor of the caller-computed total. Uses the non-throwing
// allocator; since this operator is declared throw(), a null result makes
// the new-expression yield nullptr without running the constructor.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
 869 
// Copy the speculation bytes into the variable-length data area that follows
// this object (room for which was reserved by operator new above).
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}
 873 
 874 // A heuristic check to detect nmethods that outlive a failed speculations list.
 875 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 876   jlong head = (jlong)(address) *failed_speculations_address;
 877   if ((head & 0x1) == 0x1) {
 878     stringStream st;
 879     if (nm != nullptr) {
 880       st.print("%d", nm->compile_id());
 881       Method* method = nm->method();
 882       st.print_raw("{");
 883       if (method != nullptr) {
 884         method->print_name(&st);
 885       } else {
 886         const char* jvmci_name = nm->jvmci_name();
 887         if (jvmci_name != nullptr) {
 888           st.print_raw(jvmci_name);
 889         }
 890       }
 891       st.print_raw("}");
 892     } else {
 893       st.print("<unknown>");
 894     }
 895     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 896   }
 897 }
 898 
// Append a new FailedSpeculation to the lock-free singly-linked list rooted
// at *failed_speculations_address, unless an entry with identical payload is
// already present. Returns true iff a new entry was appended; returns false
// on allocation failure or when the speculation was already recorded.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  // Total allocation: object header plus the variable-length payload.
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        // The low bit of the list head doubles as the "freed" marker, so
        // entries must be pointer aligned for that bit to be available.
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // Try to install fs at the current tail. If another thread raced us,
      // fall through and inspect the entry it installed instead.
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        // A duplicate exists; discard the entry we speculatively allocated.
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}
 935 
 936 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 937   assert(failed_speculations_address != nullptr, "must be");
 938   FailedSpeculation* fs = *failed_speculations_address;
 939   while (fs != nullptr) {
 940     FailedSpeculation* next = fs->next();
 941     delete fs;
 942     fs = next;
 943   }
 944 
 945   // Write an unaligned value to failed_speculations_address to denote
 946   // that it is no longer a valid pointer. This is allows for the check
 947   // in add_failed_speculation against adding to a freed failed
 948   // speculations list.
 949   long* head = (long*) failed_speculations_address;
 950   (*head) = (*head) | 0x1;
 951 }
 952 #endif // INCLUDE_JVMCI
 953 
 954 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
 955 #if INCLUDE_JVMCI
 956   if (ProfileTraps) {
 957     // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
 958     int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 959 
 960     // Make sure we have a minimum number of extra data slots to
 961     // allocate SpeculativeTrapData entries. We would want to have one
 962     // entry per compilation that inlines this method and for which
 963     // some type speculation assumption fails. So the room we need for
 964     // the SpeculativeTrapData entries doesn't directly depend on the
 965     // size of the method. Because it's hard to estimate, we reserve
 966     // space for an arbitrary number of entries.
 967     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 968       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 969 
 970     return MAX2(extra_data_count, spec_data_count);
 971   } else {
 972     return 0;
 973   }
 974 #else // INCLUDE_JVMCI
 975   if (ProfileTraps) {
 976     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
 977     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
 978     // If the method is large, let the extra BCIs grow numerous (to ~1%).
 979     int one_percent_of_data
 980       = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
 981     if (extra_data_count < one_percent_of_data)
 982       extra_data_count = one_percent_of_data;
 983     if (extra_data_count > empty_bc_count)
 984       extra_data_count = empty_bc_count;  // no need for more
 985 
 986     // Make sure we have a minimum number of extra data slots to
 987     // allocate SpeculativeTrapData entries. We would want to have one
 988     // entry per compilation that inlines this method and for which
 989     // some type speculation assumption fails. So the room we need for
 990     // the SpeculativeTrapData entries doesn't directly depend on the
 991     // size of the method. Because it's hard to estimate, we reserve
 992     // space for an arbitrary number of entries.
 993     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 994       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 995 
 996     return MAX2(extra_data_count, spec_data_count);
 997   } else {
 998     return 0;
 999   }
1000 #endif // INCLUDE_JVMCI
1001 }
1002 
1003 // Compute the size of the MethodData* necessary to store
1004 // profiling information about a given method.  Size is in bytes.
1005 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
1006   int data_size = 0;
1007   BytecodeStream stream(method);
1008   Bytecodes::Code c;
1009   int empty_bc_count = 0;  // number of bytecodes lacking data
1010   bool needs_speculative_traps = false;
1011   while ((c = stream.next()) >= 0) {
1012     int size_in_bytes = compute_data_size(&stream);
1013     data_size += size_in_bytes;
1014     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
1015     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
1016   }
1017   int object_size = in_bytes(data_offset()) + data_size;
1018 
1019   // Add some extra DataLayout cells (at least one) to track stray traps.
1020   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1021   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
1022 
1023   // Add a cell to record information about modified arguments.
1024   int arg_size = method->size_of_parameters();
1025   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
1026 
1027   // Reserve room for an area of the MDO dedicated to profiling of
1028   // parameters
1029   int args_cell = ParametersTypeData::compute_cell_count(method());
1030   if (args_cell > 0) {
1031     object_size += DataLayout::compute_size_in_bytes(args_cell);
1032   }
1033 
1034   if (ProfileExceptionHandlers && method()->has_exception_handler()) {
1035     int num_exception_handlers = method()->exception_table_length();
1036     object_size += num_exception_handlers * single_exception_handler_data_size();
1037   }
1038 
1039   return object_size;
1040 }
1041 
1042 // Compute the size of the MethodData* necessary to store
1043 // profiling information about a given method.  Size is in words
1044 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1045   int byte_size = compute_allocation_size_in_bytes(method);
1046   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1047   return align_metadata_size(word_size);
1048 }
1049 
// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
int MethodData::initialize_data(BytecodeStream* stream,
                                       int data_index) {
  int cell_count = -1;                 // stays -1 for unprofiled bytecodes
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Type checks: profile observed receiver types when enabled, otherwise
    // keep just a bit of trap state.
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    // Statically-bound calls: a plain counter, widened to a CallTypeData
    // record when argument/return type profiling applies at this site.
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    // Unconditional transfers: record the taken displacement.
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    // Dynamically-dispatched calls: record receiver types, widened further
    // when argument/return type profiling applies at this site.
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    // Two-way branches: taken/not-taken counts plus displacement.
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Multi-way branches: variable-length, one counter per target.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // Bytecode gets no profile entry; cell_count stays -1.
    break;
  }
  // Cross-check against bytecode_cell_count(), except for the
  // variable-length and type-profile cases which legitimately differ.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}
1173 
1174 // Get the data at an arbitrary (sort of) data index.
1175 ProfileData* MethodData::data_at(int data_index) const {
1176   if (out_of_bounds(data_index)) {
1177     return nullptr;
1178   }
1179   DataLayout* data_layout = data_layout_at(data_index);
1180   return data_layout->data_in();
1181 }
1182 
1183 int DataLayout::cell_count() {
1184   switch (tag()) {
1185   case DataLayout::no_tag:
1186   default:
1187     ShouldNotReachHere();
1188     return 0;
1189   case DataLayout::bit_data_tag:
1190     return BitData::static_cell_count();
1191   case DataLayout::counter_data_tag:
1192     return CounterData::static_cell_count();
1193   case DataLayout::jump_data_tag:
1194     return JumpData::static_cell_count();
1195   case DataLayout::receiver_type_data_tag:
1196     return ReceiverTypeData::static_cell_count();
1197   case DataLayout::virtual_call_data_tag:
1198     return VirtualCallData::static_cell_count();
1199   case DataLayout::ret_data_tag:
1200     return RetData::static_cell_count();
1201   case DataLayout::branch_data_tag:
1202     return BranchData::static_cell_count();
1203   case DataLayout::multi_branch_data_tag:
1204     return ((new MultiBranchData(this))->cell_count());
1205   case DataLayout::arg_info_data_tag:
1206     return ((new ArgInfoData(this))->cell_count());
1207   case DataLayout::call_type_data_tag:
1208     return ((new CallTypeData(this))->cell_count());
1209   case DataLayout::virtual_call_type_data_tag:
1210     return ((new VirtualCallTypeData(this))->cell_count());
1211   case DataLayout::parameters_type_data_tag:
1212     return ((new ParametersTypeData(this))->cell_count());
1213   case DataLayout::speculative_trap_data_tag:
1214     return SpeculativeTrapData::static_cell_count();
1215   }
1216 }
1217 ProfileData* DataLayout::data_in() {
1218   switch (tag()) {
1219   case DataLayout::no_tag:
1220   default:
1221     ShouldNotReachHere();
1222     return nullptr;
1223   case DataLayout::bit_data_tag:
1224     return new BitData(this);
1225   case DataLayout::counter_data_tag:
1226     return new CounterData(this);
1227   case DataLayout::jump_data_tag:
1228     return new JumpData(this);
1229   case DataLayout::receiver_type_data_tag:
1230     return new ReceiverTypeData(this);
1231   case DataLayout::virtual_call_data_tag:
1232     return new VirtualCallData(this);
1233   case DataLayout::ret_data_tag:
1234     return new RetData(this);
1235   case DataLayout::branch_data_tag:
1236     return new BranchData(this);
1237   case DataLayout::multi_branch_data_tag:
1238     return new MultiBranchData(this);
1239   case DataLayout::arg_info_data_tag:
1240     return new ArgInfoData(this);
1241   case DataLayout::call_type_data_tag:
1242     return new CallTypeData(this);
1243   case DataLayout::virtual_call_type_data_tag:
1244     return new VirtualCallTypeData(this);
1245   case DataLayout::parameters_type_data_tag:
1246     return new ParametersTypeData(this);
1247   case DataLayout::speculative_trap_data_tag:
1248     return new SpeculativeTrapData(this);
1249   }
1250 }
1251 
1252 // Iteration over data.
1253 ProfileData* MethodData::next_data(ProfileData* current) const {
1254   int current_index = dp_to_di(current->dp());
1255   int next_index = current_index + current->size_in_bytes();
1256   ProfileData* next = data_at(next_index);
1257   return next;
1258 }
1259 
1260 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1261   int current_index = dp_to_di((address)current);
1262   int next_index = current_index + current->size_in_bytes();
1263   if (out_of_bounds(next_index)) {
1264     return nullptr;
1265   }
1266   DataLayout* next = data_layout_at(next_index);
1267   return next;
1268 }
1269 
1270 // Give each of the data entries a chance to perform specific
1271 // data initialization.
1272 void MethodData::post_initialize(BytecodeStream* stream) {
1273   ResourceMark rm;
1274   ProfileData* data;
1275   for (data = first_data(); is_valid(data); data = next_data(data)) {
1276     stream->set_start(data->bci());
1277     stream->next();
1278     data->post_initialize(stream, this);
1279   }
1280   if (_parameters_type_data_di != no_parameters) {
1281     parameters_type_data()->post_initialize(nullptr, this);
1282   }
1283 }
1284 
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
    // NOTE(review): lock starts out null here — presumably created lazily
    // via extra_data_lock(); confirm in methodData.hpp.
    _extra_data_lock = nullptr;
    initialize();
}
1294 
// Default constructor: only legal in CDS configurations (dumping the static
// archive or running with shared spaces), as enforced by the assert.
MethodData::MethodData() {
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
1298 
// Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
// not being accessed while the contents are being rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;  // the MDO whose storage is rewritten
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const                         { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint.  This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  bool allow_nested_vm_operations() const        { return true; }  // may run from within another VM op
};
1314 
// Reinitialize this MDO inside a VM operation (i.e. at a safepoint) so that
// no other thread observes the contents while they are being rewritten.
void MethodData::reinitialize() {
  VM_ReinitializeMDO op(this);
  VMThread::execute(&op);
}
1319 
1320 
// Lay out and initialize all sections of this MDO: per-bytecode profile
// entries, extra (trap) entries, the args-modified record, the optional
// parameter-type area, and optional exception-handler entries. The layout
// computed here must match compute_allocation_size_in_bytes() exactly
// (checked by the assert at the end).
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Exception handler entries go last, after the parameter area.
  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
1404 
// Reset the mutable profiling state: compiler counters, invocation/backedge
// counters and their notify masks, trap/loop/block statistics, JVMCI state,
// and escape flags. Does not touch the data-section layout.
void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

  // Initialize escape flags.
  clear_escape_info();
}
1432 
1433 bool MethodData::is_mature() const {
1434   return CompilationPolicy::is_mature((MethodData*)this);
1435 }
1436 
1437 // Translate a bci to its corresponding data index (di).
1438 address MethodData::bci_to_dp(int bci) {
1439   ResourceMark rm;
1440   DataLayout* data = data_layout_before(bci);
1441   DataLayout* prev = nullptr;
1442   for ( ; is_valid(data); data = next_data_layout(data)) {
1443     if (data->bci() >= bci) {
1444       if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
1445       else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
1446       return (address)data;
1447     }
1448     prev = data;
1449   }
1450   return (address)limit_data_position();
1451 }
1452 
1453 // Translate a bci to its corresponding data, or null.
1454 ProfileData* MethodData::bci_to_data(int bci) {
1455   check_extra_data_locked();
1456 
1457   DataLayout* data = data_layout_before(bci);
1458   for ( ; is_valid(data); data = next_data_layout(data)) {
1459     if (data->bci() == bci) {
1460       set_hint_di(dp_to_di((address)data));
1461       return data->data_in();
1462     } else if (data->bci() > bci) {
1463       break;
1464     }
1465   }
1466   return bci_to_extra_data(bci, nullptr, false);
1467 }
1468 
1469 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1470   assert(ProfileExceptionHandlers, "not profiling");
1471   for (int i = 0; i < num_exception_handler_data(); i++) {
1472     DataLayout* exception_handler_data = exception_handler_data_at(i);
1473     if (exception_handler_data->bci() == bci) {
1474       return exception_handler_data;
1475     }
1476   }
1477   return nullptr;
1478 }
1479 
1480 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1481   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1482   return data != nullptr ? new BitData(data) : nullptr;
1483 }
1484 
1485 BitData MethodData::exception_handler_bci_to_data(int bci) {
1486   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1487   assert(data != nullptr, "invalid bci");
1488   return BitData(data);
1489 }
1490 
1491 DataLayout* MethodData::next_extra(DataLayout* dp) {
1492   int nb_cells = 0;
1493   switch(dp->tag()) {
1494   case DataLayout::bit_data_tag:
1495   case DataLayout::no_tag:
1496     nb_cells = BitData::static_cell_count();
1497     break;
1498   case DataLayout::speculative_trap_data_tag:
1499     nb_cells = SpeculativeTrapData::static_cell_count();
1500     break;
1501   default:
1502     fatal("unexpected tag %d", dp->tag());
1503   }
1504   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1505 }
1506 
// Linearly scan the extra data section starting at dp for an entry matching
// bci (and, for speculative trap entries, the method m). On return dp points
// at the entry where the search stopped: the match, the first free (no_tag)
// slot, or the terminating ArgInfoData record (reported as 'end').
ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
  check_extra_data_locked();

  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First free slot: bci is not present.
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      // Plain trap entries only match bci-only queries (m == nullptr).
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      // Speculative trap entries match on both bci and method.
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        if (dp->bci() == bci) {
          assert(data->method() != nullptr, "method must be set");
          if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}
1544 
1545 
// Translate a bci to its corresponding extra data, or null.
// With m == nullptr this looks up (and optionally creates) a BitData trap
// entry; with a non-null m, a SpeculativeTrapData entry for that method.
// Caller must hold the extra data lock.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    // Either found, or the section is exhausted (dp advanced to 'end').
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Build the header in a temporary layout, then store it into the slot.
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}
1594 
1595 ArgInfoData *MethodData::arg_info() {
1596   DataLayout* dp    = extra_data_base();
1597   DataLayout* end   = args_data_limit();
1598   for (; dp < end; dp = next_extra(dp)) {
1599     if (dp->tag() == DataLayout::arg_info_data_tag)
1600       return new ArgInfoData(dp);
1601   }
1602   return nullptr;
1603 }
1604 
1605 // Printing
1606 
// Print a header identifying the method followed by all profile entries.
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1614 
// Print a one-line description of this MethodData (no profile entries).
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1620 
// Dump every profile entry: the parameter type data (if present), the
// regular per-bytecode entries, then the extra data section (bit data,
// speculative traps, and the trailing ArgInfoData record).
void MethodData::print_data_on(outputStream* st) const {
  // Take the extra data lock unless this thread already holds it, so the
  // extra data section cannot be reshaped while we walk it.
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  // Regular entries: print the data index, then the entry itself.
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }

  st->print_cr("--- Extra data:");
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    // dp was set to end above when we hit the ArgInfoData record; print
    // it and then terminate the walk.
    if (dp >= end) return;
  }
}
1665 
1666 // Verification
1667 
// Sanity-check this MethodData; per-entry verification is delegated to
// verify_data_on (currently a stub).
void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}
1673 
// Placeholder for per-entry verification of the profile data.
void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}
1678 
1679 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1680   if (m->is_compiled_lambda_form()) {
1681     return true;
1682   }
1683 
1684   Bytecode_invoke inv(m , bci);
1685   return inv.is_invokedynamic() || inv.is_invokehandle();
1686 }
1687 
1688 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1689   Bytecode_invoke inv(m , bci);
1690   if (inv.is_invokevirtual()) {
1691     Symbol* klass = inv.klass();
1692     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1693         klass == vmSymbols::sun_misc_Unsafe() ||
1694         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1695       Symbol* name = inv.name();
1696       if (name->starts_with("get") || name->starts_with("put")) {
1697         return true;
1698       }
1699     }
1700   }
1701   return false;
1702 }
1703 
1704 int MethodData::profile_arguments_flag() {
1705   return TypeProfileLevel % 10;
1706 }
1707 
1708 bool MethodData::profile_arguments() {
1709   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1710 }
1711 
1712 bool MethodData::profile_arguments_jsr292_only() {
1713   return profile_arguments_flag() == type_profile_jsr292;
1714 }
1715 
1716 bool MethodData::profile_all_arguments() {
1717   return profile_arguments_flag() == type_profile_all;
1718 }
1719 
1720 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1721   if (!profile_arguments()) {
1722     return false;
1723   }
1724 
1725   if (profile_all_arguments()) {
1726     return true;
1727   }
1728 
1729   if (profile_unsafe(m, bci)) {
1730     return true;
1731   }
1732 
1733   assert(profile_arguments_jsr292_only(), "inconsistent");
1734   return profile_jsr292(m, bci);
1735 }
1736 
1737 int MethodData::profile_return_flag() {
1738   return (TypeProfileLevel % 100) / 10;
1739 }
1740 
1741 bool MethodData::profile_return() {
1742   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1743 }
1744 
1745 bool MethodData::profile_return_jsr292_only() {
1746   return profile_return_flag() == type_profile_jsr292;
1747 }
1748 
1749 bool MethodData::profile_all_return() {
1750   return profile_return_flag() == type_profile_all;
1751 }
1752 
1753 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1754   if (!profile_return()) {
1755     return false;
1756   }
1757 
1758   if (profile_all_return()) {
1759     return true;
1760   }
1761 
1762   assert(profile_return_jsr292_only(), "inconsistent");
1763   return profile_jsr292(m, bci);
1764 }
1765 
1766 int MethodData::profile_parameters_flag() {
1767   return TypeProfileLevel / 100;
1768 }
1769 
1770 bool MethodData::profile_parameters() {
1771   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1772 }
1773 
1774 bool MethodData::profile_parameters_jsr292_only() {
1775   return profile_parameters_flag() == type_profile_jsr292;
1776 }
1777 
1778 bool MethodData::profile_all_parameters() {
1779   return profile_parameters_flag() == type_profile_all;
1780 }
1781 
1782 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1783   if (!profile_parameters()) {
1784     return false;
1785   }
1786 
1787   if (profile_all_parameters()) {
1788     return true;
1789   }
1790 
1791   assert(profile_parameters_jsr292_only(), "inconsistent");
1792   return m->is_compiled_lambda_form();
1793 }
1794 
// Iterate over the metaspace pointers embedded in this MethodData (used by
// CDS archiving): the owning method, pointers held by the parameter and
// regular profile entries, and the methods referenced by SpeculativeTrapData
// records in the extra data section.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  for (DataLayout* dp = extra_data_base();
                   dp < extra_data_limit();
                   dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      // no_tag or the trailing arg-info record marks the end of the
      // speculative trap entries; nothing past here holds method pointers.
      break;
    }
  }
}
1817 
// Helper for clean_extra_data(): when "reset" is false, slide the entry at
// dp left by "shift" cells (compacting it over dead SpeculativeTrapData
// entries seen earlier); when "reset" is true, zero the "shift" cells
// ending at dp (clearing the now-unused tail left behind by compaction).
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    // No dead entries seen so far; nothing to move or clear.
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}
1840 
1841 // Check for entries that reference an unloaded method
1842 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1843   bool _always_clean;
1844 public:
1845   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1846   bool is_live(Method* m) {
1847     if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1848       return true; // TODO: treat as unloaded instead?
1849     }
1850     return !(_always_clean) && m->method_holder()->is_loader_alive();
1851   }
1852 };
1853 
1854 // Check for entries that reference a redefined method
1855 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1856 public:
1857   CleanExtraDataMethodClosure() {}
1858   bool is_live(Method* m) { return !m->is_old(); }
1859 };
1860 
// Lazily create the lock guarding the extra data section. Uses a racy
// allocate-then-cmpxchg scheme so no separate lock is needed for the
// initialization itself; the loser of the race deletes its candidate lock
// and adopts the winner's.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint
    lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
1875 
1876 // Remove SpeculativeTrapData entries that reference an unloaded or
1877 // redefined method
// Walk the extra data section, compacting live entries left over dead
// SpeculativeTrapData entries (those whose method is excluded or no longer
// live per the closure), then zero the freed tail.
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Number of cells occupied by dead entries seen so far; every live entry
  // that follows is slid left by this many cells.
  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
1921 
1922 // Verify there's no unloaded or redefined method referenced by a
1923 // SpeculativeTrapData entry
1924 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
1925   check_extra_data_locked();
1926 
1927 #ifdef ASSERT
1928   DataLayout* dp  = extra_data_base();
1929   DataLayout* end = args_data_limit();
1930 
1931   for (; dp < end; dp = next_extra(dp)) {
1932     switch(dp->tag()) {
1933     case DataLayout::speculative_trap_data_tag: {
1934       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1935       Method* m = data->method();
1936       assert(m != nullptr && cl->is_live(m), "Method should exist");
1937       break;
1938     }
1939     case DataLayout::bit_data_tag:
1940       continue;
1941     case DataLayout::no_tag:
1942     case DataLayout::arg_info_data_tag:
1943       return;
1944     default:
1945       fatal("unexpected tag %d", dp->tag());
1946     }
1947   }
1948 #endif
1949 }
1950 
1951 void MethodData::clean_method_data(bool always_clean) {
1952   ResourceMark rm;
1953   for (ProfileData* data = first_data();
1954        is_valid(data);
1955        data = next_data(data)) {
1956     data->clean_weak_klass_links(always_clean);
1957   }
1958   ParametersTypeData* parameters = parameters_type_data();
1959   if (parameters != nullptr) {
1960     parameters->clean_weak_klass_links(always_clean);
1961   }
1962 
1963   CleanExtraDataKlassClosure cl(always_clean);
1964 
1965   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1966   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1967 
1968   clean_extra_data(&cl);
1969   verify_extra_data_clean(&cl);
1970 }
1971 
1972 // This is called during redefinition to clean all "old" redefined
1973 // methods out of MethodData for all methods.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  // Drop extra-data entries that reference obsolete ("old") method versions.
  CleanExtraDataMethodClosure cl;

  // Lock to modify extra data, and prevent Safepoint from breaking the lock
  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);

  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}
1984 
// Release resources owned by this MethodData before its metaspace storage
// is freed. The loader_data argument is unused here.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
1988 
// Free C-heap allocations hanging off this MethodData (currently only the
// JVMCI failed-speculation list, when JVMCI is built in).
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
1994 
1995 #if INCLUDE_CDS
// CDS dump time: the extra data lock is a C-heap object and cannot be
// archived, so clear it; extra_data_lock() recreates it lazily on use.
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
}
1999 
// CDS runtime: nothing to restore eagerly — the extra data lock is
// recreated on first use by extra_data_lock() (eager variant kept below
// for reference).
void MethodData::restore_unshareable_info(TRAPS) {
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
2003 #endif // INCLUDE_CDS
2004        
2005 #ifdef ASSERT
2006 void MethodData::check_extra_data_locked() const {
2007     // Cast const away, just to be able to verify the lock
2008     // Usually we only want non-const accesses on the lock,
2009     // so this here is an exception.
2010     MethodData* self = (MethodData*)this;
2011     assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
2012     assert(!Thread::current()->is_Java_thread() ||
2013            JavaThread::current()->is_in_no_safepoint_scope(),
2014            "JavaThread must have NoSafepointVerifier inside lock scope");
2015 }
2016 #endif