1 /*
   2  * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/cdsConfig.hpp"
  27 #include "ci/ciMethodData.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compilationPolicy.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "interpreter/bytecode.hpp"
  33 #include "interpreter/bytecodeStream.hpp"
  34 #include "interpreter/linkResolver.hpp"
  35 #include "memory/metaspaceClosure.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "oops/klass.inline.hpp"
  38 #include "oops/methodData.inline.hpp"
  39 #include "prims/jvmtiRedefineClasses.hpp"
  40 #include "runtime/atomic.hpp"
  41 #include "runtime/deoptimization.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/orderAccess.hpp"
  44 #include "runtime/safepointVerifiers.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "utilities/align.hpp"
  47 #include "utilities/checkedCast.hpp"
  48 #include "utilities/copy.hpp"
  49 
  50 // ==================================================================
  51 // DataLayout
  52 //
  53 // Overlay for generic profiling data.
  54 
  55 // Some types of data layouts need a length field.
  56 bool DataLayout::needs_array_len(u1 tag) {
  57   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  58 }
  59 
  60 // Perform generic initialization of the data.  More specific
  61 // initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Build the header in a local first so it can be published in one store below.
  DataLayout temp;
  temp._header._bits = (intptr_t)0;
  temp._header._struct._tag = tag;
  temp._header._struct._bci = bci;
  // Write the header using a single intptr_t write.  This ensures that if the layout is
  // reinitialized readers will never see the transient state where the header is 0.
  _header = temp._header;

  // Zero all payload cells.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  // Array-shaped layouts record their element count in the first cell.
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Type-profiling layouts need per-entry setup beyond plain zeroing.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}
  83 
// Delegate weak-klass cleaning to the tag-specific ProfileData wrapper.
void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m; // data_in() allocates the wrapper in the resource area
  data_in()->clean_weak_klass_links(always_clean);
}
  88 
  89 
  90 // ==================================================================
  91 // ProfileData
  92 //
  93 // A ProfileData object is created to refer to a section of profiling
  94 // data in a structured way.
  95 
  96 // Constructor for invalid ProfileData.
  97 ProfileData::ProfileData() {
  98   _data = nullptr;
  99 }
 100 
// Build a string of any speculative-trap annotations stored in the extra
// data section that match this data's bci.  Returns a resource-allocated
// string (possibly empty) once the end of the extra data is reached.
char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      // Accumulate trap info for entries at the same bci as this data.
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the (relevant) extra data: hand back what we collected.
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr; // not reached: the loop only exits via the return above
}
 130 
 131 void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
 132   print_data_on(st, print_data_on_helper(md));
 133 }
 134 
 135 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
 136   st->print("bci: %d ", bci());
 137   st->fill_to(tab_width_one + 1);
 138   st->print("%s", name);
 139   tab(st);
 140   int trap = trap_state();
 141   if (trap != 0) {
 142     char buf[100];
 143     st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 144   }
 145   if (extra != nullptr) {
 146     st->print("%s", extra);
 147   }
 148   int flags = data()->flags();
 149   if (flags != 0) {
 150     st->print("flags(%d) ", flags);
 151   }
 152 }
 153 
 154 void ProfileData::tab(outputStream* st, bool first) const {
 155   st->fill_to(first ? tab_width_one : tab_width_two);
 156 }
 157 
 158 // ==================================================================
 159 // BitData
 160 //
 161 // A BitData corresponds to a one-bit flag.  This is used to indicate
 162 // whether a checkcast bytecode has seen a null value.
 163 
 164 
// Print the shared header line; BitData has no extra payload to show.
void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}
 169 
 170 // ==================================================================
 171 // CounterData
 172 //
 173 // A CounterData corresponds to a simple counter.
 174 
// Print the shared header line followed by the counter value.
void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}
 179 
 180 // ==================================================================
 181 // JumpData
 182 //
 183 // A JumpData is used to access profiling information for a direct
 184 // branch.  It is a counter, used for counting the number of branches,
 185 // plus a data displacement, used for realigning the data pointer to
 186 // the corresponding target bci.
 187 
 188 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 189   assert(stream->bci() == bci(), "wrong pos");
 190   int target;
 191   Bytecodes::Code c = stream->code();
 192   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 193     target = stream->dest_w();
 194   } else {
 195     target = stream->dest();
 196   }
 197   int my_di = mdo->dp_to_di(dp());
 198   int target_di = mdo->bci_to_di(target);
 199   int offset = target_di - my_di;
 200   set_displacement(offset);
 201 }
 202 
 203 void JumpData::print_data_on(outputStream* st, const char* extra) const {
 204   print_shared(st, "JumpData", extra);
 205   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 206 }
 207 
 208 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 209   // Parameter profiling include the receiver
 210   int args_count = include_receiver ? 1 : 0;
 211   ResourceMark rm;
 212   ReferenceArgumentCount rac(signature);
 213   args_count += rac.count();
 214   args_count = MIN2(args_count, max);
 215   return args_count * per_arg_cell_count;
 216 }
 217 
 218 int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
 219   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 220   assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
 221   const methodHandle m = stream->method();
 222   int bci = stream->bci();
 223   Bytecode_invoke inv(m, bci);
 224   int args_cell = 0;
 225   if (MethodData::profile_arguments_for_invoke(m, bci)) {
 226     args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
 227   }
 228   int ret_cell = 0;
 229   if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
 230     ret_cell = ReturnTypeEntry::static_cell_count();
 231   }
 232   int header_cell = 0;
 233   if (args_cell + ret_cell > 0) {
 234     header_cell = header_cell_count();
 235   }
 236 
 237   return header_cell + args_cell + ret_cell;
 238 }
 239 
// Walks a method signature and records the stack-slot offset of each
// reference-typed parameter, up to a maximum number of entries.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                    // cap on how many offsets to collect
  int _offset;                 // running stack-slot offset (longs/doubles take 2)
  GrowableArray<int> _offsets; // collected offsets of reference parameters

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  // Called once per parameter type; records offsets of reference types only.
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Offset of the i-th collected reference parameter.
  int off_at(int i) const { return _offsets.at(i); }
};
 264 
 265 void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
 266   ResourceMark rm;
 267   int start = 0;
 268   // Parameter profiling include the receiver
 269   if (include_receiver && has_receiver) {
 270     set_stack_slot(0, 0);
 271     set_type(0, type_none());
 272     start += 1;
 273   }
 274   ArgumentOffsetComputer aos(signature, _number_of_entries-start);
 275   for (int i = start; i < _number_of_entries; i++) {
 276     set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
 277     set_type(i, type_none());
 278   }
 279 }
 280 
// Set up argument stack slots and return entry for an invoke site.
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Debug-only cross-check that the reserved entries match the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // Receiver is excluded here (include_receiver == false).
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 301 
// Same setup as CallTypeData::post_initialize, for virtual/interface calls.
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Debug-only cross-check that the reserved entries match the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    // Receiver is excluded here (include_receiver == false).
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 322 
 323 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 324   for (int i = 0; i < _number_of_entries; i++) {
 325     intptr_t p = type(i);
 326     Klass* k = (Klass*)klass_part(p);
 327     if (k != nullptr) {
 328       if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
 329         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 330       }
 331       if (always_clean || !k->is_loader_alive()) {
 332         set_type(i, with_status((Klass*)nullptr, p));
 333       }
 334     }
 335   }
 336 }
 337 
// Expose each entry's Klass* slot to the metaspace visitor (used e.g.
// during CDS archiving).  Status bits are stripped first because the
// visitor expects plain pointers.
void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    set_type(i, klass_part(type(i))); // reset tag; FIXME: properly handle tagged pointers
    Klass** k = (Klass**)type_adr(i);
    it->push(k);
//    it->push_tagged(k);
  }
}
 346 
 347 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
 348   intptr_t p = type();
 349   Klass* k = (Klass*)klass_part(p);
 350   if (k != nullptr) {
 351     if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
 352       return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 353     }
 354     if (always_clean || !k->is_loader_alive()) {
 355       set_type(with_status((Klass*)nullptr, p));
 356     }
 357   }
 358 }
 359 
// Expose the return-type Klass* slot to the metaspace visitor; the status
// tag is stripped first because the visitor expects a plain pointer.
void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass** k = (Klass**)type_adr(); // tagged
  set_type(klass_part(type())); // reset tag; FIXME: properly handle tagged pointers
  it->push(k);
//  it->push_tagged(k);
}
 366 
// True when return-type profiling is globally enabled.
bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}
 370 
// True when argument-type profiling is globally enabled.
bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}
 374 
// Print a type entry: one of the sentinels ("none"/"unknown") or the
// klass itself, followed by the independent null-seen marker.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
 387 
 388 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
 389   for (int i = 0; i < _number_of_entries; i++) {
 390     _pd->tab(st);
 391     st->print("%d: stack(%u) ", i, stack_slot(i));
 392     print_klass(st, type(i));
 393     st->cr();
 394   }
 395 }
 396 
// Print the recorded return type on its own indented line.
void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}
 402 
 403 void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
 404   CounterData::print_data_on(st, extra);
 405   if (has_arguments()) {
 406     tab(st, true);
 407     st->print("argument types");
 408     _args.print_data_on(st);
 409   }
 410   if (has_return()) {
 411     tab(st, true);
 412     st->print("return type");
 413     _ret.print_data_on(st);
 414   }
 415 }
 416 
 417 void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
 418   VirtualCallData::print_data_on(st, extra);
 419   if (has_arguments()) {
 420     tab(st, true);
 421     st->print("argument types");
 422     _args.print_data_on(st);
 423   }
 424   if (has_return()) {
 425     tab(st, true);
 426     st->print("return type");
 427     _ret.print_data_on(st);
 428   }
 429 }
 430 
 431 // ==================================================================
 432 // ReceiverTypeData
 433 //
 434 // A ReceiverTypeData is used to access profiling information about a
 435 // dynamic type check.  It consists of a counter which counts the total times
 436 // that the check is reached, and a series of (Klass*, count) pairs
 437 // which are used to store a type profile for the receiver of the check.
 438 
 439 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 440     for (uint row = 0; row < row_limit(); row++) {
 441     Klass* p = receiver(row);
 442     if (p != nullptr) {
 443       if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
 444         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 445       }
 446       if (always_clean || !p->is_loader_alive()) {
 447         clear_row(row);
 448       }
 449     }
 450   }
 451 }
 452 
// Expose each receiver row's Klass* slot to the metaspace visitor.
void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
    it->push(recv);
  }
}
 459 
 460 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 461   uint row;
 462   int entries = 0;
 463   for (row = 0; row < row_limit(); row++) {
 464     if (receiver(row) != nullptr)  entries++;
 465   }
 466   st->print_cr("count(%u) entries(%u)", count(), entries);
 467   int total = count();
 468   for (row = 0; row < row_limit(); row++) {
 469     if (receiver(row) != nullptr) {
 470       total += receiver_count(row);
 471     }
 472   }
 473   for (row = 0; row < row_limit(); row++) {
 474     if (receiver(row) != nullptr) {
 475       tab(st);
 476       receiver(row)->print_value_on(st);
 477       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 478     }
 479   }
 480 }
// Print the shared header line followed by the receiver rows.
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}
 485 
// Print the shared header line followed by the receiver rows.
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}
 490 
 491 // ==================================================================
 492 // RetData
 493 //
 494 // A RetData is used to access profiling information for a ret bytecode.
 495 // It is composed of a count of the number of times that the ret has
 496 // been executed, followed by a series of triples of the form
 497 // (bci, count, di) which count the number of times that some bci was the
 498 // target of the ret and cache a corresponding displacement.
 499 
// Mark every cache row empty (bci == no_bci) and publish that state.
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}
 509 
 510 // This routine needs to atomically update the RetData structure, so the
 511 // caller needs to hold the RetData_lock before it gets here.  Since taking
 512 // the lock can block (and allow GC) and since RetData is a ProfileData is a
 513 // wrapper around a derived oop, taking the lock in _this_ method will
 514 // basically cause the 'this' pointer's _data field to contain junk after the
 515 // lock.  We require the caller to take the lock before making the ProfileData
 516 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      // Fill displacement and count before publishing the bci below:
      // readers treat a valid bci as the flag that the row is complete.
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  // Returned even if no free row was found; callers use the mdp directly.
  return mdp;
}
 534 
 535 void RetData::print_data_on(outputStream* st, const char* extra) const {
 536   print_shared(st, "RetData", extra);
 537   uint row;
 538   int entries = 0;
 539   for (row = 0; row < row_limit(); row++) {
 540     if (bci(row) != no_bci)  entries++;
 541   }
 542   st->print_cr("count(%u) entries(%u)", count(), entries);
 543   for (row = 0; row < row_limit(); row++) {
 544     if (bci(row) != no_bci) {
 545       tab(st);
 546       st->print_cr("bci(%d: count(%u) displacement(%d))",
 547                    bci(row), bci_count(row), bci_displacement(row));
 548     }
 549   }
 550 }
 551 
 552 // ==================================================================
 553 // BranchData
 554 //
 555 // A BranchData is used to access profiling data for a two-way branch.
 556 // It consists of taken and not_taken counts as well as a data displacement
 557 // for the taken case.
 558 
 559 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 560   assert(stream->bci() == bci(), "wrong pos");
 561   int target = stream->dest();
 562   int my_di = mdo->dp_to_di(dp());
 563   int target_di = mdo->bci_to_di(target);
 564   int offset = target_di - my_di;
 565   set_displacement(offset);
 566 }
 567 
 568 void BranchData::print_data_on(outputStream* st, const char* extra) const {
 569   print_shared(st, "BranchData", extra);
 570   st->print_cr("taken(%u) displacement(%d)",
 571                taken(), displacement());
 572   tab(st);
 573   st->print_cr("not taken(%u)", not_taken());
 574 }
 575 
 576 // ==================================================================
 577 // MultiBranchData
 578 //
 579 // A MultiBranchData is used to access profiling information for
 580 // a multi-way branch (*switch bytecodes).  It consists of a series
 581 // of (count, displacement) pairs, which count the number of times each
 582 // case was taken and specify the data displacement for each branch target.
 583 
 584 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 585   int cell_count = 0;
 586   if (stream->code() == Bytecodes::_tableswitch) {
 587     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 588     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 589   } else {
 590     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 591     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 592   }
 593   return cell_count;
 594 }
 595 
 596 void MultiBranchData::post_initialize(BytecodeStream* stream,
 597                                       MethodData* mdo) {
 598   assert(stream->bci() == bci(), "wrong pos");
 599   int target;
 600   int my_di;
 601   int target_di;
 602   int offset;
 603   if (stream->code() == Bytecodes::_tableswitch) {
 604     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 605     int len = sw.length();
 606     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 607     for (int count = 0; count < len; count++) {
 608       target = sw.dest_offset_at(count) + bci();
 609       my_di = mdo->dp_to_di(dp());
 610       target_di = mdo->bci_to_di(target);
 611       offset = target_di - my_di;
 612       set_displacement_at(count, offset);
 613     }
 614     target = sw.default_offset() + bci();
 615     my_di = mdo->dp_to_di(dp());
 616     target_di = mdo->bci_to_di(target);
 617     offset = target_di - my_di;
 618     set_default_displacement(offset);
 619 
 620   } else {
 621     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 622     int npairs = sw.number_of_pairs();
 623     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 624     for (int count = 0; count < npairs; count++) {
 625       LookupswitchPair pair = sw.pair_at(count);
 626       target = pair.offset() + bci();
 627       my_di = mdo->dp_to_di(dp());
 628       target_di = mdo->bci_to_di(target);
 629       offset = target_di - my_di;
 630       set_displacement_at(count, offset);
 631     }
 632     target = sw.default_offset() + bci();
 633     my_di = mdo->dp_to_di(dp());
 634     target_di = mdo->bci_to_di(target);
 635     offset = target_di - my_di;
 636     set_default_displacement(offset);
 637   }
 638 }
 639 
 640 void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
 641   print_shared(st, "MultiBranchData", extra);
 642   st->print_cr("default_count(%u) displacement(%d)",
 643                default_count(), default_displacement());
 644   int cases = number_of_cases();
 645   for (int i = 0; i < cases; i++) {
 646     tab(st);
 647     st->print_cr("count(%u) displacement(%d)",
 648                  count_at(i), displacement_at(i));
 649   }
 650 }
 651 
 652 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
 653   print_shared(st, "ArgInfoData", extra);
 654   int nargs = number_of_args();
 655   for (int i = 0; i < nargs; i++) {
 656     st->print("  0x%x", arg_modified(i));
 657   }
 658   st->cr();
 659 }
 660 
 661 int ParametersTypeData::compute_cell_count(Method* m) {
 662   if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
 663     return 0;
 664   }
 665   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
 666   int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
 667   if (obj_args > 0) {
 668     return obj_args + 1; // 1 cell for array len
 669   }
 670   return 0;
 671 }
 672 
// Record stack slots for the method's parameters; the receiver is
// included for non-static methods (include_receiver == true).
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
 676 
// True when parameter-type profiling is globally enabled.
bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}
 680 
// Print the header line followed by the per-parameter type entries.
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
 687 
// Expose the trapped Method* slot to the metaspace visitor.
void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}
 692 
// Print the header line followed by the trapped method's short name.
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
 699 
 700 // ==================================================================
 701 // MethodData*
 702 //
 703 // A MethodData* holds information which has been collected about
 704 // a method.
 705 
// Allocate and construct a MethodData in the loader's metaspace.
// Allocation can block/safepoint, hence the no-locks assertion.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
 713 
// Number of data cells the given bytecode needs, or no_profile_data if it
// is not profiled, or variable_cell_count if the size depends on the
// specific bytecode instance (resolved later by compute_data_size).
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Type checks record receiver types only when TypeProfileCasts is on.
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    // Argument/return profiling makes the size call-site dependent.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    // Virtual dispatch additionally records receiver types.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Size depends on the number of switch cases.
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
 775 
 776 // Compute the size of the profiling information corresponding to
 777 // the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  // Resolve variable-size entries using the concrete bytecode instance.
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      // Only call sites selected for type profiling get the larger layout.
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}
 820 
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    // With C2 enabled these bytecodes are speculative when
    // UseTypeSpeculation is on; otherwise fall through to default (false).
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false; // unreachable: every switch path returns above
}
 844 
 845 #if INCLUDE_JVMCI
 846 
// Sized placement-style allocator: fs_size covers the FailedSpeculation
// header plus the trailing speculation bytes, so it is used instead of
// 'size'. Allocation is nothrow; callers must check for nullptr.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
 850 
// Records the speculation bytes by copying them into the storage
// addressed by data() (allocated via operator new(size, fs_size)).
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}
 854 
 855 // A heuristic check to detect nmethods that outlive a failed speculations list.
 856 static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
 857   jlong head = (jlong)(address) *failed_speculations_address;
 858   if ((head & 0x1) == 0x1) {
 859     stringStream st;
 860     if (nm != nullptr) {
 861       st.print("%d", nm->compile_id());
 862       Method* method = nm->method();
 863       st.print_raw("{");
 864       if (method != nullptr) {
 865         method->print_name(&st);
 866       } else {
 867         const char* jvmci_name = nm->jvmci_name();
 868         if (jvmci_name != nullptr) {
 869           st.print_raw(jvmci_name);
 870         }
 871       }
 872       st.print_raw("}");
 873     } else {
 874       st.print("<unknown>");
 875     }
 876     fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
 877   }
 878 }
 879 
// Appends a new entry holding 'speculation' to the lock-free, append-only
// singly linked list rooted at *failed_speculations_address.
// Returns true if a new entry was appended; false if an identical
// speculation already exists on the list or allocation failed.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  // Total allocation size: header plus the trailing speculation bytes.
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        // The low bit of the list head doubles as a "freed" tag (see
        // free_failed_speculations), so entries must be pointer aligned.
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // Race to install fs at the current tail. If another thread won,
      // fall through and keep scanning from this node.
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        // We allocated but a duplicate appeared first; discard our copy.
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}
 916 
 917 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 918   assert(failed_speculations_address != nullptr, "must be");
 919   FailedSpeculation* fs = *failed_speculations_address;
 920   while (fs != nullptr) {
 921     FailedSpeculation* next = fs->next();
 922     delete fs;
 923     fs = next;
 924   }
 925 
 926   // Write an unaligned value to failed_speculations_address to denote
 927   // that it is no longer a valid pointer. This is allows for the check
 928   // in add_failed_speculation against adding to a freed failed
 929   // speculations list.
 930   long* head = (long*) failed_speculations_address;
 931   (*head) = (*head) | 0x1;
 932 }
 933 #endif // INCLUDE_JVMCI
 934 
 935 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
 936 #if INCLUDE_JVMCI
 937   if (ProfileTraps) {
 938     // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
 939     int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 940 
 941     // Make sure we have a minimum number of extra data slots to
 942     // allocate SpeculativeTrapData entries. We would want to have one
 943     // entry per compilation that inlines this method and for which
 944     // some type speculation assumption fails. So the room we need for
 945     // the SpeculativeTrapData entries doesn't directly depend on the
 946     // size of the method. Because it's hard to estimate, we reserve
 947     // space for an arbitrary number of entries.
 948     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 949       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 950 
 951     return MAX2(extra_data_count, spec_data_count);
 952   } else {
 953     return 0;
 954   }
 955 #else // INCLUDE_JVMCI
 956   if (ProfileTraps) {
 957     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
 958     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
 959     // If the method is large, let the extra BCIs grow numerous (to ~1%).
 960     int one_percent_of_data
 961       = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
 962     if (extra_data_count < one_percent_of_data)
 963       extra_data_count = one_percent_of_data;
 964     if (extra_data_count > empty_bc_count)
 965       extra_data_count = empty_bc_count;  // no need for more
 966 
 967     // Make sure we have a minimum number of extra data slots to
 968     // allocate SpeculativeTrapData entries. We would want to have one
 969     // entry per compilation that inlines this method and for which
 970     // some type speculation assumption fails. So the room we need for
 971     // the SpeculativeTrapData entries doesn't directly depend on the
 972     // size of the method. Because it's hard to estimate, we reserve
 973     // space for an arbitrary number of entries.
 974     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 975       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 976 
 977     return MAX2(extra_data_count, spec_data_count);
 978   } else {
 979     return 0;
 980   }
 981 #endif // INCLUDE_JVMCI
 982 }
 983 
// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
// NOTE: this must stay in sync with initialize(), which asserts that
// the size it actually lays out equals the value computed here.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    // Under JVMCI only bytecodes that can trap count toward extra slots.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }

  // One slot per exception handler when handler profiling is enabled.
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
  }

  return object_size;
}
1022 
1023 // Compute the size of the MethodData* necessary to store
1024 // profiling information about a given method.  Size is in words
1025 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1026   int byte_size = compute_allocation_size_in_bytes(method);
1027   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1028   return align_metadata_size(word_size);
1029 }
1030 
// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
// Chooses the ProfileData kind (tag) and cell count for the bytecode at
// the stream's current position and initializes the DataLayout at
// data_index accordingly. Returns 0 (and writes nothing) for bytecodes
// that are not profiled.
int MethodData::initialize_data(BytecodeStream* stream,
                                       int data_index) {
  int cell_count = -1;
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    // Use the larger CallTypeData layout when argument/return type
    // profiling is requested for this call site.
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // CallTypeData can degenerate to CounterData size when no slots are
    // actually profiled; the tag must match the size that was chosen.
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    // Virtual dispatch additionally records receiver types; same
    // upgrade-to-type-data logic as the static invoke cases above.
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // Bytecode is not profiled: cell_count stays -1, tag stays no_tag.
    break;
  }
  // Cross-check the chosen size against bytecode_cell_count(), except for
  // the variable-length kinds computed above.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}
1154 
1155 // Get the data at an arbitrary (sort of) data index.
1156 ProfileData* MethodData::data_at(int data_index) const {
1157   if (out_of_bounds(data_index)) {
1158     return nullptr;
1159   }
1160   DataLayout* data_layout = data_layout_at(data_index);
1161   return data_layout->data_in();
1162 }
1163 
// Number of value cells in this entry, derived from its tag.
// Fixed-size kinds answer their static count; variable-length kinds
// construct a temporary ProfileData view over this layout to read the
// stored count (callers are expected to hold a ResourceMark).
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  }
}
// Wrap this layout in the ProfileData subclass matching its tag.
// The returned accessor object references this DataLayout directly.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}
1232 
1233 // Iteration over data.
1234 ProfileData* MethodData::next_data(ProfileData* current) const {
1235   int current_index = dp_to_di(current->dp());
1236   int next_index = current_index + current->size_in_bytes();
1237   ProfileData* next = data_at(next_index);
1238   return next;
1239 }
1240 
1241 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1242   int current_index = dp_to_di((address)current);
1243   int next_index = current_index + current->size_in_bytes();
1244   if (out_of_bounds(next_index)) {
1245     return nullptr;
1246   }
1247   DataLayout* next = data_layout_at(next_index);
1248   return next;
1249 }
1250 
// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Reposition the stream at this entry's bytecode before handing it on.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    // The parameter profiling area has no bci, so it gets no stream.
    parameters_type_data()->post_initialize(nullptr, this);
  }
}
1265 
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
    // The extra data lock is not created here; it starts out null.
    _extra_data_lock = nullptr;
    initialize();
}
1275 
// Default constructor: used only for CDS (see assert); performs no
// profiling-state initialization.
MethodData::MethodData() {
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
1279 
// Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
// not being accessed while the contents are being rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;  // the MDO whose storage doit() rewrites
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const                         { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint.  This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  // Permit requesting this op while another VM operation is in progress.
  bool allow_nested_vm_operations() const        { return true; }
};
1295 
// Rewrite this MDO's contents inside a VM operation (safepoint) so no
// other thread observes them mid-rewrite (see VM_ReinitializeMDO).
void MethodData::reinitialize() {
  VM_ReinitializeMDO op(this);
  VMThread::execute(&op);
}
1300 
1301 
// Lay out and initialize all profiling sections of this MDO:
// per-bytecode data, extra (trap) data, arg-info, parameter-type data,
// and exception handler data, in that order. The total laid-out size
// must match compute_allocation_size_in_bytes() (asserted at the end).
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    // Under JVMCI only bytecodes that can trap count toward extra slots.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Exception handler data follows the parameter area (if any).
  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
1386 
// Reset the scalar profiling state (counters, masks, flags) of this MDO.
// Does not touch the per-bytecode data sections (see initialize()).
void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  // The Tier0 notification frequency logs are scaled by any per-method
  // CompileThresholdScaling option before being turned into bit masks.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

  // Initialize escape flags.
  clear_escape_info();
}
1414 
1415 // Get a measure of how much mileage the method has on it.
1416 int MethodData::mileage_of(Method* method) {
1417   return MAX2(method->invocation_count(), method->backedge_count());
1418 }
1419 
1420 bool MethodData::is_mature() const {
1421   return CompilationPolicy::is_mature((MethodData*)this);
1422 }
1423 
// Translate a bci to its corresponding data index (di).
// Returns the position of the first entry at or after bci (or the data
// limit if there is none), updating the search hint as a side effect.
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Hint with the exact match if there is one, otherwise with the
      // last entry before bci, so future searches start nearby.
      if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  // No entry at or after bci: answer the end of the data section.
  return (address)limit_data_position();
}
1439 
// Translate a bci to its corresponding data, or null.
// Searches the regular data section first; falls back to the extra data
// section (without creating a new entry) if no regular entry matches.
ProfileData* MethodData::bci_to_data(int bci) {
  check_extra_data_locked();

  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      // Entries are ordered by bci, so no later entry can match.
      break;
    }
  }
  return bci_to_extra_data(bci, nullptr, false);
}
1455 
1456 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1457   assert(ProfileExceptionHandlers, "not profiling");
1458   for (int i = 0; i < num_exception_handler_data(); i++) {
1459     DataLayout* exception_handler_data = exception_handler_data_at(i);
1460     if (exception_handler_data->bci() == bci) {
1461       return exception_handler_data;
1462     }
1463   }
1464   return nullptr;
1465 }
1466 
1467 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1468   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1469   return data != nullptr ? new BitData(data) : nullptr;
1470 }
1471 
// Returns (by value) the BitData for the exception handler at bci.
// The handler slot must exist; see the _or_null variant otherwise.
BitData MethodData::exception_handler_bci_to_data(int bci) {
  DataLayout* data = exception_handler_bci_to_data_helper(bci);
  assert(data != nullptr, "invalid bci");
  return BitData(data);
}
1477 
1478 DataLayout* MethodData::next_extra(DataLayout* dp) {
1479   int nb_cells = 0;
1480   switch(dp->tag()) {
1481   case DataLayout::bit_data_tag:
1482   case DataLayout::no_tag:
1483     nb_cells = BitData::static_cell_count();
1484     break;
1485   case DataLayout::speculative_trap_data_tag:
1486     nb_cells = SpeculativeTrapData::static_cell_count();
1487     break;
1488   default:
1489     fatal("unexpected tag %d", dp->tag());
1490   }
1491   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1492 }
1493 
// Scan the extra data section for an entry matching bci (and, for
// speculative traps, method m). On return dp points at the slot where
// the scan stopped: the match, the first free (no_tag) slot, or end.
ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
  check_extra_data_locked();

  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First free slot: no entry for this bci exists.
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      // Plain trap entries match on bci alone (m == nullptr queries).
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      // Speculative trap entries must match on both bci and method.
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        if (dp->bci() == bci) {
          assert(data->method() != nullptr, "method must be set");
          if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;  // not reached: the loop only exits via return
}
1531 
1532 
// Translate a bci to its corresponding extra data, or null.
// When create_if_missing is true and no entry exists, a new one is
// allocated in the first free slot: a BitData entry when m is null, or a
// two-slot SpeculativeTrapData entry for method m otherwise. Requires
// the extra data lock (see check_extra_data_locked()).
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Publish a fully-formed header into the slot in one store.
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}
1581 
1582 ArgInfoData *MethodData::arg_info() {
1583   DataLayout* dp    = extra_data_base();
1584   DataLayout* end   = args_data_limit();
1585   for (; dp < end; dp = next_extra(dp)) {
1586     if (dp->tag() == DataLayout::arg_info_data_tag)
1587       return new ArgInfoData(dp);
1588   }
1589   return nullptr;
1590 }
1591 
1592 // Printing
1593 
// Print a header line identifying the method, then all profile data.
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1601 
// Short one-line description: just the method identity, no profile data.
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1607 
// Print the parameter-type area (if any), every regular data entry, and
// then the extra data section. Acquires the extra data lock unless the
// calling thread already holds it.
void MethodData::print_data_on(outputStream* st) const {
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }

  st->print_cr("--- Extra data:");
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;  // unallocated slot: skip without printing
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    // dp was bumped to 'end' above when ArgInfoData was printed.
    if (dp >= end) return;
  }
}
1652 
1653 // Verification
1654 
// Verify this MethodData's basic integrity, then delegate per-entry
// checks to verify_data_on (currently a stub).
void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}
1660 
// Placeholder: per-entry verification of the profile data has not been
// implemented yet.
void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}
1665 
1666 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1667   if (m->is_compiled_lambda_form()) {
1668     return true;
1669   }
1670 
1671   Bytecode_invoke inv(m , bci);
1672   return inv.is_invokedynamic() || inv.is_invokehandle();
1673 }
1674 
1675 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1676   Bytecode_invoke inv(m , bci);
1677   if (inv.is_invokevirtual()) {
1678     Symbol* klass = inv.klass();
1679     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1680         klass == vmSymbols::sun_misc_Unsafe() ||
1681         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1682       Symbol* name = inv.name();
1683       if (name->starts_with("get") || name->starts_with("put")) {
1684         return true;
1685       }
1686     }
1687   }
1688   return false;
1689 }
1690 
1691 int MethodData::profile_arguments_flag() {
1692   return TypeProfileLevel % 10;
1693 }
1694 
1695 bool MethodData::profile_arguments() {
1696   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1697 }
1698 
1699 bool MethodData::profile_arguments_jsr292_only() {
1700   return profile_arguments_flag() == type_profile_jsr292;
1701 }
1702 
1703 bool MethodData::profile_all_arguments() {
1704   return profile_arguments_flag() == type_profile_all;
1705 }
1706 
1707 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1708   if (!profile_arguments()) {
1709     return false;
1710   }
1711 
1712   if (profile_all_arguments()) {
1713     return true;
1714   }
1715 
1716   if (profile_unsafe(m, bci)) {
1717     return true;
1718   }
1719 
1720   assert(profile_arguments_jsr292_only(), "inconsistent");
1721   return profile_jsr292(m, bci);
1722 }
1723 
1724 int MethodData::profile_return_flag() {
1725   return (TypeProfileLevel % 100) / 10;
1726 }
1727 
1728 bool MethodData::profile_return() {
1729   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1730 }
1731 
1732 bool MethodData::profile_return_jsr292_only() {
1733   return profile_return_flag() == type_profile_jsr292;
1734 }
1735 
1736 bool MethodData::profile_all_return() {
1737   return profile_return_flag() == type_profile_all;
1738 }
1739 
1740 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1741   if (!profile_return()) {
1742     return false;
1743   }
1744 
1745   if (profile_all_return()) {
1746     return true;
1747   }
1748 
1749   assert(profile_return_jsr292_only(), "inconsistent");
1750   return profile_jsr292(m, bci);
1751 }
1752 
1753 int MethodData::profile_parameters_flag() {
1754   return TypeProfileLevel / 100;
1755 }
1756 
1757 bool MethodData::profile_parameters() {
1758   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1759 }
1760 
1761 bool MethodData::profile_parameters_jsr292_only() {
1762   return profile_parameters_flag() == type_profile_jsr292;
1763 }
1764 
1765 bool MethodData::profile_all_parameters() {
1766   return profile_parameters_flag() == type_profile_all;
1767 }
1768 
1769 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1770   if (!profile_parameters()) {
1771     return false;
1772   }
1773 
1774   if (profile_all_parameters()) {
1775     return true;
1776   }
1777 
1778   assert(profile_parameters_jsr292_only(), "inconsistent");
1779   return m->is_compiled_lambda_form();
1780 }
1781 
// CDS support: visit every metaspace pointer embedded in this MethodData
// (the owning Method* plus Method* references in profile entries) so the
// archiving code can relocate them.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  // Extra-data section: only SpeculativeTrapData entries are visited here;
  // a no_tag or arg_info_data_tag entry marks the end of the trap entries,
  // so stop scanning there.
  for (DataLayout* dp = extra_data_base();
                   dp < extra_data_limit();
                   dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      break;
    }
  }
}
1804 
// Compaction helper for the extra-data section. With reset == false, the
// entry at dp slides left by "shift" cells (over dead entries already
// skipped); with reset == true, the "shift" cells just before dp are
// zeroed out after all live entries have been moved.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    // No dead entries seen so far; nothing to move or reset.
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}
1827 
1828 // Check for entries that reference an unloaded method
1829 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1830   bool _always_clean;
1831 public:
1832   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1833   bool is_live(Method* m) {
1834     if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1835       return true; // TODO: treat as unloaded instead?
1836     }
1837     return !(_always_clean) && m->method_holder()->is_loader_alive();
1838   }
1839 };
1840 
1841 // Check for entries that reference a redefined method
1842 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1843 public:
1844   CleanExtraDataMethodClosure() {}
1845   bool is_live(Method* m) { return !m->is_old(); }
1846 };
1847 
// Lazily create the per-MDO extra-data lock. Concurrent creators race via
// CAS on _extra_data_lock: the loser deletes its freshly-built lock and
// adopts the winner's, so all threads see one canonical Mutex.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
1861 
1862 // Remove SpeculativeTrapData entries that reference an unloaded or
1863 // redefined method
// Walk the extra-data section, dropping SpeculativeTrapData entries whose
// method the closure reports as dead, and compacting the surviving entries
// left over the holes. Requires the extra-data lock (checked below).
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
1907 
1908 // Verify there's no unloaded or redefined method referenced by a
1909 // SpeculativeTrapData entry
// Debug-only check that clean_extra_data left no SpeculativeTrapData entry
// referencing a dead (unloaded or redefined) method. No-op in product builds.
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Mirrors the walk in clean_extra_data: stop at the first no_tag or
  // arg_info entry, which marks the end of the trap entries.
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}
1936 
1937 void MethodData::clean_method_data(bool always_clean) {
1938   ResourceMark rm;
1939   for (ProfileData* data = first_data();
1940        is_valid(data);
1941        data = next_data(data)) {
1942     data->clean_weak_klass_links(always_clean);
1943   }
1944   ParametersTypeData* parameters = parameters_type_data();
1945   if (parameters != nullptr) {
1946     parameters->clean_weak_klass_links(always_clean);
1947   }
1948 
1949   CleanExtraDataKlassClosure cl(always_clean);
1950 
1951   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1952   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1953 
1954   clean_extra_data(&cl);
1955   verify_extra_data_clean(&cl);
1956 }
1957 
1958 // This is called during redefinition to clean all "old" redefined
1959 // methods out of MethodData for all methods.
1960 void MethodData::clean_weak_method_links() {
1961   ResourceMark rm;
1962   CleanExtraDataMethodClosure cl;
1963 
1964   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1965   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1966 
1967   clean_extra_data(&cl);
1968   verify_extra_data_clean(&cl);
1969 }
1970 
// Metaspace deallocation hook. loader_data is unused here: the only state
// this MethodData releases explicitly is C-heap-allocated (see below).
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
1974 
// Free C-heap structures owned by this MethodData (currently only the
// JVMCI failed-speculation list, when JVMCI is built in).
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
1980 
#if INCLUDE_CDS
// CDS dump-time: the extra-data lock is a C-heap object and cannot be
// archived, so drop it; it is re-created lazily (see extra_data_lock()).
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
}

// CDS run-time: nothing to restore eagerly — the lock is rebuilt on first
// use by extra_data_lock().
void MethodData::restore_unshareable_info(TRAPS) {
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
#endif // INCLUDE_CDS
1990        
1991 #ifdef ASSERT
1992 void MethodData::check_extra_data_locked() const {
1993     // Cast const away, just to be able to verify the lock
1994     // Usually we only want non-const accesses on the lock,
1995     // so this here is an exception.
1996     MethodData* self = (MethodData*)this;
1997     assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
1998     assert(!Thread::current()->is_Java_thread() ||
1999            JavaThread::current()->is_in_no_safepoint_scope(),
2000            "JavaThread must have NoSafepointVerifier inside lock scope");
2001 }
2002 #endif