1 /*
   2  * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/cdsConfig.hpp"
  27 #include "ci/ciMethodData.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compilationPolicy.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "interpreter/bytecode.hpp"
  33 #include "interpreter/bytecodeStream.hpp"
  34 #include "interpreter/linkResolver.hpp"
  35 #include "memory/metaspaceClosure.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "oops/klass.inline.hpp"
  38 #include "oops/methodData.inline.hpp"
  39 #include "prims/jvmtiRedefineClasses.hpp"
  40 #include "runtime/atomic.hpp"
  41 #include "runtime/deoptimization.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/orderAccess.hpp"
  44 #include "runtime/safepointVerifiers.hpp"
  45 #include "runtime/signature.hpp"
  46 #include "utilities/align.hpp"
  47 #include "utilities/checkedCast.hpp"
  48 #include "utilities/copy.hpp"
  49 
  50 // ==================================================================
  51 // DataLayout
  52 //
  53 // Overlay for generic profiling data.
  54 
  55 // Some types of data layouts need a length field.
  56 bool DataLayout::needs_array_len(u1 tag) {
  57   return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
  58 }
  59 
  60 // Perform generic initialization of the data.  More specific
  61 // initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Zero the whole header word first, then set the tag and bci fields.
  _header._bits = (intptr_t)0;
  _header._struct._tag = tag;
  _header._struct._bci = bci;
  // Clear every payload cell.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  // Array-shaped layouts keep their own element count in the first cell.
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Type-profiling layouts require extra, tag-specific setup.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}
  78 
void DataLayout::clean_weak_klass_links(bool always_clean) {
  // data_in() materializes a resource-allocated ProfileData wrapper for
  // this layout, hence the ResourceMark.
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}
  83 
  84 
  85 // ==================================================================
  86 // ProfileData
  87 //
  88 // A ProfileData object is created to refer to a section of profiling
  89 // data in a structured way.
  90 
  91 // Constructor for invalid ProfileData.
  92 ProfileData::ProfileData() {
  93   _data = nullptr;
  94 }
  95 
  96 char* ProfileData::print_data_on_helper(const MethodData* md) const {
  97   DataLayout* dp  = md->extra_data_base();
  98   DataLayout* end = md->args_data_limit();
  99   stringStream ss;
 100   for (;; dp = MethodData::next_extra(dp)) {
 101     assert(dp < end, "moved past end of extra data");
 102     switch(dp->tag()) {
 103     case DataLayout::speculative_trap_data_tag:
 104       if (dp->bci() == bci()) {
 105         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
 106         int trap = data->trap_state();
 107         char buf[100];
 108         ss.print("trap/");
 109         data->method()->print_short_name(&ss);
 110         ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
 111       }
 112       break;
 113     case DataLayout::bit_data_tag:
 114       break;
 115     case DataLayout::no_tag:
 116     case DataLayout::arg_info_data_tag:
 117       return ss.as_string();
 118       break;
 119     default:
 120       fatal("unexpected tag %d", dp->tag());
 121     }
 122   }
 123   return nullptr;
 124 }
 125 
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  // Append any extra-data annotations (e.g. speculative traps) recorded
  // for this bci before printing the data itself.
  print_data_on(st, print_data_on_helper(md));
}
 129 
// Print the fields common to every ProfileData flavor: bci, type name,
// trap state (if any), caller-supplied extra text, and flags.
void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) ", flags);
  }
}
 148 
 149 void ProfileData::tab(outputStream* st, bool first) const {
 150   st->fill_to(first ? tab_width_one : tab_width_two);
 151 }
 152 
 153 // ==================================================================
 154 // BitData
 155 //
 156 // A BitData corresponds to a one-bit flag.  This is used to indicate
 157 // whether a checkcast bytecode has seen a null value.
 158 
 159 
void BitData::print_data_on(outputStream* st, const char* extra) const {
  // A BitData has no payload beyond the shared header fields.
  print_shared(st, "BitData", extra);
  st->cr();
}
 164 
 165 // ==================================================================
 166 // CounterData
 167 //
 168 // A CounterData corresponds to a simple counter.
 169 
void CounterData::print_data_on(outputStream* st, const char* extra) const {
  // Shared header followed by the single execution counter.
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}
 174 
 175 // ==================================================================
 176 // JumpData
 177 //
 178 // A JumpData is used to access profiling information for a direct
 179 // branch.  It is a counter, used for counting the number of branches,
 180 // plus a data displacement, used for realigning the data pointer to
 181 // the corresponding target bci.
 182 
 183 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 184   assert(stream->bci() == bci(), "wrong pos");
 185   int target;
 186   Bytecodes::Code c = stream->code();
 187   if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
 188     target = stream->dest_w();
 189   } else {
 190     target = stream->dest();
 191   }
 192   int my_di = mdo->dp_to_di(dp());
 193   int target_di = mdo->bci_to_di(target);
 194   int offset = target_di - my_di;
 195   set_displacement(offset);
 196 }
 197 
void JumpData::print_data_on(outputStream* st, const char* extra) const {
  // Taken count plus the cached displacement to the target's data.
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
 202 
 203 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
 204   // Parameter profiling include the receiver
 205   int args_count = include_receiver ? 1 : 0;
 206   ResourceMark rm;
 207   ReferenceArgumentCount rac(signature);
 208   args_count += rac.count();
 209   args_count = MIN2(args_count, max);
 210   return args_count * per_arg_cell_count;
 211 }
 212 
// Total cell count for the type entries attached to an invoke: optional
// argument entries, an optional return entry, and a header iff either
// section is present.
int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  // Only reference-typed results are profiled.
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = ReturnTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}
 234 
// Walks a method signature and records the stack-slot offset of each
// reference-typed argument, up to a maximum number of entries.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                    // cap on how many reference args to record
  int _offset;                 // running slot offset within the argument list
  GrowableArray<int> _offsets; // recorded offsets of reference arguments

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    // Remember the slot of each reference argument until the cap is hit;
    // always advance the offset by the argument's word count.
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Slot offset of the i-th recorded reference argument.
  int off_at(int i) const { return _offsets.at(i); }
};
 259 
// Fill in the stack-slot column for each profiled entry and reset the
// type column to 'none'. When parameter profiling includes the receiver,
// entry 0 is the receiver at slot 0.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    // Shift slots by one when a receiver occupies slot 0.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}
 275 
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Sanity: the cell layout must agree with the number of reference
    // arguments actually present in the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 296 
// Mirrors CallTypeData::post_initialize for virtual/interface invokes.
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Sanity: the cell layout must agree with the number of reference
    // arguments actually present in the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
 317 
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_alive()) {
        // Clear the klass but keep the status bits from the old value.
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}
 332 
void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    // Strip the status tag before handing a plain Klass** to the closure.
    set_type(i, klass_part(type(i))); // reset tag; FIXME: properly handle tagged pointers
    Klass** k = (Klass**)type_adr(i);
    it->push(k);
//    it->push_tagged(k);
  }
}
 341 
// Single-entry analogue of TypeStackSlotEntries::clean_weak_klass_links.
void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_alive()) {
      // Clear the klass but keep the status bits from the old value.
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}
 354 
void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  // Capture the slot address first; the tag is stripped in place below
  // so the closure sees an untagged Klass*.
  Klass** k = (Klass**)type_adr(); // tagged
  set_type(klass_part(type())); // reset tag; FIXME: properly handle tagged pointers
  it->push(k);
//  it->push_tagged(k);
}
 361 
// Whether return-type profiling is globally enabled.
bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}
 365 
// Whether argument-type profiling is globally enabled.
bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}
 369 
// Pretty-print a tagged klass word: the klass part (or a none/unknown
// marker) followed by a note if a null was observed at this point.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
 382 
void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  // One line per entry: index, stack slot, and the profiled type.
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}
 391 
void ReturnTypeEntry::print_data_on(outputStream* st) const {
  // Single line: the profiled return type.
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}
 397 
void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  // Counter line first, then optional argument and return type sections.
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}
 411 
void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  // Receiver-type profile first, then optional argument/return sections.
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}
 425 
 426 // ==================================================================
 427 // ReceiverTypeData
 428 //
 429 // A ReceiverTypeData is used to access profiling information about a
 430 // dynamic type check.  It consists of a counter which counts the total times
 431 // that the check is reached, and a series of (Klass*, count) pairs
 432 // which are used to store a type profile for the receiver of the check.
 433 
 434 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
 435     for (uint row = 0; row < row_limit(); row++) {
 436     Klass* p = receiver(row);
 437     if (p != nullptr) {
 438       if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
 439         continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
 440       }
 441       if (always_clean || !p->is_loader_alive()) {
 442         clear_row(row);
 443       }
 444     }
 445   }
 446 }
 447 
 448 void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
 449   for (uint row = 0; row < row_limit(); row++) {
 450     Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
 451     it->push(recv);
 452   }
 453 }
 454 
 455 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 456   uint row;
 457   int entries = 0;
 458   for (row = 0; row < row_limit(); row++) {
 459     if (receiver(row) != nullptr)  entries++;
 460   }
 461   st->print_cr("count(%u) entries(%u)", count(), entries);
 462   int total = count();
 463   for (row = 0; row < row_limit(); row++) {
 464     if (receiver(row) != nullptr) {
 465       total += receiver_count(row);
 466     }
 467   }
 468   for (row = 0; row < row_limit(); row++) {
 469     if (receiver(row) != nullptr) {
 470       tab(st);
 471       receiver(row)->print_value_on(st);
 472       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 473     }
 474   }
 475 }
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  // Shared header plus the receiver-type rows.
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}
 480 
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  // Shared header plus the receiver-type rows.
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}
 485 
 486 // ==================================================================
 487 // RetData
 488 //
 489 // A RetData is used to access profiling information for a ret bytecode.
 490 // It is composed of a count of the number of times that the ret has
 491 // been executed, followed by a series of triples of the form
 492 // (bci, count, di) which count the number of times that some bci was the
 493 // target of the ret and cache a corresponding displacement.
 494 
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  // Mark every cache row empty: displacement -1 and bci == no_bci.
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}
 504 
 505 // This routine needs to atomically update the RetData structure, so the
 506 // caller needs to hold the RetData_lock before it gets here.  Since taking
 507 // the lock can block (and allow GC) and since RetData is a ProfileData is a
 508 // wrapper around a derived oop, taking the lock in _this_ method will
 509 // basically cause the 'this' pointer's _data field to contain junk after the
 510 // lock.  We require the caller to take the lock before making the ProfileData
 511 // structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  // If no row was free, the mdp is still returned; no row is recorded.
  return mdp;
}
 529 
 530 void RetData::print_data_on(outputStream* st, const char* extra) const {
 531   print_shared(st, "RetData", extra);
 532   uint row;
 533   int entries = 0;
 534   for (row = 0; row < row_limit(); row++) {
 535     if (bci(row) != no_bci)  entries++;
 536   }
 537   st->print_cr("count(%u) entries(%u)", count(), entries);
 538   for (row = 0; row < row_limit(); row++) {
 539     if (bci(row) != no_bci) {
 540       tab(st);
 541       st->print_cr("bci(%d: count(%u) displacement(%d))",
 542                    bci(row), bci_count(row), bci_displacement(row));
 543     }
 544   }
 545 }
 546 
 547 // ==================================================================
 548 // BranchData
 549 //
 550 // A BranchData is used to access profiling data for a two-way branch.
 551 // It consists of taken and not_taken counts as well as a data displacement
 552 // for the taken case.
 553 
 554 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 555   assert(stream->bci() == bci(), "wrong pos");
 556   int target = stream->dest();
 557   int my_di = mdo->dp_to_di(dp());
 558   int target_di = mdo->bci_to_di(target);
 559   int offset = target_di - my_di;
 560   set_displacement(offset);
 561 }
 562 
void BranchData::print_data_on(outputStream* st, const char* extra) const {
  // Taken count/displacement on the first line, not-taken count below.
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}
 570 
 571 // ==================================================================
 572 // MultiBranchData
 573 //
 574 // A MultiBranchData is used to access profiling information for
 575 // a multi-way branch (*switch bytecodes).  It consists of a series
 576 // of (count, displacement) pairs, which count the number of times each
 577 // case was taken and specify the data displacement for each branch target.
 578 
 579 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
 580   int cell_count = 0;
 581   if (stream->code() == Bytecodes::_tableswitch) {
 582     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 583     cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
 584   } else {
 585     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 586     cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
 587   }
 588   return cell_count;
 589 }
 590 
 591 void MultiBranchData::post_initialize(BytecodeStream* stream,
 592                                       MethodData* mdo) {
 593   assert(stream->bci() == bci(), "wrong pos");
 594   int target;
 595   int my_di;
 596   int target_di;
 597   int offset;
 598   if (stream->code() == Bytecodes::_tableswitch) {
 599     Bytecode_tableswitch sw(stream->method()(), stream->bcp());
 600     int len = sw.length();
 601     assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
 602     for (int count = 0; count < len; count++) {
 603       target = sw.dest_offset_at(count) + bci();
 604       my_di = mdo->dp_to_di(dp());
 605       target_di = mdo->bci_to_di(target);
 606       offset = target_di - my_di;
 607       set_displacement_at(count, offset);
 608     }
 609     target = sw.default_offset() + bci();
 610     my_di = mdo->dp_to_di(dp());
 611     target_di = mdo->bci_to_di(target);
 612     offset = target_di - my_di;
 613     set_default_displacement(offset);
 614 
 615   } else {
 616     Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
 617     int npairs = sw.number_of_pairs();
 618     assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
 619     for (int count = 0; count < npairs; count++) {
 620       LookupswitchPair pair = sw.pair_at(count);
 621       target = pair.offset() + bci();
 622       my_di = mdo->dp_to_di(dp());
 623       target_di = mdo->bci_to_di(target);
 624       offset = target_di - my_di;
 625       set_displacement_at(count, offset);
 626     }
 627     target = sw.default_offset() + bci();
 628     my_di = mdo->dp_to_di(dp());
 629     target_di = mdo->bci_to_di(target);
 630     offset = target_di - my_di;
 631     set_default_displacement(offset);
 632   }
 633 }
 634 
void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  // Default case first, then one line per switch case.
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}
 646 
void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  // One hex "modified" word per argument.
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print("  0x%x", arg_modified(i));
  }
  st->cr();
}
 655 
// Cell count for parameter-type profiling of method 'm': per-argument
// cells for its reference parameters (receiver included) plus one cell
// for the array length, or 0 when the method is not profiled.
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means "no limit".
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}
 667 
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  // Parameter profiling always includes the receiver for non-static methods.
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
 671 
// Whether parameter-type profiling is globally enabled.
bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}
 675 
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  // Shared header followed by the per-parameter type entries.
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
 682 
void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  // The trap records the Method* that was the subject of the speculation.
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}
 687 
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  // Shared header followed by the speculated method's short name.
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
 694 
 695 // ==================================================================
 696 // MethodData*
 697 //
 698 // A MethodData* holds information which has been collected about
 699 // a method.
 700 
// Allocate and construct a MethodData for 'method' in the given class
// loader's metaspace. May fail with a pending exception (TRAPS).
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  // NOTE(review): allocation presumably may block/safepoint, which is why
  // the caller must not hold locks here — confirm against metaspace code.
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
 708 
// Number of profiling cells a bytecode needs, or no_profile_data when the
// bytecode is not profiled, or variable_cell_count when the count depends
// on operands and must be computed by compute_data_size().
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // With TypeProfileCasts the seen receiver types are recorded;
    // otherwise only a null-seen bit.
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    // Argument/return type profiling makes the size operand-dependent.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Size depends on the number of switch cases.
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
 770 
 771 // Compute the size of the profiling information corresponding to
 772 // the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  // Operand-dependent sizes are resolved per bytecode here.
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      // Type entries are only sized in when this call site is selected
      // for argument or return profiling.
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}
 815 
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
    // Without C2 (or when it is disabled) the cases above fall through
    // into the default: no speculation.
  default:
    return false;
  }
  return false;
}
 839 
 840 #if INCLUDE_JVMCI
 841 
// Sized allocation for a FailedSpeculation: fs_size includes room for the
// speculation bytes that trail the object, so the default 'size' argument
// (sizeof(FailedSpeculation)) is ignored. Uses the nothrow C-heap allocator,
// so a null return (instead of an exception) signals allocation failure.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
 845 
// Records a failed speculation by copying speculation_len bytes into the
// trailing storage reserved by the sized operator new above. The entry is
// created unlinked (_next == nullptr); linking happens in add_failed_speculation.
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}
 849 
// A heuristic check to detect nmethods that outlive a failed speculations list.
// free_failed_speculations() tags a freed list by setting the low bit of the
// head slot (making it an unaligned "pointer"); seeing that tag here means a
// caller is trying to append to a list that has already been freed, which is
// a fatal error. 'nm' is only used to build a description for the message.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    // Describe the offending nmethod as "<compile_id>{<method name>}".
    stringStream st;
    if (nm != nullptr) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != nullptr) {
        method->print_name(&st);
      } else {
        // The Method may be unavailable; fall back to the JVMCI-supplied name.
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != nullptr) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}
 874 
// Appends a speculation to the lock-free singly linked list rooted at
// *failed_speculations_address, unless an identical speculation is already
// present. Returns true if a new entry was appended; false if the speculation
// was a duplicate or memory could not be allocated.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        // Pointer alignment matters: the low bit of the head slot is reserved
        // as the "list was freed" tag (see guarantee_failed_speculations_alive).
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // Try to install fs at the apparent tail. On failure another thread
      // appended first; fall through and examine the entry it installed.
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        // Duplicate found after we had allocated — discard the unused entry.
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}
 911 
 912 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
 913   assert(failed_speculations_address != nullptr, "must be");
 914   FailedSpeculation* fs = *failed_speculations_address;
 915   while (fs != nullptr) {
 916     FailedSpeculation* next = fs->next();
 917     delete fs;
 918     fs = next;
 919   }
 920 
 921   // Write an unaligned value to failed_speculations_address to denote
 922   // that it is no longer a valid pointer. This is allows for the check
 923   // in add_failed_speculation against adding to a freed failed
 924   // speculations list.
 925   long* head = (long*) failed_speculations_address;
 926   (*head) = (*head) | 0x1;
 927 }
 928 #endif // INCLUDE_JVMCI
 929 
 930 int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
 931 #if INCLUDE_JVMCI
 932   if (ProfileTraps) {
 933     // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
 934     int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
 935 
 936     // Make sure we have a minimum number of extra data slots to
 937     // allocate SpeculativeTrapData entries. We would want to have one
 938     // entry per compilation that inlines this method and for which
 939     // some type speculation assumption fails. So the room we need for
 940     // the SpeculativeTrapData entries doesn't directly depend on the
 941     // size of the method. Because it's hard to estimate, we reserve
 942     // space for an arbitrary number of entries.
 943     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 944       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 945 
 946     return MAX2(extra_data_count, spec_data_count);
 947   } else {
 948     return 0;
 949   }
 950 #else // INCLUDE_JVMCI
 951   if (ProfileTraps) {
 952     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
 953     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
 954     // If the method is large, let the extra BCIs grow numerous (to ~1%).
 955     int one_percent_of_data
 956       = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
 957     if (extra_data_count < one_percent_of_data)
 958       extra_data_count = one_percent_of_data;
 959     if (extra_data_count > empty_bc_count)
 960       extra_data_count = empty_bc_count;  // no need for more
 961 
 962     // Make sure we have a minimum number of extra data slots to
 963     // allocate SpeculativeTrapData entries. We would want to have one
 964     // entry per compilation that inlines this method and for which
 965     // some type speculation assumption fails. So the room we need for
 966     // the SpeculativeTrapData entries doesn't directly depend on the
 967     // size of the method. Because it's hard to estimate, we reserve
 968     // space for an arbitrary number of entries.
 969     int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
 970       (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
 971 
 972     return MAX2(extra_data_count, spec_data_count);
 973   } else {
 974     return 0;
 975   }
 976 #endif // INCLUDE_JVMCI
 977 }
 978 
// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
// NOTE: the layout computed here must stay in sync with initialize(),
// which asserts that the initialized size equals this computed size.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  // Walk all bytecodes, summing the per-bytecode profile entry sizes.
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    // Under JVMCI only trapping bytecodes count toward the extra-slot budget.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }

  // One BitData-sized record per exception handler, when enabled.
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
  }

  return object_size;
}
1017 
1018 // Compute the size of the MethodData* necessary to store
1019 // profiling information about a given method.  Size is in words
1020 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1021   int byte_size = compute_allocation_size_in_bytes(method);
1022   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1023   return align_metadata_size(word_size);
1024 }
1025 
// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
// Chooses a DataLayout tag and cell count based on the bytecode at the
// stream's current position, then writes the header into the layout at
// data_index. Bytecodes with no profile entry return 0 (no space used).
int MethodData::initialize_data(BytecodeStream* stream,
                                       int data_index) {
  int cell_count = -1;               // -1 means "no profile data for this bytecode"
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    // Type checks get receiver-type profiling when enabled, else a plain
    // BitData that can still record the null_seen flag.
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // Only upgrade to the type-profiling tag when extra cells were reserved.
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Variable-sized: depends on the number of switch targets.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // Bytecode is not profiled: cell_count stays -1, tag stays no_tag.
    break;
  }
  // Cross-check against bytecode_cell_count(); variable-sized tags are exempt.
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}
1149 
1150 // Get the data at an arbitrary (sort of) data index.
1151 ProfileData* MethodData::data_at(int data_index) const {
1152   if (out_of_bounds(data_index)) {
1153     return nullptr;
1154   }
1155   DataLayout* data_layout = data_layout_at(data_index);
1156   return data_layout->data_in();
1157 }
1158 
// Number of value cells in this layout, decoded from its tag.
// Fixed-size entries use the static count; variable-sized entries
// (multi-branch, arg-info, call-type, parameters) consult a temporary
// ProfileData wrapper that reads the stored count.
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  }
}
// Wrap this raw layout in the ProfileData subclass matching its tag.
// Callers typically hold a ResourceMark (see e.g. print_data_on and
// bci_to_dp) for the lifetime of the returned wrapper.
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}
1227 
1228 // Iteration over data.
1229 ProfileData* MethodData::next_data(ProfileData* current) const {
1230   int current_index = dp_to_di(current->dp());
1231   int next_index = current_index + current->size_in_bytes();
1232   ProfileData* next = data_at(next_index);
1233   return next;
1234 }
1235 
1236 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1237   int current_index = dp_to_di((address)current);
1238   int next_index = current_index + current->size_in_bytes();
1239   if (out_of_bounds(next_index)) {
1240     return nullptr;
1241   }
1242   DataLayout* next = data_layout_at(next_index);
1243   return next;
1244 }
1245 
1246 // Give each of the data entries a chance to perform specific
1247 // data initialization.
1248 void MethodData::post_initialize(BytecodeStream* stream) {
1249   ResourceMark rm;
1250   ProfileData* data;
1251   for (data = first_data(); is_valid(data); data = next_data(data)) {
1252     stream->set_start(data->bci());
1253     stream->next();
1254     data->post_initialize(stream, this);
1255   }
1256   if (_parameters_type_data_di != no_parameters) {
1257     parameters_type_data()->post_initialize(nullptr, this);
1258   }
1259 }
1260 
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
    // NOTE(review): _extra_data_lock is only cleared here; presumably it is
    // created lazily elsewhere — confirm against extra_data_lock() accessor.
    _extra_data_lock = nullptr;
    initialize();
}
1270 
// Default constructor performs no initialization; the assert restricts its
// use to CDS archive dumping/loading paths.
MethodData::MethodData() {
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
1274 
// Lay out and initialize the whole MethodData: per-bytecode entries, the
// extra (trap) data section, the arg-info record, the optional parameters
// area, and optional exception-handler records. The resulting size must
// match compute_allocation_size_in_bytes() exactly (asserted at the end).
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    // Mirror of the accounting in compute_allocation_size_in_bytes().
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Exception handler records go last; one BitData per handler, keyed by
  // the handler's pc so it can be found by bci later.
  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
1359 
// Reset the scalar state of this MethodData: counters, notification masks,
// compiler bookkeeping, JVMCI/RTM state, and escape flags. Called from
// initialize() before the data section is laid out.
void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  // A per-method CompileThresholdScaling option scales the notification
  // frequency logs before they are turned into bit masks.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM; // No RTM lock eliding by default
  // Per-method CompileCommand options can force RTM lock eliding on or off.
  if (UseRTMLocking &&
      !CompilerOracle::has_option(mh, CompileCommandEnum::NoRTMLockEliding)) {
    if (CompilerOracle::has_option(mh, CompileCommandEnum::UseRTMLockEliding) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize escape flags.
  clear_escape_info();
}
1402 
1403 // Get a measure of how much mileage the method has on it.
1404 int MethodData::mileage_of(Method* method) {
1405   return MAX2(method->invocation_count(), method->backedge_count());
1406 }
1407 
1408 bool MethodData::is_mature() const {
1409   return CompilationPolicy::is_mature((MethodData*)this);
1410 }
1411 
// Translate a bci to its corresponding data index (di).
// Returns the address of the first entry whose bci is >= the requested bci,
// or limit_data_position() when no such entry exists. As a side effect the
// search hint is updated so later searches can start near this position.
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Exact hit: hint at this entry. Overshoot: hint at the entry before it.
      if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}
1427 
// Translate a bci to its corresponding data, or null.
// Searches the regular per-bytecode entries first; on a miss, falls back to
// the extra-data section (without creating a new entry there).
ProfileData* MethodData::bci_to_data(int bci) {
  check_extra_data_locked();

  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      // Remember the hit so future searches start nearby.
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      // Entries are visited in bci order; once past the target, stop.
      break;
    }
  }
  return bci_to_extra_data(bci, nullptr, false);
}
1443 
1444 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1445   assert(ProfileExceptionHandlers, "not profiling");
1446   for (int i = 0; i < num_exception_handler_data(); i++) {
1447     DataLayout* exception_handler_data = exception_handler_data_at(i);
1448     if (exception_handler_data->bci() == bci) {
1449       return exception_handler_data;
1450     }
1451   }
1452   return nullptr;
1453 }
1454 
1455 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1456   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1457   return data != nullptr ? new BitData(data) : nullptr;
1458 }
1459 
1460 BitData MethodData::exception_handler_bci_to_data(int bci) {
1461   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1462   assert(data != nullptr, "invalid bci");
1463   return BitData(data);
1464 }
1465 
1466 DataLayout* MethodData::next_extra(DataLayout* dp) {
1467   int nb_cells = 0;
1468   switch(dp->tag()) {
1469   case DataLayout::bit_data_tag:
1470   case DataLayout::no_tag:
1471     nb_cells = BitData::static_cell_count();
1472     break;
1473   case DataLayout::speculative_trap_data_tag:
1474     nb_cells = SpeculativeTrapData::static_cell_count();
1475     break;
1476   default:
1477     fatal("unexpected tag %d", dp->tag());
1478   }
1479   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1480 }
1481 
// Search the extra-data section for an entry matching bci (and, for
// speculative traps, method m). On return, dp (an in/out parameter) points
// at the slot where the search stopped: a free no_tag slot usable for
// allocation, or 'end' if the section is exhausted.
ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
  check_extra_data_locked();

  DataLayout* end = args_data_limit();

  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // First free slot: no match; dp tells the caller where to allocate.
      return nullptr;
    case DataLayout::arg_info_data_tag:
      dp = end;
      return nullptr; // ArgInfoData is at the end of extra data section.
    case DataLayout::bit_data_tag:
      // BitData entries carry no method; they match on bci alone.
      if (m == nullptr && dp->bci() == bci) {
        return new BitData(dp);
      }
      break;
    case DataLayout::speculative_trap_data_tag:
      // Speculative traps match on both bci and method.
      if (m != nullptr) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        if (dp->bci() == bci) {
          assert(data->method() != nullptr, "method must be set");
          if (data->method() == m) {
            return data;
          }
        }
      }
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  // Unreachable: the loop above only exits via return.
  return nullptr;
}
1519 
1520 
// Translate a bci to its corresponding extra data, or null.
// When create_if_missing is true and no matching entry exists, a new entry
// is allocated in the first free slot: a BitData when m is null, otherwise
// a SpeculativeTrapData (which records m). Returns null when the entry is
// absent and not created (no room, redefined method, or create disabled).
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    // dp points at the free slot found by bci_to_extra_data_find above.
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Build the header off to the side, then publish it with a single store.
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}
1569 
1570 ArgInfoData *MethodData::arg_info() {
1571   DataLayout* dp    = extra_data_base();
1572   DataLayout* end   = args_data_limit();
1573   for (; dp < end; dp = next_extra(dp)) {
1574     if (dp->tag() == DataLayout::arg_info_data_tag)
1575       return new ArgInfoData(dp);
1576   }
1577   return nullptr;
1578 }
1579 
1580 // Printing
1581 
// Print a one-line description of the owning method followed by all
// profile entries (delegates to print_data_on).
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1589 
// Print a short, single-line identification of this MethodData
// (no profile entries; compare print_on above).
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1595 
// Print the parameters area (if any), every regular profile entry, and then
// the extra-data section, each entry prefixed by its data index (di).
void MethodData::print_data_on(outputStream* st) const {
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }
  st->print_cr("--- Extra data:");
  DataLayout* dp    = extra_data_base();
  DataLayout* end   = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "Atomic::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      // Unallocated slot: nothing to print, move on.
      continue;
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    // 'dp' was forced to 'end' after printing the trailing ArgInfoData.
    if (dp >= end) return;
  }
}
1636 
1637 // Verification
1638 
// Entry point for metadata verification; delegates to verify_data_on.
void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}
1644 
// Placeholder: per-entry verification of the profile data is not implemented.
void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}
1649 
1650 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1651   if (m->is_compiled_lambda_form()) {
1652     return true;
1653   }
1654 
1655   Bytecode_invoke inv(m , bci);
1656   return inv.is_invokedynamic() || inv.is_invokehandle();
1657 }
1658 
1659 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1660   Bytecode_invoke inv(m , bci);
1661   if (inv.is_invokevirtual()) {
1662     Symbol* klass = inv.klass();
1663     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1664         klass == vmSymbols::sun_misc_Unsafe() ||
1665         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1666       Symbol* name = inv.name();
1667       if (name->starts_with("get") || name->starts_with("put")) {
1668         return true;
1669       }
1670     }
1671   }
1672   return false;
1673 }
1674 
// Ones digit of TypeProfileLevel controls argument profiling.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}
1678 
1679 bool MethodData::profile_arguments() {
1680   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1681 }
1682 
// True when argument profiling is restricted to JSR 292 call sites.
bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}
1686 
// True when every invoke gets argument profiling.
bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}
1690 
1691 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1692   if (!profile_arguments()) {
1693     return false;
1694   }
1695 
1696   if (profile_all_arguments()) {
1697     return true;
1698   }
1699 
1700   if (profile_unsafe(m, bci)) {
1701     return true;
1702   }
1703 
1704   assert(profile_arguments_jsr292_only(), "inconsistent");
1705   return profile_jsr292(m, bci);
1706 }
1707 
// Tens digit of TypeProfileLevel controls return-value profiling.
int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}
1711 
1712 bool MethodData::profile_return() {
1713   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1714 }
1715 
// True when return-value profiling is restricted to JSR 292 call sites.
bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}
1719 
// True when every invoke gets return-value profiling.
bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}
1723 
1724 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1725   if (!profile_return()) {
1726     return false;
1727   }
1728 
1729   if (profile_all_return()) {
1730     return true;
1731   }
1732 
1733   assert(profile_return_jsr292_only(), "inconsistent");
1734   return profile_jsr292(m, bci);
1735 }
1736 
// Hundreds digit of TypeProfileLevel controls parameter profiling.
int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}
1740 
1741 bool MethodData::profile_parameters() {
1742   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1743 }
1744 
// True when parameter profiling is restricted to JSR 292 methods.
bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}
1748 
// True when every method gets parameter profiling.
bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}
1752 
1753 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1754   if (!profile_parameters()) {
1755     return false;
1756   }
1757 
1758   if (profile_all_parameters()) {
1759     return true;
1760   }
1761 
1762   assert(profile_parameters_jsr292_only(), "inconsistent");
1763   return m->is_compiled_lambda_form();
1764 }
1765 
// Visit all metaspace pointers embedded in this MethodData (used by CDS
// archiving): the owning method, the parameter profile, every normal
// ProfileData entry, and any SpeculativeTrapData method references in the
// extra data section.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  for (DataLayout* dp = extra_data_base();
                   dp < extra_data_limit();
                   dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      // SpeculativeTrapData holds a Method* that must be relocated too.
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      // no_tag marks the first unused slot; arg_info is the last entry of the
      // section. Either way, no further trap entries can follow.
      break;
    }
  }
}
1788 
// Helper for clean_extra_data(): either slide the entry at dp left by "shift"
// raw cells (reset == false, compacting over dead entries), or zero the
// "shift" cells ending at dp (reset == true, clearing the stale tail).
// A shift of 0 means no dead entries were seen, so there is nothing to do.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}
1811 
1812 // Check for entries that reference an unloaded method
1813 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1814   bool _always_clean;
1815 public:
1816   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1817   bool is_live(Method* m) {
1818     if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1819       return true; // TODO: treat as unloaded instead?
1820     }
1821     return !(_always_clean) && m->method_holder()->is_loader_alive();
1822   }
1823 };
1824 
1825 // Check for entries that reference a redefined method
// Check for entries that reference a redefined method: a method marked "old"
// by JVMTI class redefinition is considered dead.
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};
1831 
// Lazily create the mutex guarding the extra data section. Concurrent
// first callers race via CAS: the loser deletes its freshly allocated
// mutex and adopts the winner's.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
1845 
1846 // Remove SpeculativeTrapData entries that reference an unloaded or
1847 // redefined method
// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method. Live entries are compacted to the left over dead
// ones; the vacated tail cells are zeroed (reset to no_tag).
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
1891 
1892 // Verify there's no unloaded or redefined method referenced by a
1893 // SpeculativeTrapData entry
// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry (debug builds only; a no-op in product).
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the trap entries: nothing after these can be a trap entry.
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}
1920 
// Drop weak Klass references from all profile entries, then purge
// SpeculativeTrapData entries whose methods belong to dead class loaders
// (or all of them when always_clean is true).
void MethodData::clean_method_data(bool always_clean) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(always_clean);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != nullptr) {
    parameters->clean_weak_klass_links(always_clean);
  }

  CleanExtraDataKlassClosure cl(always_clean);

  // Lock to modify extra data, and prevent Safepoint from breaking the lock
  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);

  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}
1941 
1942 // This is called during redefinition to clean all "old" redefined
1943 // methods out of MethodData for all methods.
// This is called during redefinition to clean all "old" redefined
// methods out of MethodData for all methods.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  CleanExtraDataMethodClosure cl;

  // Lock to modify extra data, and prevent Safepoint from breaking the lock
  MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);

  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}
1954 
// Metadata deallocation hook; loader_data is unused here, only the
// C-heap side structures need releasing.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
1958 
// Free C-heap allocations owned by this MethodData (currently only the
// JVMCI failed-speculation list, when JVMCI is built in).
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
1964 
#if INCLUDE_CDS
// CDS dump support: the extra-data mutex lives in the C heap and cannot be
// archived, so drop the pointer before dumping.
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
}

void MethodData::restore_unshareable_info(TRAPS) {
  // Nothing to restore eagerly: the lock is recreated lazily by
  // extra_data_lock() on first use.
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
#endif // INCLUDE_CDS
1974        
#ifdef ASSERT
// Assert that the current thread holds the extra-data lock (or that we are
// dumping a CDS archive, where single-threaded access makes the lock moot),
// and that Java threads cannot safepoint while holding it.
void MethodData::check_extra_data_locked() const {
    // Cast const away, just to be able to verify the lock
    // Usually we only want non-const accesses on the lock,
    // so this here is an exception.
    MethodData* self = (MethodData*)this;
    assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
    assert(!Thread::current()->is_Java_thread() ||
           JavaThread::current()->is_in_no_safepoint_scope(),
           "JavaThread must have NoSafepointVerifier inside lock scope");
}
#endif