/*
 * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// ==================================================================
// DataLayout
//
// Overlay for generic profiling data.

// Some types of data layouts need a length field.
bool DataLayout::needs_array_len(u1 tag) {
  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
}
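
// These are the variable-sized layouts: one of their data cells (at
// ArrayData::array_len_off_set) records how many cells follow the header,
// which DataLayout::initialize() fills in below.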

// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  _header._bits = (intptr_t)0;
  _header._struct._tag = tag;
  _header._struct._bci = bci;
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}

void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}


// ==================================================================
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.

// Constructor for invalid ProfileData.
ProfileData::ProfileData() {
  _data = NULL;
}

char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return NULL;
}

void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}

void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d", bci());
  st->fill_to(tab_width_one);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != NULL) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) ", flags);
  }
}

void ProfileData::tab(outputStream* st, bool first) const {
  st->fill_to(first ? tab_width_one : tab_width_two);
}

// ==================================================================
// BitData
//
// A BitData corresponds to a one-bit flag.  This is used to indicate
// whether a checkcast bytecode has seen a null value.


void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}

// ==================================================================
// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.

void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  Bytecodes::Code c = stream->code();
  if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
    target = stream->dest_w();
  } else {
    target = stream->dest();
  }
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}

int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling includes the receiver
  int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  ReferenceArgumentCount rac(signature);
  args_count += rac.count();
  args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
}
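
// Example: for signature (ILjava/lang/String;J)V with include_receiver == false,
// compute_cell_count() above sees one reference argument (the String), so it
// returns 1 * per_arg_cell_count (subject to the max cap).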

int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = ReturnTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}

class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;
  int _offset;
  GrowableArray<int> _offsets;

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  int off_at(int i) const { return _offsets.at(i); }
};
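
// Example: for (IJLjava/lang/Object;D)V the int occupies word 0 and the long
// words 1-2, so the Object argument starts at word 3 and off_at(0) == 3.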

void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling includes the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != NULL && (always_clean || !k->is_loader_alive())) {
      set_type(i, with_status((Klass*)NULL, p));
    }
  }
}

void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != NULL && (always_clean || !k->is_loader_alive())) {
    set_type(with_status((Klass*)NULL, p));
  }
}
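
// In both cases with_status((Klass*)NULL, p) drops the stale Klass pointer
// while preserving the status bits (e.g. null_seen) recorded in p.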

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void ReturnTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != NULL && (always_clean || !p->is_loader_alive())) {
      clear_row(row);
    }
  }
}

void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL)  entries++;
  }
#if INCLUDE_JVMCI
  st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
#else
  st->print_cr("count(%u) entries(%u)", count(), entries);
#endif
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != NULL) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}

void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.

void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // Release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC), and since a RetData is a ProfileData,
// which is a wrapper around a derived oop, taking the lock in _this_ method
// could leave the 'this' pointer's _data field containing junk afterwards.
// We therefore require the caller to take the lock before constructing the
// ProfileData structure.  Currently the only caller is
// InterpreterRuntime::update_mdp_for_ret.
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, mdp - dp());
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}
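
// If every cache row is already occupied, no row is updated and the caller
// simply continues with the mdp computed above.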

void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci)  entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).  It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.

int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}
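
// Example: a tableswitch with 3 cases yields 1 + per_case_cell_count * 4
// cells: one cell for the array length plus (count, displacement) slots for
// the 3 cases and the default target.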

void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print("  0x%x", arg_modified(i));
  }
  st->cr();
}

int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  st->print("parameter types"); // FIXME extra ignored?
  _parameters.print_data_on(st);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}

int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
    return no_profile_data;
  }
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
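
// no_profile_data means the bytecode gets no profiling slot at all;
// variable_cell_count means the size depends on the particular call site or
// switch and is resolved later in compute_data_size()/initialize_data().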

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false;
}

#if INCLUDE_JVMCI

void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}

FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) {
  memcpy(data(), speculation, speculation_len);
}

// A heuristic check to detect nmethods that outlive a failed speculations list.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != NULL) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != NULL) {
        method->print_name(&st);
      } else {
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != NULL) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}
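
// A freed list is recognizable because free_failed_speculations() sets the
// low bit of the head pointer, which can never be set for a live,
// pointer-aligned FailedSpeculation* (see the alignment guarantee below).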

bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != NULL, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
  FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
  if (fs == NULL) {
    // no memory -> ignore failed speculation
    return false;
  }

  guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  do {
    if (*cursor == NULL) {
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) NULL, fs);
      if (old_fs == NULL) {
        // Successfully appended fs to end of the list
        return true;
      }
      cursor = old_fs->next_adr();
    } else {
      cursor = (*cursor)->next_adr();
    }
  } while (true);
}

void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
  assert(failed_speculations_address != NULL, "must be");
  FailedSpeculation* fs = *failed_speculations_address;
  while (fs != NULL) {
    FailedSpeculation* next = fs->next();
    delete fs;
    fs = next;
  }

  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This enables the check in
  // add_failed_speculation that guards against adding to a freed failed
  // speculations list.
  long* head = (long*) failed_speculations_address;
  (*head) = (*head) | 0x1;
}
#endif // INCLUDE_JVMCI

int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}
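
// Example (JVMCI build): with 10 possibly trapping BCIs lacking an MDP,
// MIN2(10, MAX2(4, 10 * 30 / 100)) == 4 extra slots are reserved before the
// speculative-trap minimum is applied.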

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters.
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }
  return object_size;
}
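
// This computation must stay in sync with MethodData::initialize(), which
// asserts that the size it actually lays out matches the value returned here.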

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in words.
int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
  int byte_size = compute_allocation_size_in_bytes(method);
  int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
  return align_metadata_size(word_size);
}

// Initialize an individual data segment.  Returns the size of
// the segment in bytes.
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
    return 0;
  }
  int cell_count = -1;
  int tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aastore:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    break;
  }
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, stream->bci(), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}

// Get the data at an arbitrary (sort of) data index.
ProfileData* MethodData::data_at(int data_index) const {
  if (out_of_bounds(data_index)) {
    return NULL;
  }
  DataLayout* data_layout = data_layout_at(data_index);
  return data_layout->data_in();
}

int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  }
}
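
// The variable-sized tags above have no static cell count; a temporary
// ProfileData wrapper is created so the count can be read from the profiling
// data itself.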

ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return NULL;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  }
}

// Iteration over data.
ProfileData* MethodData::next_data(ProfileData* current) const {
  int current_index = dp_to_di(current->dp());
  int next_index = current_index + current->size_in_bytes();
  ProfileData* next = data_at(next_index);
  return next;
}

DataLayout* MethodData::next_data_layout(DataLayout* current) const {
  int current_index = dp_to_di((address)current);
  int next_index = current_index + current->size_in_bytes();
  if (out_of_bounds(next_index)) {
    return NULL;
  }
  DataLayout* next = data_layout_at(next_index);
  return next;
}

// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(NULL, this);
  }
}

// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _extra_data_lock(Mutex::nonleaf-2, "MDOExtraData_lock", Mutex::_safepoint_check_always),
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  initialize();
}

void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
  ResourceMark rm(thread);

  init();
  set_creation_mileage(mileage_of(method()));

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0;  // number of bytecodes lacking data
  _data[0] = 0;  // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  Copy::zero_to_bytes(((address)_data) + data_size, extra_size);

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled).
  if (parms_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(parms_cell);
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but it is
  // at least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
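
// Resulting layout of the MethodData: header, per-bytecode profiling data,
// extra (trap) data, the ArgInfoData entry, then the optional
// ParametersTypeData area at the very end.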

void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale);
  _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = NULL;
#endif

#if INCLUDE_RTM_OPT
  _rtm_state = NoRTM; // No RTM lock eliding by default
  if (UseRTMLocking &&
      !CompilerOracle::has_option(mh, CompileCommand::NoRTMLockEliding)) {
    if (CompilerOracle::has_option(mh, CompileCommand::UseRTMLockEliding) || !UseRTMDeopt) {
      // Generate RTM lock eliding code without abort ratio calculation code.
      _rtm_state = UseRTM;
    } else if (UseRTMDeopt) {
      // Generate RTM lock eliding code and include abort ratio calculation
      // code if UseRTMDeopt is on.
      _rtm_state = ProfileRTM;
    }
  }
#endif

  // Initialize escape flags.
  clear_escape_info();
}

// Get a measure of how much mileage the method has on it.
int MethodData::mileage_of(Method* method) {
  return MAX2(method->invocation_count(), method->backedge_count());
}

bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(_method);
}

// Translate a bci to its corresponding data index (di).
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = NULL;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
      else if (prev != NULL)   set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}

// Translate a bci to its corresponding data, or NULL.
ProfileData* MethodData::bci_to_data(int bci) {
  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      break;
    }
  }
  return bci_to_extra_data(bci, NULL, false);
}

DataLayout* MethodData::next_extra(DataLayout* dp) {
  int nb_cells = 0;
  switch(dp->tag()) {
  case DataLayout::bit_data_tag:
  case DataLayout::no_tag:
    nb_cells = BitData::static_cell_count();
    break;
  case DataLayout::speculative_trap_data_tag:
    nb_cells = SpeculativeTrapData::static_cell_count();
    break;
  default:
    fatal("unexpected tag %d", dp->tag());
  }
  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
}
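
// Extra data entries therefore come in exactly two sizes: one-cell BitData
// (also used for still-unallocated no_tag slots) and the larger
// SpeculativeTrapData; bci_to_extra_data() below relies on the latter being
// exactly twice the size of the former.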
1383 
1384 ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
1385   DataLayout* end = args_data_limit();
1386 
1387   for (;; dp = next_extra(dp)) {
1388     assert(dp < end, "moved past end of extra data");
1389     // No need for "Atomic::load_acquire" ops,
1390     // since the data structure is monotonic.
1391     switch(dp->tag()) {
1392     case DataLayout::no_tag:
1393       return NULL;
1394     case DataLayout::arg_info_data_tag:
1395       dp = end;
1396       return NULL; // ArgInfoData is at the end of extra data section.
1397     case DataLayout::bit_data_tag:
1398       if (m == NULL && dp->bci() == bci) {
1399         return new BitData(dp);
1400       }
1401       break;
1402     case DataLayout::speculative_trap_data_tag:
1403       if (m != NULL) {
1404         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1405         // data->method() may be null in case of a concurrent
1406         // allocation. Maybe it's for the same method. Try to use that
1407         // entry in that case.
1408         if (dp->bci() == bci) {
1409           if (data->method() == NULL) {
1410             assert(concurrent, "impossible because no concurrent allocation");
1411             return NULL;
1412           } else if (data->method() == m) {
1413             return data;
1414           }
1415         }
1416       }
1417       break;
1418     default:
1419       fatal("unexpected tag %d", dp->tag());
1420     }
1421   }
1422   return NULL;
1423 }
1424 
1425 
1426 // Translate a bci to its corresponding extra data, or NULL.
1427 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
1428   // This code assumes an entry for a SpeculativeTrapData is 2 cells
1429   assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
1430          DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
1431          "code needs to be adjusted");
1432 
1433   // Do not create an extra data entry if the method has been redefined.
1434   if (m != NULL && m->is_old()) {
1435     return NULL;
1436   }
1437 
1438   DataLayout* dp  = extra_data_base();
1439   DataLayout* end = args_data_limit();
1440 
1441   // Allocation in the extra data space has to be atomic: entries do not
1442   // all have the same size, so a non-atomic concurrent allocation could
1443   // leave the extra data space corrupted.
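  // Writers are serialized by _extra_data_lock below; lock-free readers rely
  // on the header of a new entry being published with a single store, so they
  // observe either a free (no_tag) slot or a fully initialized header. A
  // minimal sketch of that publication pattern (the allocation code below also
  // handles SpeculativeTrapData, whose method is set under the same lock):
  //
  //   DataLayout temp;
  //   temp.initialize(DataLayout::bit_data_tag, bci, 0);  // build the header off to the side
  //   dp->set_header(temp.header());                      // publish it with one store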
1444   ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
1445   if (result != NULL) {
1446     return result;
1447   }
1448 
1449   if (create_if_missing && dp < end) {
1450     MutexLocker ml(&_extra_data_lock);
1451     // Check again now that we have the lock. Another thread may
1452     // have added extra data entries.
1453     ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
1454     if (result != NULL || dp >= end) {
1455       return result;
1456     }
1457 
1458     assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
1459     assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
1460     u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
1461     // SpeculativeTrapData is 2 slots. Make sure we have room.
1462     if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
1463       return NULL;
1464     }
1465     DataLayout temp;
1466     temp.initialize(tag, bci, 0);
1467 
1468     dp->set_header(temp.header());
1469     assert(dp->tag() == tag, "sane");
1470     assert(dp->bci() == bci, "no concurrent allocation");
1471     if (tag == DataLayout::bit_data_tag) {
1472       return new BitData(dp);
1473     } else {
1474       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1475       data->set_method(m);
1476       return data;
1477     }
1478   }
1479   return NULL;
1480 }
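// A hypothetical use of bci_to_extra_data() above (caller and names invented
// for illustration), recording that 'callee' was the target of a failed
// speculation at 'bci' in this MethodData:
//
//   ProfileData* pd = mdo->bci_to_extra_data(bci, callee, true /* create_if_missing */);
//   if (pd != NULL && pd->is_SpeculativeTrapData()) {
//     // the entry now ties (bci, callee) together for later trap bookkeeping
//   }
//
// A NULL result means there was no room left in the extra data section or
// 'callee' has been redefined (is_old).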
1481 
1482 ArgInfoData* MethodData::arg_info() {
1483   DataLayout* dp    = extra_data_base();
1484   DataLayout* end   = args_data_limit();
1485   for (; dp < end; dp = next_extra(dp)) {
1486     if (dp->tag() == DataLayout::arg_info_data_tag)
1487       return new ArgInfoData(dp);
1488   }
1489   return NULL;
1490 }
1491 
1492 // Printing
1493 
1494 void MethodData::print_on(outputStream* st) const {
1495   assert(is_methodData(), "should be method data");
1496   st->print("method data for ");
1497   method()->print_value_on(st);
1498   st->cr();
1499   print_data_on(st);
1500 }
1501 
1502 void MethodData::print_value_on(outputStream* st) const {
1503   assert(is_methodData(), "should be method data");
1504   st->print("method data for ");
1505   method()->print_value_on(st);
1506 }
1507 
1508 void MethodData::print_data_on(outputStream* st) const {
1509   ResourceMark rm;
1510   ProfileData* data = first_data();
1511   if (_parameters_type_data_di != no_parameters) {
1512     parameters_type_data()->print_data_on(st);
1513   }
1514   for ( ; is_valid(data); data = next_data(data)) {
1515     st->print("%d", dp_to_di(data->dp()));
1516     st->fill_to(6);
1517     data->print_data_on(st, this);
1518   }
1519   st->print_cr("--- Extra data:");
1520   DataLayout* dp    = extra_data_base();
1521   DataLayout* end   = args_data_limit();
1522   for (;; dp = next_extra(dp)) {
1523     assert(dp < end, "moved past end of extra data");
1524     // No need for "Atomic::load_acquire" ops,
1525     // since the data structure is monotonic.
1526     switch(dp->tag()) {
1527     case DataLayout::no_tag:
1528       continue;
1529     case DataLayout::bit_data_tag:
1530       data = new BitData(dp);
1531       break;
1532     case DataLayout::speculative_trap_data_tag:
1533       data = new SpeculativeTrapData(dp);
1534       break;
1535     case DataLayout::arg_info_data_tag:
1536       data = new ArgInfoData(dp);
1537       dp = end; // ArgInfoData is at the end of extra data section.
1538       break;
1539     default:
1540       fatal("unexpected tag %d", dp->tag());
1541     }
1542     st->print("%d", dp_to_di(data->dp()));
1543     st->fill_to(6);
1544     data->print_data_on(st);
1545     if (dp >= end) return;
1546   }
1547 }
1548 
1549 // Verification
1550 
1551 void MethodData::verify_on(outputStream* st) {
1552   guarantee(is_methodData(), "object must be method data");
1554   this->verify_data_on(st);
1555 }
1556 
1557 void MethodData::verify_data_on(outputStream* st) {
1558   NEEDS_CLEANUP;
1559   // not yet implemented.
1560 }
1561 
1562 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1563   if (m->is_compiled_lambda_form()) {
1564     return true;
1565   }
1566 
1567   Bytecode_invoke inv(m, bci);
1568   return inv.is_invokedynamic() || inv.is_invokehandle();
1569 }
1570 
1571 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1572   Bytecode_invoke inv(m, bci);
1573   if (inv.is_invokevirtual()) {
1574     Symbol* klass = inv.klass();
1575     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1576         klass == vmSymbols::sun_misc_Unsafe() ||
1577         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1578       Symbol* name = inv.name();
1579       if (name->starts_with("get") || name->starts_with("put")) {
1580         return true;
1581       }
1582     }
1583   }
1584   return false;
1585 }
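// For example (call site invented for illustration): an invokevirtual of
// jdk.internal.misc.Unsafe::getLong(Object, long) matches both the receiver
// class and the "get" name prefix checked above, so
// profile_arguments_for_invoke() below requests argument profiling for that
// call site even when TypeProfileLevel otherwise restricts argument profiling
// to jsr292 call sites.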
1586 
1587 bool MethodData::profile_memory_access(const methodHandle& m, int bci) {
1588   Bytecode_invoke inv(m, bci);
1589   if (inv.is_invokestatic()) {
1590     if (inv.klass() == vmSymbols::jdk_incubator_foreign_MemoryAccess()) {
1591       if (inv.name()->starts_with("get") || inv.name()->starts_with("set")) {
1592         return true;
1593       }
1594     }
1595   }
1596   return false;
1597 }
1598 
1599 int MethodData::profile_arguments_flag() {
1600   return TypeProfileLevel % 10;
1601 }
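// A worked example of the digit decoding done by the *_flag() accessors in
// this file (value chosen for illustration): with TypeProfileLevel == 121,
//
//   profile_arguments_flag()  == 121 % 10         == 1  (type_profile_jsr292)
//   profile_return_flag()     == (121 % 100) / 10 == 2  (type_profile_all)
//   profile_parameters_flag() == 121 / 100        == 1  (type_profile_jsr292)
//
// i.e. arguments are profiled only at jsr292, Unsafe and MemoryAccess call
// sites, return types are profiled at every profiled call site, and
// parameters are profiled only for compiled lambda forms.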
1602 
1603 bool MethodData::profile_arguments() {
1604   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
1605 }
1606 
1607 bool MethodData::profile_arguments_jsr292_only() {
1608   return profile_arguments_flag() == type_profile_jsr292;
1609 }
1610 
1611 bool MethodData::profile_all_arguments() {
1612   return profile_arguments_flag() == type_profile_all;
1613 }
1614 
1615 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1616   if (!profile_arguments()) {
1617     return false;
1618   }
1619 
1620   if (profile_all_arguments()) {
1621     return true;
1622   }
1623 
1624   if (profile_unsafe(m, bci)) {
1625     return true;
1626   }
1627 
1628   if (profile_memory_access(m, bci)) {
1629     return true;
1630   }
1631 
1632   assert(profile_arguments_jsr292_only(), "inconsistent");
1633   return profile_jsr292(m, bci);
1634 }
1635 
1636 int MethodData::profile_return_flag() {
1637   return (TypeProfileLevel % 100) / 10;
1638 }
1639 
1640 bool MethodData::profile_return() {
1641   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1642 }
1643 
1644 bool MethodData::profile_return_jsr292_only() {
1645   return profile_return_flag() == type_profile_jsr292;
1646 }
1647 
1648 bool MethodData::profile_all_return() {
1649   return profile_return_flag() == type_profile_all;
1650 }
1651 
1652 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1653   if (!profile_return()) {
1654     return false;
1655   }
1656 
1657   if (profile_all_return()) {
1658     return true;
1659   }
1660 
1661   assert(profile_return_jsr292_only(), "inconsistent");
1662   return profile_jsr292(m, bci);
1663 }
1664 
1665 int MethodData::profile_parameters_flag() {
1666   return TypeProfileLevel / 100;
1667 }
1668 
1669 bool MethodData::profile_parameters() {
1670   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1671 }
1672 
1673 bool MethodData::profile_parameters_jsr292_only() {
1674   return profile_parameters_flag() == type_profile_jsr292;
1675 }
1676 
1677 bool MethodData::profile_all_parameters() {
1678   return profile_parameters_flag() == type_profile_all;
1679 }
1680 
1681 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1682   if (!profile_parameters()) {
1683     return false;
1684   }
1685 
1686   if (profile_all_parameters()) {
1687     return true;
1688   }
1689 
1690   assert(profile_parameters_jsr292_only(), "inconsistent");
1691   return m->is_compiled_lambda_form();
1692 }
1693 
1694 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1695   log_trace(cds)("Iter(MethodData): %p", this);
1696   it->push(&_method);
1697 }
1698 
1699 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1700   if (shift == 0) {
1701     return;
1702   }
1703   if (!reset) {
1704     // Move all cells of the trap entry at dp left by "shift" cells
1705     intptr_t* start = (intptr_t*)dp;
1706     intptr_t* end = (intptr_t*)next_extra(dp);
1707     for (intptr_t* ptr = start; ptr < end; ptr++) {
1708       *(ptr-shift) = *ptr;
1709     }
1710   } else {
1711     // Reset the "shift" cells immediately before dp
1712     intptr_t* start = ((intptr_t*)dp) - shift;
1713     intptr_t* end = (intptr_t*)dp;
1714     for (intptr_t* ptr = start; ptr < end; ptr++) {
1715       *ptr = 0;
1716     }
1717   }
1718 }
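// A worked example (layout invented for illustration) of how clean_extra_data()
// below drives the helper above. Suppose the extra data section starts with a
// dead speculative trap entry T1, followed by a bit data entry B and a live
// trap entry T2:
//
//   before:  [ T1 (dead) | B | T2 (live) | no_tag ... ]
//
// While walking, "shift" grows by T1's size in cells; B and T2 are each copied
// left by that many cells (reset == false), and when the first no_tag or
// arg_info entry is reached, the now-stale cells just before it are zeroed
// (reset == true), turning them back into free no_tag space:
//
//   after:   [ B | T2 (live) | no_tag ... ]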
1719 
1720 // Check for entries that reference an unloaded method
1721 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1722   bool _always_clean;
1723 public:
1724   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1725   bool is_live(Method* m) {
1726     return !_always_clean && m->method_holder()->is_loader_alive();
1727   }
1728 };
1729 
1730 // Check for entries that reference a redefined method
1731 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1732 public:
1733   CleanExtraDataMethodClosure() {}
1734   bool is_live(Method* m) { return !m->is_old(); }
1735 };
1736 
1737 
1738 // Remove SpeculativeTrapData entries that reference an unloaded or
1739 // redefined method
1740 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1741   DataLayout* dp  = extra_data_base();
1742   DataLayout* end = args_data_limit();
1743 
1744   int shift = 0;
1745   for (; dp < end; dp = next_extra(dp)) {
1746     switch(dp->tag()) {
1747     case DataLayout::speculative_trap_data_tag: {
1748       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1749       Method* m = data->method();
1750       assert(m != NULL, "should have a method");
1751       if (!cl->is_live(m)) {
1752         // "shift" accumulates the number of cells for dead
1753         // SpeculativeTrapData entries that have been seen so
1754         // far. Following entries must be shifted left by that many
1755         // cells to remove the dead SpeculativeTrapData entries.
1756         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1757       } else {
1758         // Shift this entry left if it follows dead
1759         // SpeculativeTrapData entries
1760         clean_extra_data_helper(dp, shift);
1761       }
1762       break;
1763     }
1764     case DataLayout::bit_data_tag:
1765       // Shift this entry left if it follows dead SpeculativeTrapData
1766       // entries
1767       clean_extra_data_helper(dp, shift);
1768       continue;
1769     case DataLayout::no_tag:
1770     case DataLayout::arg_info_data_tag:
1771       // We are at the end of the live trap entries. The previous "shift"
1772       // cells contain stale data from dead entries or from entries that
1773       // have already been shifted left; reset them to no_tag.
1774       clean_extra_data_helper(dp, shift, true);
1775       return;
1776     default:
1777       fatal("unexpected tag %d", dp->tag());
1778     }
1779   }
1780 }
1781 
1782 // Verify there's no unloaded or redefined method referenced by a
1783 // SpeculativeTrapData entry
1784 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
1785 #ifdef ASSERT
1786   DataLayout* dp  = extra_data_base();
1787   DataLayout* end = args_data_limit();
1788 
1789   for (; dp < end; dp = next_extra(dp)) {
1790     switch(dp->tag()) {
1791     case DataLayout::speculative_trap_data_tag: {
1792       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1793       Method* m = data->method();
1794       assert(m != NULL && cl->is_live(m), "Method should exist");
1795       break;
1796     }
1797     case DataLayout::bit_data_tag:
1798       continue;
1799     case DataLayout::no_tag:
1800     case DataLayout::arg_info_data_tag:
1801       return;
1802     default:
1803       fatal("unexpected tag %d", dp->tag());
1804     }
1805   }
1806 #endif
1807 }
1808 
1809 void MethodData::clean_method_data(bool always_clean) {
1810   ResourceMark rm;
1811   for (ProfileData* data = first_data();
1812        is_valid(data);
1813        data = next_data(data)) {
1814     data->clean_weak_klass_links(always_clean);
1815   }
1816   ParametersTypeData* parameters = parameters_type_data();
1817   if (parameters != NULL) {
1818     parameters->clean_weak_klass_links(always_clean);
1819   }
1820 
1821   CleanExtraDataKlassClosure cl(always_clean);
1822   clean_extra_data(&cl);
1823   verify_extra_data_clean(&cl);
1824 }
1825 
1826 // This is called during redefinition, for the MethodData of every method,
1827 // to clean out references to "old" (redefined) methods.
1828 void MethodData::clean_weak_method_links() {
1829   ResourceMark rm;
1830   CleanExtraDataMethodClosure cl;
1831   clean_extra_data(&cl);
1832   verify_extra_data_clean(&cl);
1833 }