src/hotspot/share/oops/methodData.cpp

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
   25 #include "ci/ciMethodData.hpp"
  26 #include "classfile/vmSymbols.hpp"
  27 #include "compiler/compilationPolicy.hpp"
  28 #include "compiler/compilerDefinitions.inline.hpp"
  29 #include "compiler/compilerOracle.hpp"
  30 #include "interpreter/bytecode.hpp"
  31 #include "interpreter/bytecodeStream.hpp"
  32 #include "interpreter/linkResolver.hpp"
  33 #include "memory/metaspaceClosure.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "oops/klass.inline.hpp"
  36 #include "oops/method.inline.hpp"
  37 #include "oops/methodData.inline.hpp"
  38 #include "prims/jvmtiRedefineClasses.hpp"
  39 #include "runtime/atomic.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/orderAccess.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/signature.hpp"
  45 #include "utilities/align.hpp"

 302   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 303   Bytecode_invoke inv(stream->method(), stream->bci());
 304 
 305   if (has_arguments()) {
 306 #ifdef ASSERT
 307     ResourceMark rm;
 308     ReferenceArgumentCount rac(inv.signature());
 309     int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
 310     assert(count > 0, "room for args type but none found?");
 311     check_number_of_arguments(count);
 312 #endif
 313     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 314   }
 315 
 316   if (has_return()) {
 317     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 318     _ret.post_initialize();
 319   }
 320 }
 321 
 322 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 323   for (int i = 0; i < _number_of_entries; i++) {
 324     intptr_t p = type(i);
 325     Klass* k = (Klass*)klass_part(p);
 326     if (k != nullptr && (always_clean || !k->is_loader_alive())) {
 327       set_type(i, with_status((Klass*)nullptr, p));
 328     }
 329   }
 330 }
 331 
 332 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
 333   intptr_t p = type();
 334   Klass* k = (Klass*)klass_part(p);
 335   if (k != nullptr && (always_clean || !k->is_loader_alive())) {
 336     set_type(with_status((Klass*)nullptr, p));
 337   }
 338 }
 339 
 340 bool TypeEntriesAtCall::return_profiling_enabled() {
 341   return MethodData::profile_return();
 342 }
 343 
 344 bool TypeEntriesAtCall::arguments_profiling_enabled() {
 345   return MethodData::profile_arguments();
 346 }
 347 
 348 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
 349   if (is_type_none(k)) {
 350     st->print("none");
 351   } else if (is_type_unknown(k)) {
 352     st->print("unknown");
 353   } else {
 354     valid_klass(k)->print_value_on(st);
 355   }
 356   if (was_null_seen(k)) {
 357     st->print(" (null seen)");
 358   }
 359 }

 395     _args.print_data_on(st);
 396   }
 397   if (has_return()) {
 398     tab(st, true);
 399     st->print("return type");
 400     _ret.print_data_on(st);
 401   }
 402 }
 403 
 404 // ==================================================================
 405 // ReceiverTypeData
 406 //
 407 // A ReceiverTypeData is used to access profiling information about a
 408 // dynamic type check.  It consists of a counter which counts the total times
 409 // that the check is reached, and a series of (Klass*, count) pairs
 410 // which are used to store a type profile for the receiver of the check.
 411 
 412 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  413   for (uint row = 0; row < row_limit(); row++) {
 414     Klass* p = receiver(row);
 415     if (p != nullptr && (always_clean || !p->is_loader_alive())) {
 416       clear_row(row);
 417     }
 418   }
 419 }
 420 
 421 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 422   uint row;
  423   uint entries = 0;
 424   for (row = 0; row < row_limit(); row++) {
 425     if (receiver(row) != nullptr)  entries++;
 426   }
 427   st->print_cr("count(%u) entries(%u)", count(), entries);
 428   int total = count();
 429   for (row = 0; row < row_limit(); row++) {
 430     if (receiver(row) != nullptr) {
 431       total += receiver_count(row);
 432     }
 433   }
 434   for (row = 0; row < row_limit(); row++) {
 435     if (receiver(row) != nullptr) {
 436       tab(st);
 437       receiver(row)->print_value_on(st);
 438       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 439     }
 440   }

 629     return obj_args + 1; // 1 cell for array len
 630   }
 631   return 0;
 632 }
 633 
 634 void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 635   _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
 636 }
 637 
 638 bool ParametersTypeData::profiling_enabled() {
 639   return MethodData::profile_parameters();
 640 }
 641 
 642 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
 643   print_shared(st, "ParametersTypeData", extra);
 644   tab(st);
 645   _parameters.print_data_on(st);
 646   st->cr();
 647 }
 648 
 649 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
 650   print_shared(st, "SpeculativeTrapData", extra);
 651   tab(st);
 652   method()->print_short_name(st);
 653   st->cr();
 654 }
 655 
 656 // ==================================================================
 657 // MethodData*
 658 //
 659 // A MethodData* holds information which has been collected about
 660 // a method.
 661 
 662 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
 663   assert(!THREAD->owns_locks(), "Should not own any locks");
 664   int size = MethodData::compute_allocation_size_in_words(method);
 665 
 666   return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
 667     MethodData(method);
 668 }

1206 
1207 // Give each of the data entries a chance to perform specific
1208 // data initialization.
1209 void MethodData::post_initialize(BytecodeStream* stream) {
1210   ResourceMark rm;
1211   ProfileData* data;
1212   for (data = first_data(); is_valid(data); data = next_data(data)) {
1213     stream->set_start(data->bci());
1214     stream->next();
1215     data->post_initialize(stream, this);
1216   }
1217   if (_parameters_type_data_di != no_parameters) {
1218     parameters_type_data()->post_initialize(nullptr, this);
1219   }
1220 }
1221 
1222 // Initialize the MethodData* corresponding to a given method.
1223 MethodData::MethodData(const methodHandle& method)
1224   : _method(method()),
1225     // Holds Compile_lock
1226     _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"),
1227     _compiler_counters(),
1228     _parameters_type_data_di(parameters_uninitialized) {
1229   initialize();
1230 }
1231 
1232 // Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
1233 // not being accessed while the contents are being rewritten.
1234 class VM_ReinitializeMDO: public VM_Operation {
1235  private:
1236   MethodData* _mdo;
1237  public:
1238   VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
1239   VMOp_Type type() const                         { return VMOp_ReinitializeMDO; }
1240   void doit() {
1241     // The extra data is being zeroed. We would like to hold the extra_data_lock, but it cannot be
1242     // held over a safepoint; since this VM operation runs at a safepoint, the lock is not needed.
1243     _mdo->initialize();
1244   }
1245   bool allow_nested_vm_operations() const        { return true; }
1246 };
1247 
1248 void MethodData::reinitialize() {
1249   VM_ReinitializeMDO op(this);

1347   methodHandle mh(Thread::current(), _method);
1348   CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
1349   _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1350   _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1351 
1352   _tenure_traps = 0;
1353   _num_loops = 0;
1354   _num_blocks = 0;
1355   _would_profile = unknown;
1356 
1357 #if INCLUDE_JVMCI
1358   _jvmci_ir_size = 0;
1359   _failed_speculations = nullptr;
1360 #endif
1361 
1362   // Initialize escape flags.
1363   clear_escape_info();
1364 }
1365 
1366 bool MethodData::is_mature() const {
1367   return CompilationPolicy::is_mature(_method);
1368 }
1369 
1370 // Translate a bci to its corresponding data index (di).
1371 address MethodData::bci_to_dp(int bci) {
1372   ResourceMark rm;
1373   DataLayout* data = data_layout_before(bci);
1374   DataLayout* prev = nullptr;
1375   for ( ; is_valid(data); data = next_data_layout(data)) {
1376     if (data->bci() >= bci) {
1377       if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
1378       else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
1379       return (address)data;
1380     }
1381     prev = data;
1382   }
1383   return (address)limit_data_position();
1384 }
1385 
1386 // Translate a bci to its corresponding data, or null.
1387 ProfileData* MethodData::bci_to_data(int bci) {

1535   return nullptr;
1536 }
1537 
1538 // Printing
1539 
1540 void MethodData::print_on(outputStream* st) const {
1541   assert(is_methodData(), "should be method data");
1542   st->print("method data for ");
1543   method()->print_value_on(st);
1544   st->cr();
1545   print_data_on(st);
1546 }
1547 
1548 void MethodData::print_value_on(outputStream* st) const {
1549   assert(is_methodData(), "should be method data");
1550   st->print("method data for ");
1551   method()->print_value_on(st);
1552 }
1553 
1554 void MethodData::print_data_on(outputStream* st) const {
1555   ConditionalMutexLocker ml(extra_data_lock(), !extra_data_lock()->owned_by_self(),
1556                             Mutex::_no_safepoint_check_flag);
1557   ResourceMark rm;
1558   ProfileData* data = first_data();
1559   if (_parameters_type_data_di != no_parameters) {
1560     parameters_type_data()->print_data_on(st);
1561   }
1562   for ( ; is_valid(data); data = next_data(data)) {
1563     st->print("%d", dp_to_di(data->dp()));
1564     st->fill_to(6);
1565     data->print_data_on(st, this);
1566   }
1567 
1568   st->print_cr("--- Extra data:");
1569   DataLayout* dp    = extra_data_base();
1570   DataLayout* end   = args_data_limit();
1571   for (;; dp = next_extra(dp)) {
1572     assert(dp < end, "moved past end of extra data");
1573     // No need for "Atomic::load_acquire" ops,
1574     // since the data structure is monotonic.
1575     switch(dp->tag()) {

1708 }
1709 
1710 bool MethodData::profile_all_parameters() {
1711   return profile_parameters_flag() == type_profile_all;
1712 }
1713 
1714 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1715   if (!profile_parameters()) {
1716     return false;
1717   }
1718 
1719   if (profile_all_parameters()) {
1720     return true;
1721   }
1722 
1723   assert(profile_parameters_jsr292_only(), "inconsistent");
1724   return m->is_compiled_lambda_form();
1725 }
1726 
1727 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1728   log_trace(cds)("Iter(MethodData): %p", this);
1729   it->push(&_method);
1730 }
1731 
1732 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1733   check_extra_data_locked();
1734 
1735   if (shift == 0) {
1736     return;
1737   }
1738   if (!reset) {
1739     // Move all cells of trap entry at dp left by "shift" cells
1740     intptr_t* start = (intptr_t*)dp;
1741     intptr_t* end = (intptr_t*)next_extra(dp);
1742     for (intptr_t* ptr = start; ptr < end; ptr++) {
1743       *(ptr-shift) = *ptr;
1744     }
1745   } else {
1746     // Reset "shift" cells stopping at dp
1747     intptr_t* start = ((intptr_t*)dp) - shift;
1748     intptr_t* end = (intptr_t*)dp;
1749     for (intptr_t* ptr = start; ptr < end; ptr++) {
1750       *ptr = 0;
1751     }
1752   }
1753 }
1754 
1755 // Check for entries that reference an unloaded method
1756 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1757   bool _always_clean;
1758 public:
1759   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1760   bool is_live(Method* m) {
1761     return !(_always_clean) && m->method_holder()->is_loader_alive();
1762   }
1763 };
1764 
1765 // Check for entries that reference a redefined method
1766 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1767 public:
1768   CleanExtraDataMethodClosure() {}
1769   bool is_live(Method* m) { return !m->is_old(); }
1770 };
1771 
1772 
1773 // Remove SpeculativeTrapData entries that reference an unloaded or
1774 // redefined method
1775 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1776   check_extra_data_locked();
1777 
1778   DataLayout* dp  = extra_data_base();
1779   DataLayout* end = args_data_limit();
1780 
1781   int shift = 0;
1782   for (; dp < end; dp = next_extra(dp)) {
1783     switch(dp->tag()) {
1784     case DataLayout::speculative_trap_data_tag: {
1785       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1786       Method* m = data->method();
1787       assert(m != nullptr, "should have a method");
1788       if (!cl->is_live(m)) {
1789         // "shift" accumulates the number of cells for dead
1790         // SpeculativeTrapData entries that have been seen so
1791         // far. Following entries must be shifted left by that many
1792         // cells to remove the dead SpeculativeTrapData entries.
1793         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1794       } else {
1795         // Shift this entry left if it follows dead
1796         // SpeculativeTrapData entries
1797         clean_extra_data_helper(dp, shift);
1798       }
1799       break;
1800     }
1801     case DataLayout::bit_data_tag:
1802       // Shift this entry left if it follows dead SpeculativeTrapData
1803       // entries
1804       clean_extra_data_helper(dp, shift);
1805       continue;
1806     case DataLayout::no_tag:
1807     case DataLayout::arg_info_data_tag:
1808       // We are at the end of the live trap entries. The previous "shift"

1872   ResourceMark rm;
1873   CleanExtraDataMethodClosure cl;
1874 
1875   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1876   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1877 
1878   clean_extra_data(&cl);
1879   verify_extra_data_clean(&cl);
1880 }
1881 
1882 void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
1883   release_C_heap_structures();
1884 }
1885 
1886 void MethodData::release_C_heap_structures() {
1887 #if INCLUDE_JVMCI
1888   FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
1889 #endif
1890 }
1891 
1892 #ifdef ASSERT
1893 void MethodData::check_extra_data_locked() const {
1894   // Cast const away just to be able to verify the lock.
1895   // Usually we only want non-const access to the lock,
1896   // so this is an exception.
1897   MethodData* self = (MethodData*)this;
1898   assert(self->extra_data_lock()->owned_by_self(), "must have lock");
1899   assert(!Thread::current()->is_Java_thread() ||
1900          JavaThread::current()->is_in_no_safepoint_scope(),
1901          "JavaThread must have NoSafepointVerifier inside lock scope");
1902 }
1903 #endif

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/cdsConfig.hpp"
  26 #include "ci/ciMethodData.hpp"
  27 #include "classfile/systemDictionaryShared.hpp"
  28 #include "classfile/vmSymbols.hpp"
  29 #include "compiler/compilationPolicy.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "interpreter/bytecode.hpp"
  33 #include "interpreter/bytecodeStream.hpp"
  34 #include "interpreter/linkResolver.hpp"
  35 #include "memory/metaspaceClosure.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "oops/klass.inline.hpp"
  38 #include "oops/method.inline.hpp"
  39 #include "oops/methodData.inline.hpp"
  40 #include "prims/jvmtiRedefineClasses.hpp"
  41 #include "runtime/atomic.hpp"
  42 #include "runtime/deoptimization.hpp"
  43 #include "runtime/handles.inline.hpp"
  44 #include "runtime/orderAccess.hpp"
  45 #include "runtime/safepointVerifiers.hpp"
  46 #include "runtime/signature.hpp"
  47 #include "utilities/align.hpp"

 304   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
 305   Bytecode_invoke inv(stream->method(), stream->bci());
 306 
 307   if (has_arguments()) {
 308 #ifdef ASSERT
 309     ResourceMark rm;
 310     ReferenceArgumentCount rac(inv.signature());
 311     int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
 312     assert(count > 0, "room for args type but none found?");
 313     check_number_of_arguments(count);
 314 #endif
 315     _args.post_initialize(inv.signature(), inv.has_receiver(), false);
 316   }
 317 
 318   if (has_return()) {
 319     assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
 320     _ret.post_initialize();
 321   }
 322 }
 323 
 324 static bool is_excluded(Klass* k) {
 325 #if INCLUDE_CDS
 326   if (SafepointSynchronize::is_at_safepoint() &&
 327       CDSConfig::is_dumping_archive() &&
 328       CDSConfig::current_thread_is_vm_or_dumper()) {
 329     if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
 330       log_debug(cds)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
 331       return true;
 332     } else if (CDSConfig::is_dumping_dynamic_archive() && k->is_shared()) {
 333       return false;
 334     } else {
 335       bool excluded = SystemDictionaryShared::should_be_excluded(k);
 336       if (excluded) {
 337         log_debug(cds)("Purged %s from MDO: excluded class", k->name()->as_C_string());
 338       }
 339       return excluded;
 340     }
 341   }
 342 #endif
 343   return false;
 344 }
 345 
 346 void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
 347   for (int i = 0; i < _number_of_entries; i++) {
 348     intptr_t p = type(i);
 349     Klass* k = (Klass*)klass_part(p);
 350     if (k != nullptr) {
 351       if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
 352         continue; // skip not-yet-initialized classes; TODO: maybe clear the slot instead?
 353       }
 354       if (always_clean || !k->is_loader_alive() || is_excluded(k)) {
 355         set_type(i, with_status((Klass*)nullptr, p));
 356       }
 357     }
 358   }
 359 }
 360 
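
Note: a type cell such as type(i) packs a Klass* together with profiling status flags in the pointer's low bits; klass_part() strips the flags and with_status() re-applies them, which is why clearing a dead klass above preserves the recorded status. A minimal sketch of such a tagging scheme, with illustrative mask values rather than HotSpot's exact constants:

    // Sketch of a tagged type cell (assumed bit layout, for illustration only).
    #include <cstdint>

    static const intptr_t status_bits = 0x3;   // low bits, e.g. null_seen / type_unknown
    static intptr_t klass_part(intptr_t cell)  { return cell & ~status_bits; }
    static intptr_t with_status(intptr_t k, intptr_t old_cell) {
      return k | (old_cell & status_bits);     // new klass pointer, old flags
    }
    // Hence set_type(i, with_status((Klass*)nullptr, p)) keeps only the flags of p.
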
 361 void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
 362   for (int i = 0; i < _number_of_entries; i++) {
 363     Klass** k = (Klass**)type_adr(i); // tagged
 364     it->push(k);
 365   }
 366 }
 367 
 368 void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
 369   intptr_t p = type();
 370   Klass* k = (Klass*)klass_part(p);
 371   if (k != nullptr) {
 372     if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
 373       return; // skip not-yet-initialized classes; TODO: maybe clear the slot instead?
 374     }
 375     if (always_clean || !k->is_loader_alive() || is_excluded(k)) {
 376       set_type(with_status((Klass*)nullptr, p));
 377     }
 378   }
 379 }
 380 
 381 void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
 382   Klass** k = (Klass**)type_adr(); // tagged
 383   it->push(k);
 384 }
 385 
 386 bool TypeEntriesAtCall::return_profiling_enabled() {
 387   return MethodData::profile_return();
 388 }
 389 
 390 bool TypeEntriesAtCall::arguments_profiling_enabled() {
 391   return MethodData::profile_arguments();
 392 }
 393 
 394 void TypeEntries::print_klass(outputStream* st, intptr_t k) {
 395   if (is_type_none(k)) {
 396     st->print("none");
 397   } else if (is_type_unknown(k)) {
 398     st->print("unknown");
 399   } else {
 400     valid_klass(k)->print_value_on(st);
 401   }
 402   if (was_null_seen(k)) {
 403     st->print(" (null seen)");
 404   }
 405 }

 441     _args.print_data_on(st);
 442   }
 443   if (has_return()) {
 444     tab(st, true);
 445     st->print("return type");
 446     _ret.print_data_on(st);
 447   }
 448 }
 449 
 450 // ==================================================================
 451 // ReceiverTypeData
 452 //
 453 // A ReceiverTypeData is used to access profiling information about a
 454 // dynamic type check.  It consists of a counter which counts the total times
 455 // that the check is reached, and a series of (Klass*, count) pairs
 456 // which are used to store a type profile for the receiver of the check.
 457 
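
Note: for intuition, a ReceiverTypeData entry can be pictured as the small fixed table below (a conceptual sketch only; the real data lives in untyped profiling cells, and the number of rows is governed by TypeProfileWidth):

    // Conceptual view of a receiver type profile (not the actual cell layout).
    #include <cstdint>
    class Klass;

    struct ReceiverRow { Klass* receiver; uint32_t count; };
    struct ReceiverTypeProfile {
      uint32_t count;        // checks not attributed to a specific row
      ReceiverRow row[2];    // TypeProfileWidth rows; a null receiver marks a free row
    };

print_receiver_data_on() below folds both into one total, which is why each row's frequency prints as receiver_count(row) / total.
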
 458 void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  459   for (uint row = 0; row < row_limit(); row++) {
 460     Klass* p = receiver(row);
 461     if (p != nullptr) {
 462       if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
 463         continue; // skip not-yet-initialized classes; TODO: maybe clear the slot instead?
 464       }
 465       if (always_clean || !p->is_loader_alive() || is_excluded(p)) {
 466         clear_row(row);
 467       }
 468     }
 469   }
 470 }
 471 
 472 void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
 473   for (uint row = 0; row < row_limit(); row++) {
 474     Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
 475     it->push(recv);
 476   }
 477 }
 478 
 479 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
 480   uint row;
  481   uint entries = 0;
 482   for (row = 0; row < row_limit(); row++) {
 483     if (receiver(row) != nullptr)  entries++;
 484   }
 485   st->print_cr("count(%u) entries(%u)", count(), entries);
 486   int total = count();
 487   for (row = 0; row < row_limit(); row++) {
 488     if (receiver(row) != nullptr) {
 489       total += receiver_count(row);
 490     }
 491   }
 492   for (row = 0; row < row_limit(); row++) {
 493     if (receiver(row) != nullptr) {
 494       tab(st);
 495       receiver(row)->print_value_on(st);
 496       st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
 497     }
 498   }

 687     return obj_args + 1; // 1 cell for array len
 688   }
 689   return 0;
 690 }
 691 
 692 void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
 693   _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
 694 }
 695 
 696 bool ParametersTypeData::profiling_enabled() {
 697   return MethodData::profile_parameters();
 698 }
 699 
 700 void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
 701   print_shared(st, "ParametersTypeData", extra);
 702   tab(st);
 703   _parameters.print_data_on(st);
 704   st->cr();
 705 }
 706 
 707 void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
 708   Method** m = (Method**)intptr_at_adr(speculative_trap_method);
 709   it->push(m);
 710 }
 711 
 712 void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
 713   print_shared(st, "SpeculativeTrapData", extra);
 714   tab(st);
 715   method()->print_short_name(st);
 716   st->cr();
 717 }
 718 
 719 // ==================================================================
 720 // MethodData*
 721 //
 722 // A MethodData* holds information which has been collected about
 723 // a method.
 724 
 725 MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
 726   assert(!THREAD->owns_locks(), "Should not own any locks");
 727   int size = MethodData::compute_allocation_size_in_words(method);
 728 
 729   return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
 730     MethodData(method);
 731 }

1269 
1270 // Give each of the data entries a chance to perform specific
1271 // data initialization.
1272 void MethodData::post_initialize(BytecodeStream* stream) {
1273   ResourceMark rm;
1274   ProfileData* data;
1275   for (data = first_data(); is_valid(data); data = next_data(data)) {
1276     stream->set_start(data->bci());
1277     stream->next();
1278     data->post_initialize(stream, this);
1279   }
1280   if (_parameters_type_data_di != no_parameters) {
1281     parameters_type_data()->post_initialize(nullptr, this);
1282   }
1283 }
1284 
1285 // Initialize the MethodData* corresponding to a given method.
1286 MethodData::MethodData(const methodHandle& method)
1287   : _method(method()),
1289     _compiler_counters(),
1290     _parameters_type_data_di(parameters_uninitialized) {
1291   _extra_data_lock = nullptr;
1292   initialize();
1293 }
1294 
1295 MethodData::MethodData() {
1296   assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
1297 }
1298 
1299 // Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
1300 // not being accessed while the contents are being rewritten.
1301 class VM_ReinitializeMDO: public VM_Operation {
1302  private:
1303   MethodData* _mdo;
1304  public:
1305   VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
1306   VMOp_Type type() const                         { return VMOp_ReinitializeMDO; }
1307   void doit() {
1308     // The extra data is being zeroed. We would like to hold the extra_data_lock, but it cannot be
1309     // held over a safepoint; since this VM operation runs at a safepoint, the lock is not needed.
1310     _mdo->initialize();
1311   }
1312   bool allow_nested_vm_operations() const        { return true; }
1313 };
1314 
1315 void MethodData::reinitialize() {
1316   VM_ReinitializeMDO op(this);

1414   methodHandle mh(Thread::current(), _method);
1415   CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
1416   _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1417   _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1418 
1419   _tenure_traps = 0;
1420   _num_loops = 0;
1421   _num_blocks = 0;
1422   _would_profile = unknown;
1423 
1424 #if INCLUDE_JVMCI
1425   _jvmci_ir_size = 0;
1426   _failed_speculations = nullptr;
1427 #endif
1428 
1429   // Initialize escape flags.
1430   clear_escape_info();
1431 }
1432 
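
Note on the mask arithmetic in initialize() above: right_n_bits(n) produces n consecutive one-bits, so with an illustrative scaled frequency log of 10 and a count_shift of 1 the mask is 0x3FF << 1 = 0x7FE, and the interpreter notifies the runtime whenever the masked counter bits are all zero, i.e. roughly every 2^10 invocations or backedges. A tiny self-contained illustration (assumed values, not the actual counter flags):

    #include <cstdio>

    int main() {
      const int count_shift = 1;                               // low bit(s) reserved for status
      const unsigned mask = ((1u << 10) - 1) << count_shift;   // right_n_bits(10) << count_shift
      unsigned counter = 1024u << count_shift;                 // 2^10 recorded events
      printf("notify: %s\n", (counter & mask) == 0 ? "yes" : "no");  // prints "yes"
      return 0;
    }
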
1433 bool MethodData::is_mature() const {
1434   return CompilationPolicy::is_mature((MethodData*)this);
1435 }
1436 
1437 // Translate a bci to its corresponding data index (di).
1438 address MethodData::bci_to_dp(int bci) {
1439   ResourceMark rm;
1440   DataLayout* data = data_layout_before(bci);
1441   DataLayout* prev = nullptr;
1442   for ( ; is_valid(data); data = next_data_layout(data)) {
1443     if (data->bci() >= bci) {
1444       if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
1445       else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
1446       return (address)data;
1447     }
1448     prev = data;
1449   }
1450   return (address)limit_data_position();
1451 }
1452 
1453 // Translate a bci to its corresponding data, or null.
1454 ProfileData* MethodData::bci_to_data(int bci) {

1602   return nullptr;
1603 }
1604 
1605 // Printing
1606 
1607 void MethodData::print_on(outputStream* st) const {
1608   assert(is_methodData(), "should be method data");
1609   st->print("method data for ");
1610   method()->print_value_on(st);
1611   st->cr();
1612   print_data_on(st);
1613 }
1614 
1615 void MethodData::print_value_on(outputStream* st) const {
1616   assert(is_methodData(), "should be method data");
1617   st->print("method data for ");
1618   method()->print_value_on(st);
1619 }
1620 
1621 void MethodData::print_data_on(outputStream* st) const {
1622   Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
1623   ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
1624                             Mutex::_no_safepoint_check_flag);
1625   ResourceMark rm;
1626   ProfileData* data = first_data();
1627   if (_parameters_type_data_di != no_parameters) {
1628     parameters_type_data()->print_data_on(st);
1629   }
1630   for ( ; is_valid(data); data = next_data(data)) {
1631     st->print("%d", dp_to_di(data->dp()));
1632     st->fill_to(6);
1633     data->print_data_on(st, this);
1634   }
1635 
1636   st->print_cr("--- Extra data:");
1637   DataLayout* dp    = extra_data_base();
1638   DataLayout* end   = args_data_limit();
1639   for (;; dp = next_extra(dp)) {
1640     assert(dp < end, "moved past end of extra data");
1641     // No need for "Atomic::load_acquire" ops,
1642     // since the data structure is monotonic.
1643     switch(dp->tag()) {

1776 }
1777 
1778 bool MethodData::profile_all_parameters() {
1779   return profile_parameters_flag() == type_profile_all;
1780 }
1781 
1782 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1783   if (!profile_parameters()) {
1784     return false;
1785   }
1786 
1787   if (profile_all_parameters()) {
1788     return true;
1789   }
1790 
1791   assert(profile_parameters_jsr292_only(), "inconsistent");
1792   return m->is_compiled_lambda_form();
1793 }
1794 
1795 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1796   log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
1797   it->push(&_method);
1798   if (_parameters_type_data_di != no_parameters) {
1799     parameters_type_data()->metaspace_pointers_do(it);
1800   }
1801   for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
1802     data->metaspace_pointers_do(it);
1803   }
1804   for (DataLayout* dp = extra_data_base();
1805                    dp < extra_data_limit();
1806                    dp = MethodData::next_extra(dp)) {
1807     if (dp->tag() == DataLayout::speculative_trap_data_tag) {
1808       ResourceMark rm;
1809       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1810       data->metaspace_pointers_do(it);
1811     } else if (dp->tag() == DataLayout::no_tag ||
1812                dp->tag() == DataLayout::arg_info_data_tag) {
1813       break;
1814     }
1815   }
1816 }
1817 
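
Note: each it->push(...) in metaspace_pointers_do() hands the closure the address of an embedded metaspace pointer rather than the pointee, so the CDS dumper can both trace the reference and later patch the slot to point at the relocated copy. Conceptually (illustrative names, not the real MetaspaceClosure API):

    // Conceptual slot visitor: remember where pointers live so they can be rewritten.
    #include <vector>

    struct SlotVisitor {
      std::vector<void**> slots;
      void push(void** slot) { slots.push_back(slot); }    // record the slot's address
      void relocate_all(void* (*forward)(void*)) {
        for (void** s : slots) *s = forward(*s);           // patch each slot in place
      }
    };
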
1818 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1819   check_extra_data_locked();
1820 
1821   if (shift == 0) {
1822     return;
1823   }
1824   if (!reset) {
1825     // Move all cells of trap entry at dp left by "shift" cells
1826     intptr_t* start = (intptr_t*)dp;
1827     intptr_t* end = (intptr_t*)next_extra(dp);
1828     for (intptr_t* ptr = start; ptr < end; ptr++) {
1829       *(ptr-shift) = *ptr;
1830     }
1831   } else {
1832     // Reset "shift" cells stopping at dp
1833     intptr_t* start = ((intptr_t*)dp) - shift;
1834     intptr_t* end = (intptr_t*)dp;
1835     for (intptr_t* ptr = start; ptr < end; ptr++) {
1836       *ptr = 0;
1837     }
1838   }
1839 }
1840 
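
Note: a worked example of the shift mechanics used by clean_extra_data() below: given an extra-data area [S1 dead | B1 | S2 live], the scan accumulates shift = size-of-S1 cells at S1, copies B1 and S2 left by that many cells, and finally zeroes the vacated tail. The same idiom on a flat array of fixed-width records (illustrative sketch; the real entries are variable-width DataLayouts):

    #include <cstdint>

    // Compact fixed-width records in place, dropping the ones marked dead.
    void compact_records(intptr_t* base, int n, int width, const bool* dead) {
      int shift = 0;                                 // cells of dead records seen so far
      for (int i = 0; i < n; i++) {
        intptr_t* rec = base + i * width;
        if (dead[i]) {
          shift += width;                            // later records slide left past it
        } else {
          for (int c = 0; c < width; c++) {          // same move as clean_extra_data_helper
            rec[c - shift] = rec[c];
          }
        }
      }
      for (intptr_t* p = base + n * width - shift; p < base + n * width; p++) {
        *p = 0;                                      // reset the vacated tail cells
      }
    }
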
1841 // Check for entries that reference an unloaded method
1842 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1843   bool _always_clean;
1844 public:
1845   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1846   bool is_live(Method* m) {
1847     if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1848       return true; // TODO: treat as unloaded instead?
1849     }
1850     return !(_always_clean) && m->method_holder()->is_loader_alive();
1851   }
1852 };
1853 
1854 // Check for entries that reference a redefined method
1855 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1856 public:
1857   CleanExtraDataMethodClosure() {}
1858   bool is_live(Method* m) { return !m->is_old(); }
1859 };
1860 
1861 Mutex* MethodData::extra_data_lock() {
1862   Mutex* lock = Atomic::load(&_extra_data_lock);
1863   if (lock == nullptr) {
1864     // This lock may be acquired while holding DumpTimeTable_lock (rank nosafepoint), so rank it below that.
1865     lock = new Mutex(Mutex::nosafepoint - 1, "MDOExtraData_lock");
1866     Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
1867     if (old != nullptr) {
1868       // Another thread created the lock before us. Use that lock instead.
1869       delete lock;
1870       return old;
1871     }
1872   }
1873   return lock;
1874 }
1875 
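
Note: extra_data_lock() above is the classic racy lazy-initialization pattern: several threads may each allocate a candidate lock, exactly one cmpxchg publishes its pointer, and the losers delete their copy and adopt the winner's. The same shape in portable C++ (a generic sketch, not HotSpot's Atomic API):

    #include <atomic>

    // Lazily publish a singleton via compare-and-swap.
    template <typename T>
    T* lazy_init(std::atomic<T*>& slot) {
      T* cur = slot.load(std::memory_order_acquire);
      if (cur != nullptr) return cur;
      T* fresh = new T();
      T* expected = nullptr;
      if (slot.compare_exchange_strong(expected, fresh)) {
        return fresh;                // we won the race and published our copy
      }
      delete fresh;                  // another thread won; discard ours
      return expected;               // CAS stored the winner's pointer in 'expected'
    }
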
1876 // Remove SpeculativeTrapData entries that reference an unloaded or
1877 // redefined method
1878 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1879   check_extra_data_locked();
1880 
1881   DataLayout* dp  = extra_data_base();
1882   DataLayout* end = args_data_limit();
1883 
1884   int shift = 0;
1885   for (; dp < end; dp = next_extra(dp)) {
1886     switch(dp->tag()) {
1887     case DataLayout::speculative_trap_data_tag: {
1888       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1889       Method* m = data->method();
1890       assert(m != nullptr, "should have a method");
1891       if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
1892         // "shift" accumulates the number of cells for dead
1893         // SpeculativeTrapData entries that have been seen so
1894         // far. Following entries must be shifted left by that many
1895         // cells to remove the dead SpeculativeTrapData entries.
1896         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1897       } else {
1898         // Shift this entry left if it follows dead
1899         // SpeculativeTrapData entries
1900         clean_extra_data_helper(dp, shift);
1901       }
1902       break;
1903     }
1904     case DataLayout::bit_data_tag:
1905       // Shift this entry left if it follows dead SpeculativeTrapData
1906       // entries
1907       clean_extra_data_helper(dp, shift);
1908       continue;
1909     case DataLayout::no_tag:
1910     case DataLayout::arg_info_data_tag:
1911       // We are at the end of the live trap entries. The previous "shift"

1975   ResourceMark rm;
1976   CleanExtraDataMethodClosure cl;
1977 
1978   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1979   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1980 
1981   clean_extra_data(&cl);
1982   verify_extra_data_clean(&cl);
1983 }
1984 
1985 void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
1986   release_C_heap_structures();
1987 }
1988 
1989 void MethodData::release_C_heap_structures() {
1990 #if INCLUDE_JVMCI
1991   FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
1992 #endif
1993 }
1994 
1995 #if INCLUDE_CDS
1996 void MethodData::remove_unshareable_info() {
1997   _extra_data_lock = nullptr;
1998 }
1999 
2000 void MethodData::restore_unshareable_info(TRAPS) {
2001   // Nothing to restore: _extra_data_lock stays null and is recreated lazily by extra_data_lock().
2002 }
2003 #endif // INCLUDE_CDS
2004 
2005 #ifdef ASSERT
2006 void MethodData::check_extra_data_locked() const {
2007   // Cast const away just to be able to verify the lock.
2008   // Usually we only want non-const access to the lock,
2009   // so this is an exception.
2010   MethodData* self = (MethodData*)this;
2011   assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
2012   assert(!Thread::current()->is_Java_thread() ||
2013          JavaThread::current()->is_in_no_safepoint_scope(),
2014          "JavaThread must have NoSafepointVerifier inside lock scope");
2015 }
2016 #endif