6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "compiler/compilationPolicy.hpp"
29 #include "compiler/compilerDefinitions.inline.hpp"
30 #include "compiler/compilerOracle.hpp"
31 #include "interpreter/bytecode.hpp"
32 #include "interpreter/bytecodeStream.hpp"
33 #include "interpreter/linkResolver.hpp"
34 #include "memory/metaspaceClosure.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "oops/methodData.inline.hpp"
38 #include "prims/jvmtiRedefineClasses.hpp"
39 #include "runtime/atomic.hpp"
40 #include "runtime/deoptimization.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/orderAccess.hpp"
43 #include "runtime/safepointVerifiers.hpp"
44 #include "runtime/signature.hpp"
45 #include "utilities/align.hpp"
306 #ifdef ASSERT
307 ResourceMark rm;
308 ReferenceArgumentCount rac(inv.signature());
309 int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
310 assert(count > 0, "room for args type but none found?");
311 check_number_of_arguments(count);
312 #endif
313 _args.post_initialize(inv.signature(), inv.has_receiver(), false);
314 }
315
316 if (has_return()) {
317 assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
318 _ret.post_initialize();
319 }
320 }
321
// Clear per-slot type entries whose recorded Klass has been unloaded, or
// all of them when always_clean is set; the entry's status bits are
// preserved via with_status().
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr && (always_clean || !k->is_loader_alive())) {
      set_type(i, with_status((Klass*)nullptr, p));
    }
  }
}
331
// Same cleaning as TypeStackSlotEntries, but for the single return-type entry.
void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr && (always_clean || !k->is_loader_alive())) {
    set_type(with_status((Klass*)nullptr, p));
  }
}
339
340 bool TypeEntriesAtCall::return_profiling_enabled() {
341 return MethodData::profile_return();
342 }
343
344 bool TypeEntriesAtCall::arguments_profiling_enabled() {
345 return MethodData::profile_arguments();
346 }
347
// Pretty-print an encoded type entry: the sentinel states "none"/"unknown",
// otherwise the recorded Klass, followed by the null-seen status bit.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
395 _args.print_data_on(st);
396 }
397 if (has_return()) {
398 tab(st, true);
399 st->print("return type");
400 _ret.print_data_on(st);
401 }
402 }
403
404 // ==================================================================
405 // ReceiverTypeData
406 //
407 // A ReceiverTypeData is used to access profiling information about a
408 // dynamic type check. It consists of a counter which counts the total times
409 // that the check is reached, and a series of (Klass*, count) pairs
410 // which are used to store a type profile for the receiver of the check.
411
// Clear receiver rows whose Klass has been unloaded, or all rows when
// always_clean is set.
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr && (always_clean || !p->is_loader_alive())) {
      clear_row(row);
    }
  }
}
420
421 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
422 uint row;
423 int entries = 0;
424 for (row = 0; row < row_limit(); row++) {
425 if (receiver(row) != nullptr) entries++;
426 }
427 st->print_cr("count(%u) entries(%u)", count(), entries);
428 int total = count();
429 for (row = 0; row < row_limit(); row++) {
430 if (receiver(row) != nullptr) {
431 total += receiver_count(row);
432 }
433 }
434 for (row = 0; row < row_limit(); row++) {
435 if (receiver(row) != nullptr) {
436 tab(st);
437 receiver(row)->print_value_on(st);
438 st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
439 }
440 }
629 return obj_args + 1; // 1 cell for array len
630 }
631 return 0;
632 }
633
// Set up parameter type entries from the method's signature; the second
// argument includes the receiver slot for non-static methods.
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
637
638 bool ParametersTypeData::profiling_enabled() {
639 return MethodData::profile_parameters();
640 }
641
// Print the shared header followed by the parameter type entries.
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
648
// Print the shared header followed by the trapped method's short name.
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
655
656 // ==================================================================
657 // MethodData*
658 //
659 // A MethodData* holds information which has been collected about
660 // a method.
661
// Allocate a MethodData in the loader's metaspace, sized for this method's
// profile layout. The caller must not hold any locks (asserted).
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
1206
1207 // Give each of the data entries a chance to perform specific
1208 // data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  // Walk every profile entry, positioning the stream at its bci so each
  // entry can inspect its own bytecode.
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  // Parameter profiling (if present) is not tied to a bci.
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(nullptr, this);
  }
}
1221
1222 // Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"),
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  // Layout and counters are set up by the shared initialize() routine.
  initialize();
}
1231
1232 // Reinitialize the storage of an existing MDO at a safepoint. Doing it this way will ensure it's
1233 // not being accessed while the contents are being rewritten.
// VM operation that reinitializes an MDO inside a safepoint, so no other
// thread can observe the contents while they are rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint. This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  bool allow_nested_vm_operations() const { return true; }
};
1247
1248 void MethodData::reinitialize() {
1249 VM_ReinitializeMDO op(this);
1353 _tenure_traps = 0;
1354 _num_loops = 0;
1355 _num_blocks = 0;
1356 _would_profile = unknown;
1357
1358 #if INCLUDE_JVMCI
1359 _jvmci_ir_size = 0;
1360 _failed_speculations = nullptr;
1361 #endif
1362
1363 // Initialize escape flags.
1364 clear_escape_info();
1365 }
1366
1367 // Get a measure of how much mileage the method has on it.
1368 int MethodData::mileage_of(Method* method) {
1369 return MAX2(method->invocation_count(), method->backedge_count());
1370 }
1371
// The compilation policy decides when this MDO's profile is mature enough
// to be trusted by the compilers.
bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(_method);
}
1375
1376 // Translate a bci to its corresponding data index (di).
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Cache a search hint: the entry itself on an exact match, otherwise
      // the last entry before bci.
      if (data->bci() == bci) set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr) set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  // No entry at or after bci: return the end of the data section.
  return (address)limit_data_position();
}
1391
1392 // Translate a bci to its corresponding data, or null.
1393 ProfileData* MethodData::bci_to_data(int bci) {
1541 return nullptr;
1542 }
1543
1544 // Printing
1545
// Print a one-line header identifying the method, then the full profile.
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1553
// Short-form printing: just identify the owning method.
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1559
1560 void MethodData::print_data_on(outputStream* st) const {
1561 ConditionalMutexLocker ml(extra_data_lock(), !extra_data_lock()->owned_by_self(),
1562 Mutex::_no_safepoint_check_flag);
1563 ResourceMark rm;
1564 ProfileData* data = first_data();
1565 if (_parameters_type_data_di != no_parameters) {
1566 parameters_type_data()->print_data_on(st);
1567 }
1568 for ( ; is_valid(data); data = next_data(data)) {
1569 st->print("%d", dp_to_di(data->dp()));
1570 st->fill_to(6);
1571 data->print_data_on(st, this);
1572 }
1573
1574 st->print_cr("--- Extra data:");
1575 DataLayout* dp = extra_data_base();
1576 DataLayout* end = args_data_limit();
1577 for (;; dp = next_extra(dp)) {
1578 assert(dp < end, "moved past end of extra data");
1579 // No need for "Atomic::load_acquire" ops,
1580 // since the data structure is monotonic.
1581 switch(dp->tag()) {
1714 }
1715
1716 bool MethodData::profile_all_parameters() {
1717 return profile_parameters_flag() == type_profile_all;
1718 }
1719
1720 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1721 if (!profile_parameters()) {
1722 return false;
1723 }
1724
1725 if (profile_all_parameters()) {
1726 return true;
1727 }
1728
1729 assert(profile_parameters_jsr292_only(), "inconsistent");
1730 return m->is_compiled_lambda_form();
1731 }
1732
// CDS: visit metaspace pointers embedded in this MDO (here, only the
// back-pointer to the owning Method).
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}
1737
// Compaction support for the extra-data section: either slide the entry at
// dp left by "shift" cells (!reset), or zero the "shift" cells ending at dp
// (reset), reclaiming the space of dead SpeculativeTrapData entries.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    // Nothing to compact.
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}
1760
1761 // Check for entries that reference an unloaded method
1762 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1763 bool _always_clean;
1764 public:
1765 CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1766 bool is_live(Method* m) {
1767 return !(_always_clean) && m->method_holder()->is_loader_alive();
1768 }
1769 };
1770
1771 // Check for entries that reference a redefined method
// Identifies SpeculativeTrapData entries that reference a method made
// obsolete by class redefinition.
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};
1777
1778
1779 // Remove SpeculativeTrapData entries that reference an unloaded or
1780 // redefined method
1781 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1782 check_extra_data_locked();
1783
1784 DataLayout* dp = extra_data_base();
1785 DataLayout* end = args_data_limit();
1786
1787 int shift = 0;
1788 for (; dp < end; dp = next_extra(dp)) {
1789 switch(dp->tag()) {
1790 case DataLayout::speculative_trap_data_tag: {
1791 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1792 Method* m = data->method();
1793 assert(m != nullptr, "should have a method");
1794 if (!cl->is_live(m)) {
1795 // "shift" accumulates the number of cells for dead
1796 // SpeculativeTrapData entries that have been seen so
1797 // far. Following entries must be shifted left by that many
1878 ResourceMark rm;
1879 CleanExtraDataMethodClosure cl;
1880
1881 // Lock to modify extra data, and prevent Safepoint from breaking the lock
1882 MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1883
1884 clean_extra_data(&cl);
1885 verify_extra_data_clean(&cl);
1886 }
1887
// loader_data is unused; only the C-heap side structures need explicit
// freeing here.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
1891
// Free C-heap allocations owned by this MDO (currently only the JVMCI
// failed-speculations list).
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
1897
#ifdef ASSERT
// Debug-only check that the caller holds the extra-data lock and, on a
// JavaThread, sits inside a NoSafepointVerifier scope.
void MethodData::check_extra_data_locked() const {
  // Cast const away, just to be able to verify the lock
  // Usually we only want non-const accesses on the lock,
  // so this here is an exception.
  MethodData* self = (MethodData*)this;
  assert(self->extra_data_lock()->owned_by_self(), "must have lock");
  assert(!Thread::current()->is_Java_thread() ||
         JavaThread::current()->is_in_no_safepoint_scope(),
         "JavaThread must have NoSafepointVerifier inside lock scope");
}
#endif
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/cdsConfig.hpp"
27 #include "ci/ciMethodData.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compilationPolicy.hpp"
30 #include "compiler/compilerDefinitions.inline.hpp"
31 #include "compiler/compilerOracle.hpp"
32 #include "interpreter/bytecode.hpp"
33 #include "interpreter/bytecodeStream.hpp"
34 #include "interpreter/linkResolver.hpp"
35 #include "memory/metaspaceClosure.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "oops/klass.inline.hpp"
38 #include "oops/methodData.inline.hpp"
39 #include "prims/jvmtiRedefineClasses.hpp"
40 #include "runtime/atomic.hpp"
41 #include "runtime/deoptimization.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/orderAccess.hpp"
44 #include "runtime/safepointVerifiers.hpp"
45 #include "runtime/signature.hpp"
46 #include "utilities/align.hpp"
307 #ifdef ASSERT
308 ResourceMark rm;
309 ReferenceArgumentCount rac(inv.signature());
310 int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
311 assert(count > 0, "room for args type but none found?");
312 check_number_of_arguments(count);
313 #endif
314 _args.post_initialize(inv.signature(), inv.has_receiver(), false);
315 }
316
317 if (has_return()) {
318 assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
319 _ret.post_initialize();
320 }
321 }
322
// Clear per-slot type entries whose recorded Klass has been unloaded, or
// all of them when always_clean is set. Entries for not-yet-initialized
// classes are deliberately left in place (see TODO below).
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_alive()) {
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}
337
// CDS: visit the Klass* in each slot. The status tag is stripped first so
// the closure sees a plain pointer (see FIXME about tagged pointers).
void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    set_type(i, klass_part(type(i))); // reset tag; FIXME: properly handle tagged pointers
    Klass** k = (Klass**)type_adr(i);
    it->push(k);
    // it->push_tagged(k);
  }
}
346
// Same cleaning as TypeStackSlotEntries, for the single return-type entry;
// not-yet-initialized classes are kept (see TODO below).
void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_alive()) {
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}
359
// CDS: visit the return-type Klass*, stripping the status tag first so the
// closure sees a plain pointer (see FIXME about tagged pointers).
void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass** k = (Klass**)type_adr(); // tagged
  set_type(klass_part(type())); // reset tag; FIXME: properly handle tagged pointers
  it->push(k);
  // it->push_tagged(k);
}
366
367 bool TypeEntriesAtCall::return_profiling_enabled() {
368 return MethodData::profile_return();
369 }
370
371 bool TypeEntriesAtCall::arguments_profiling_enabled() {
372 return MethodData::profile_arguments();
373 }
374
// Pretty-print an encoded type entry: the sentinel states "none"/"unknown",
// otherwise the recorded Klass, followed by the null-seen status bit.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
422 _args.print_data_on(st);
423 }
424 if (has_return()) {
425 tab(st, true);
426 st->print("return type");
427 _ret.print_data_on(st);
428 }
429 }
430
431 // ==================================================================
432 // ReceiverTypeData
433 //
434 // A ReceiverTypeData is used to access profiling information about a
435 // dynamic type check. It consists of a counter which counts the total times
436 // that the check is reached, and a series of (Klass*, count) pairs
437 // which are used to store a type profile for the receiver of the check.
438
// Clear receiver rows whose Klass has been unloaded, or all rows when
// always_clean is set; not-yet-initialized classes are kept (see TODO).
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr) {
      if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !p->is_loader_alive()) {
        clear_row(row);
      }
    }
  }
}
452
// CDS: visit the Klass* in every receiver row so it can be relocated.
void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
    it->push(recv);
  }
}
459
460 void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
461 uint row;
462 int entries = 0;
463 for (row = 0; row < row_limit(); row++) {
464 if (receiver(row) != nullptr) entries++;
465 }
466 st->print_cr("count(%u) entries(%u)", count(), entries);
467 int total = count();
468 for (row = 0; row < row_limit(); row++) {
469 if (receiver(row) != nullptr) {
470 total += receiver_count(row);
471 }
472 }
473 for (row = 0; row < row_limit(); row++) {
474 if (receiver(row) != nullptr) {
475 tab(st);
476 receiver(row)->print_value_on(st);
477 st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
478 }
479 }
668 return obj_args + 1; // 1 cell for array len
669 }
670 return 0;
671 }
672
// Set up parameter type entries from the method's signature; the second
// argument includes the receiver slot for non-static methods.
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
676
677 bool ParametersTypeData::profiling_enabled() {
678 return MethodData::profile_parameters();
679 }
680
// Print the shared header followed by the parameter type entries.
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
687
// CDS: visit the Method* stored in this speculative trap entry.
void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}
692
// Print the shared header followed by the trapped method's short name.
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
699
700 // ==================================================================
701 // MethodData*
702 //
703 // A MethodData* holds information which has been collected about
704 // a method.
705
// Allocate a MethodData in the loader's metaspace, sized for this method's
// profile layout. The caller must not hold any locks (asserted).
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
1250
1251 // Give each of the data entries a chance to perform specific
1252 // data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  // Walk every profile entry, positioning the stream at its bci so each
  // entry can inspect its own bytecode.
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  // Parameter profiling (if present) is not tied to a bci.
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->post_initialize(nullptr, this);
  }
}
1265
1266 // Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  // The extra-data lock is created lazily by extra_data_lock().
  _extra_data_lock = nullptr;
  initialize();
}
1275
// CDS-only default constructor: contents come from the archived image.
MethodData::MethodData() {
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
1279
1280 // Reinitialize the storage of an existing MDO at a safepoint. Doing it this way will ensure it's
1281 // not being accessed while the contents are being rewritten.
// VM operation that reinitializes an MDO inside a safepoint, so no other
// thread can observe the contents while they are rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint. This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  bool allow_nested_vm_operations() const { return true; }
};
1295
1296 void MethodData::reinitialize() {
1297 VM_ReinitializeMDO op(this);
1401 _tenure_traps = 0;
1402 _num_loops = 0;
1403 _num_blocks = 0;
1404 _would_profile = unknown;
1405
1406 #if INCLUDE_JVMCI
1407 _jvmci_ir_size = 0;
1408 _failed_speculations = nullptr;
1409 #endif
1410
1411 // Initialize escape flags.
1412 clear_escape_info();
1413 }
1414
1415 // Get a measure of how much mileage the method has on it.
1416 int MethodData::mileage_of(Method* method) {
1417 return MAX2(method->invocation_count(), method->backedge_count());
1418 }
1419
// The compilation policy decides when this MDO's profile is mature enough
// to be trusted; constness is cast away for the policy query.
bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature((MethodData*)this);
}
1423
1424 // Translate a bci to its corresponding data index (di).
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      // Cache a search hint: the entry itself on an exact match, otherwise
      // the last entry before bci.
      if (data->bci() == bci) set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr) set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  // No entry at or after bci: return the end of the data section.
  return (address)limit_data_position();
}
1439
1440 // Translate a bci to its corresponding data, or null.
1441 ProfileData* MethodData::bci_to_data(int bci) {
1589 return nullptr;
1590 }
1591
1592 // Printing
1593
// Print a one-line header identifying the method, then the full profile.
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1601
// Short-form printing: just identify the owning method.
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1607
1608 void MethodData::print_data_on(outputStream* st) const {
1609 Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
1610 ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
1611 Mutex::_no_safepoint_check_flag);
1612 ResourceMark rm;
1613 ProfileData* data = first_data();
1614 if (_parameters_type_data_di != no_parameters) {
1615 parameters_type_data()->print_data_on(st);
1616 }
1617 for ( ; is_valid(data); data = next_data(data)) {
1618 st->print("%d", dp_to_di(data->dp()));
1619 st->fill_to(6);
1620 data->print_data_on(st, this);
1621 }
1622
1623 st->print_cr("--- Extra data:");
1624 DataLayout* dp = extra_data_base();
1625 DataLayout* end = args_data_limit();
1626 for (;; dp = next_extra(dp)) {
1627 assert(dp < end, "moved past end of extra data");
1628 // No need for "Atomic::load_acquire" ops,
1629 // since the data structure is monotonic.
1630 switch(dp->tag()) {
1763 }
1764
1765 bool MethodData::profile_all_parameters() {
1766 return profile_parameters_flag() == type_profile_all;
1767 }
1768
1769 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1770 if (!profile_parameters()) {
1771 return false;
1772 }
1773
1774 if (profile_all_parameters()) {
1775 return true;
1776 }
1777
1778 assert(profile_parameters_jsr292_only(), "inconsistent");
1779 return m->is_compiled_lambda_form();
1780 }
1781
// CDS: visit all metaspace pointers reachable from this MDO — the owning
// Method, Klass*/Method* entries in the profile rows, and SpeculativeTrapData
// methods in the extra-data section.
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  // NOTE(review): name_and_sig_as_C_string() resource-allocates; presumably
  // a ResourceMark is active on this (CDS iteration) path -- confirm.
  log_trace(cds)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  for (DataLayout* dp = extra_data_base();
       dp < extra_data_limit();
       dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      // no_tag / arg_info terminate the speculative-trap portion.
      break;
    }
  }
}
1804
// Compaction support for the extra-data section: either slide the entry at
// dp left by "shift" cells (!reset), or zero the "shift" cells ending at dp
// (reset), reclaiming the space of dead SpeculativeTrapData entries.
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  check_extra_data_locked();

  if (shift == 0) {
    // Nothing to compact.
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}
1827
1828 // Check for entries that reference an unloaded method
1829 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1830 bool _always_clean;
1831 public:
1832 CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1833 bool is_live(Method* m) {
1834 if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1835 return true; // TODO: treat as unloaded instead?
1836 }
1837 return !(_always_clean) && m->method_holder()->is_loader_alive();
1838 }
1839 };
1840
1841 // Check for entries that reference a redefined method
1842 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1843 public:
1844 CleanExtraDataMethodClosure() {}
1845 bool is_live(Method* m) { return !m->is_old(); }
1846 };
1847
// Lazily create this MDO's extra-data lock. The field can be null (e.g.
// for CDS-materialized MethodData); racing creators are arbitrated with a
// cmpxchg and the loser deletes its redundant Mutex.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
1861
1862 // Remove SpeculativeTrapData entries that reference an unloaded or
1863 // redefined method
1864 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1865 check_extra_data_locked();
1866
1867 DataLayout* dp = extra_data_base();
1868 DataLayout* end = args_data_limit();
1869
1870 int shift = 0;
1871 for (; dp < end; dp = next_extra(dp)) {
1872 switch(dp->tag()) {
1873 case DataLayout::speculative_trap_data_tag: {
1874 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1875 Method* m = data->method();
1876 assert(m != nullptr, "should have a method");
1877 if (!cl->is_live(m)) {
1878 // "shift" accumulates the number of cells for dead
1879 // SpeculativeTrapData entries that have been seen so
1880 // far. Following entries must be shifted left by that many
1961 ResourceMark rm;
1962 CleanExtraDataMethodClosure cl;
1963
1964 // Lock to modify extra data, and prevent Safepoint from breaking the lock
1965 MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1966
1967 clean_extra_data(&cl);
1968 verify_extra_data_clean(&cl);
1969 }
1970
// loader_data is unused; only the C-heap side structures need explicit
// freeing here.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
1974
// Free C-heap allocations owned by this MDO (currently only the JVMCI
// failed-speculations list).
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
1980
1981 #if INCLUDE_CDS
// CDS: drop the process-local lock pointer before archiving; it is
// recreated lazily by extra_data_lock() after the archive is mapped.
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
}
1985
void MethodData::restore_unshareable_info(TRAPS) {
  // Intentionally empty: the extra-data lock is recreated on demand by
  // extra_data_lock() rather than eagerly here.
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
1989 #endif // INCLUDE_CDS
1990
#ifdef ASSERT
// Debug-only check that the caller holds the extra-data lock (waived while
// dumping a CDS archive) and, on a JavaThread, sits inside a
// NoSafepointVerifier scope.
void MethodData::check_extra_data_locked() const {
  // Cast const away, just to be able to verify the lock
  // Usually we only want non-const accesses on the lock,
  // so this here is an exception.
  MethodData* self = (MethodData*)this;
  assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
  assert(!Thread::current()->is_Java_thread() ||
         JavaThread::current()->is_in_no_safepoint_scope(),
         "JavaThread must have NoSafepointVerifier inside lock scope");
}
#endif
|