1 /*
2 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/cdsConfig.hpp"
26 #include "ci/ciMethodData.hpp"
27 #include "classfile/systemDictionaryShared.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "compiler/compilationPolicy.hpp"
30 #include "compiler/compilerDefinitions.inline.hpp"
31 #include "compiler/compilerOracle.hpp"
32 #include "interpreter/bytecode.hpp"
33 #include "interpreter/bytecodeStream.hpp"
34 #include "interpreter/linkResolver.hpp"
35 #include "memory/metaspaceClosure.hpp"
36 #include "memory/resourceArea.hpp"
37 #include "oops/klass.inline.hpp"
38 #include "oops/method.inline.hpp"
39 #include "oops/methodData.inline.hpp"
40 #include "prims/jvmtiRedefineClasses.hpp"
41 #include "runtime/atomicAccess.hpp"
42 #include "runtime/deoptimization.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/orderAccess.hpp"
45 #include "runtime/safepointVerifiers.hpp"
46 #include "runtime/signature.hpp"
47 #include "utilities/align.hpp"
48 #include "utilities/checkedCast.hpp"
49 #include "utilities/copy.hpp"
50
51 // ==================================================================
52 // DataLayout
53 //
54 // Overlay for generic profiling data.
55
56 // Some types of data layouts need a length field.
57 bool DataLayout::needs_array_len(u1 tag) {
58 return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
59 }
60
// Perform generic initialization of the data. More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  // Build the header in a local copy first so it can be published with a
  // single store below.
  DataLayout temp;
  temp._header._bits = (intptr_t)0;
  temp._header._struct._tag = tag;
  temp._header._struct._bci = bci;
  // Write the header using a single intptr_t write. This ensures that if the layout is
  // reinitialized readers will never see the transient state where the header is 0.
  _header = temp._header;

  // Zero every payload cell.
  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  // Array-style layouts store their own length in the dedicated length cell.
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  // Type-profiling layouts need extra, tag-specific setup of their cells.
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}
84
// Delegate weak-klass cleaning to the typed ProfileData view of this layout.
// The ResourceMark bounds the temporary ProfileData object data_in() allocates.
void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}
89
90
91 // ==================================================================
92 // ProfileData
93 //
94 // A ProfileData object is created to refer to a section of profiling
95 // data in a structured way.
96
97 // Constructor for invalid ProfileData.
98 ProfileData::ProfileData() {
99 _data = nullptr;
100 }
101
102 char* ProfileData::print_data_on_helper(const MethodData* md) const {
103 DataLayout* dp = md->extra_data_base();
104 DataLayout* end = md->args_data_limit();
105 stringStream ss;
106 for (;; dp = MethodData::next_extra(dp)) {
107 assert(dp < end, "moved past end of extra data");
108 switch(dp->tag()) {
109 case DataLayout::speculative_trap_data_tag:
110 if (dp->bci() == bci()) {
111 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
112 int trap = data->trap_state();
113 char buf[100];
114 ss.print("trap/");
115 data->method()->print_short_name(&ss);
116 ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
117 }
118 break;
119 case DataLayout::bit_data_tag:
120 break;
121 case DataLayout::no_tag:
122 case DataLayout::arg_info_data_tag:
123 return ss.as_string();
124 break;
125 default:
126 fatal("unexpected tag %d", dp->tag());
127 }
128 }
129 return nullptr;
130 }
131
// Print this entry, appending any matching speculative-trap annotation
// gathered from md's extra-data section.
void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}
135
136 void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
137 st->print("bci: %d ", bci());
138 st->fill_to(tab_width_one + 1);
139 st->print("%s", name);
140 tab(st);
141 int trap = trap_state();
142 if (trap != 0) {
143 char buf[100];
144 st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
145 }
146 if (extra != nullptr) {
147 st->print("%s", extra);
148 }
149 int flags = data()->flags();
150 if (flags != 0) {
151 st->print("flags(%d) %p/%d", flags, data(), in_bytes(DataLayout::flags_offset()));
152 }
153 }
154
155 void ProfileData::tab(outputStream* st, bool first) const {
156 st->fill_to(first ? tab_width_one : tab_width_two);
157 }
158
159 // ==================================================================
160 // BitData
161 //
162 // A BitData corresponds to a one-bit flag. This is used to indicate
163 // whether a checkcast bytecode has seen a null value.
164
165
// A BitData has no payload beyond its flags, so only the shared prefix prints.
void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}
170
171 // ==================================================================
172 // CounterData
173 //
174 // A CounterData corresponds to a simple counter.
175
// Print the shared prefix plus the single execution counter.
void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}
180
181 // ==================================================================
182 // JumpData
183 //
184 // A JumpData is used to access profiling information for a direct
185 // branch. It is a counter, used for counting the number of branches,
186 // plus a data displacement, used for realigning the data pointer to
187 // the corresponding target bci.
188
189 void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
190 assert(stream->bci() == bci(), "wrong pos");
191 int target;
192 Bytecodes::Code c = stream->code();
193 if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
194 target = stream->dest_w();
195 } else {
196 target = stream->dest();
197 }
198 int my_di = mdo->dp_to_di(dp());
199 int target_di = mdo->bci_to_di(target);
200 int offset = target_di - my_di;
201 set_displacement(offset);
202 }
203
// Print taken count and the cached data displacement to the target.
void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}
208
209 int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
210 // Parameter profiling include the receiver
211 int args_count = include_receiver ? 1 : 0;
212 ResourceMark rm;
213 ReferenceArgumentCount rac(signature);
214 args_count += rac.count();
215 args_count = MIN2(args_count, max);
216 return args_count * per_arg_cell_count;
217 }
218
// Total cells needed at an invoke site for argument/return type profiling:
// a header (only if anything is profiled), plus cells for profiled
// reference arguments, plus a return-type entry for reference results.
int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > SingleTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  // A return entry is only needed when the call returns a reference.
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = SingleTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}
240
// Walks a method signature and records the stack-slot offset of each
// reference-typed parameter (up to `max` of them). Offsets are expressed in
// words from the first parameter, so longs/doubles advance the offset by 2.
class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;                     // cap on the number of offsets collected
  int _offset;                  // running word offset of the current parameter
  GrowableArray<int> _offsets;  // collected offsets of reference parameters

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  // Word offset of the i-th collected reference parameter.
  int off_at(int i) const { return _offsets.at(i); }
};
265
// Fill in the stack-slot numbers for each profiled entry and mark every
// type as "none". For parameter profiling (include_receiver) the receiver,
// when present, occupies entry 0 at stack slot 0.
void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling include the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    // Shift argument offsets by one when a receiver occupies slot 0.
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}
281
// Initialize the argument and return type entries for a static/special/
// dynamic invoke site. Layout was sized earlier by compute_cell_count, so
// has_arguments()/has_return() reflect what was allocated.
void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Sanity: the allocated entry count must match the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
302
// Initialize the argument and return type entries for a virtual/interface
// invoke site. Mirrors CallTypeData::post_initialize; the two are kept
// separate because the enclosing layouts differ.
void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    // Sanity: the allocated entry count must match the signature.
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}
323
// Should klass k be purged from MDOs before AOT/CDS archiving?
// Only meaningful at the AOT safepoint; everywhere else answers false.
static bool is_excluded(Klass* k) {
#if INCLUDE_CDS
  if (CDSConfig::is_at_aot_safepoint()) {
    // Check for CDS exclusion only at CDS safe point.
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
      log_debug(aot, training)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
      return true;
    } else {
      bool excluded = SystemDictionaryShared::should_be_excluded(k);
      if (excluded) {
        log_debug(aot, training)("Purged %s from MDO: excluded class", k->name()->as_C_string());
      }
      return excluded;
    }
  }
#endif
  return false;
}
342
// Null out type entries whose klass is dead, CDS-excluded, or being
// unconditionally cleaned, preserving the status bits of each cell.
void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
        // Clear the klass but keep the cell's status bits.
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}
357
358 void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
359 for (int i = 0; i < _number_of_entries; i++) {
360 Klass** k = (Klass**)type_adr(i); // tagged
361 it->push(k);
362 }
363 }
364
// Single-entry analogue of TypeStackSlotEntries::clean_weak_klass_links:
// clear the klass (keeping status bits) when it is dead, excluded, or
// cleaning is unconditional.
void SingleTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}
377
378 void SingleTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
379 Klass** k = (Klass**)type_adr(); // tagged
380 it->push(k);
381 }
382
// Whether return-type profiling is globally enabled.
bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}
386
// Whether argument-type profiling is globally enabled.
bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}
390
// Render a tagged type cell: "none", "unknown", or the klass value, with an
// optional "(null seen)" suffix driven by the cell's null-seen bit.
void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}
403
404 void TypeStackSlotEntries::print_data_on(outputStream* st) const {
405 for (int i = 0; i < _number_of_entries; i++) {
406 _pd->tab(st);
407 st->print("%d: stack(%u) ", i, stack_slot(i));
408 print_klass(st, type(i));
409 st->cr();
410 }
411 }
412
// Print the single recorded type on its own tabbed line.
void SingleTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}
418
// Print the counter portion, then any argument-type and return-type profiles.
void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}
432
// Print the receiver-type portion, then any argument/return type profiles.
void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}
446
447 // ==================================================================
448 // ReceiverTypeData
449 //
450 // A ReceiverTypeData is used to access profiling information about a
451 // dynamic type check. It consists of a counter which counts the total times
452 // that the check is reached, and a series of (Klass*, count) pairs
453 // which are used to store a type profile for the receiver of the check.
454
// Clear receiver rows whose klass is dead, CDS-excluded, or being
// unconditionally cleaned. Rows for not-yet-initialized classes are kept
// unless cleaning unconditionally.
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr) {
      if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !p->is_loader_present_and_alive() || is_excluded(p)) {
        clear_row(row);
      }
    }
  }
}
468
469 void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
470 for (uint row = 0; row < row_limit(); row++) {
471 Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
472 it->push(recv);
473 }
474 }
475
// Print the invocation count, the number of populated receiver rows, and
// each row's count together with its fraction of the grand total.
void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  // First pass: count populated rows.
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  // Grand total = base count plus all per-receiver counts; used as the
  // denominator for each row's fraction below.
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
// Print the shared prefix followed by the receiver-type rows.
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}
501
// Same layout as ReceiverTypeData, printed under the VirtualCallData name.
void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}
506
507 // ==================================================================
508 // RetData
509 //
510 // A RetData is used to access profiling information for a ret bytecode.
511 // It is composed of a count of the number of times that the ret has
512 // been executed, followed by a series of triples of the form
513 // (bci, count, di) which count the number of times that some bci was the
514 // target of the ret and cache a corresponding displacement.
515
// Mark every cache row empty (bci == no_bci) before publishing.
void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    // Displacement is written before the bci that validates it.
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}
525
// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here. Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock. We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  // Returned even when no free row was found; the caller still needs the mdp.
  return mdp;
}
550
// Print the total count, the number of populated rows, and for each row its
// target bci, hit count, and cached displacement.
void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}
567
568 // ==================================================================
569 // BranchData
570 //
571 // A BranchData is used to access profiling data for a two-way branch.
572 // It consists of taken and not_taken counts as well as a data displacement
573 // for the taken case.
574
575 void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
576 assert(stream->bci() == bci(), "wrong pos");
577 int target = stream->dest();
578 int my_di = mdo->dp_to_di(dp());
579 int target_di = mdo->bci_to_di(target);
580 int offset = target_di - my_di;
581 set_displacement(offset);
582 }
583
// Print taken/not-taken counts and the cached displacement for taken.
void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  // If flags were printed, start the payload on a fresh line.
  if (data()->flags()) {
    st->cr();
    tab(st);
  }
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}
595
596 // ==================================================================
597 // MultiBranchData
598 //
599 // A MultiBranchData is used to access profiling information for
600 // a multi-way branch (*switch bytecodes). It consists of a series
601 // of (count, displacement) pairs, which count the number of times each
602 // case was taken and specify the data displacement for each branch target.
603
604 int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
605 int cell_count = 0;
606 if (stream->code() == Bytecodes::_tableswitch) {
607 Bytecode_tableswitch sw(stream->method()(), stream->bcp());
608 cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
609 } else {
610 Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
611 cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
612 }
613 return cell_count;
614 }
615
616 void MultiBranchData::post_initialize(BytecodeStream* stream,
617 MethodData* mdo) {
618 assert(stream->bci() == bci(), "wrong pos");
619 int target;
620 int my_di;
621 int target_di;
622 int offset;
623 if (stream->code() == Bytecodes::_tableswitch) {
624 Bytecode_tableswitch sw(stream->method()(), stream->bcp());
625 int len = sw.length();
626 assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
627 for (int count = 0; count < len; count++) {
628 target = sw.dest_offset_at(count) + bci();
629 my_di = mdo->dp_to_di(dp());
630 target_di = mdo->bci_to_di(target);
631 offset = target_di - my_di;
632 set_displacement_at(count, offset);
633 }
634 target = sw.default_offset() + bci();
635 my_di = mdo->dp_to_di(dp());
636 target_di = mdo->bci_to_di(target);
637 offset = target_di - my_di;
638 set_default_displacement(offset);
639
640 } else {
641 Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
642 int npairs = sw.number_of_pairs();
643 assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
644 for (int count = 0; count < npairs; count++) {
645 LookupswitchPair pair = sw.pair_at(count);
646 target = pair.offset() + bci();
647 my_di = mdo->dp_to_di(dp());
648 target_di = mdo->bci_to_di(target);
649 offset = target_di - my_di;
650 set_displacement_at(count, offset);
651 }
652 target = sw.default_offset() + bci();
653 my_di = mdo->dp_to_di(dp());
654 target_di = mdo->bci_to_di(target);
655 offset = target_di - my_di;
656 set_default_displacement(offset);
657 }
658 }
659
// Print the default target's count/displacement, then one line per case.
void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}
671
672 void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
673 print_shared(st, "ArgInfoData", extra);
674 int nargs = number_of_args();
675 for (int i = 0; i < nargs; i++) {
676 st->print(" 0x%x", arg_modified(i));
677 }
678 st->cr();
679 }
680
// Cells needed to profile m's parameter types: zero when parameter
// profiling is off for this method or no reference parameters exist,
// otherwise the per-parameter cells plus one length cell.
int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  // TypeProfileParmsLimit == -1 means "no limit".
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}
692
// Fill in parameter slots/types; the receiver (non-static) is included.
void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}
696
// Whether parameter-type profiling is globally enabled.
bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}
700
// Print the shared prefix followed by the per-parameter type entries.
void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}
707
// Push the embedded Method* cell for metaspace pointer iteration.
void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}
712
// Print the shared prefix and the method the speculation was about.
void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}
719
// Print the array-type profile, then the element portion: a null-seen
// marker plus the receiver-type rows for stored element types.
void ArrayStoreData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArrayStore", extra);
  st->cr();
  tab(st, true);
  st->print("array");
  _array.print_data_on(st);
  tab(st, true);
  st->print("element");
  if (null_seen()) {
    st->print(" (null seen)");
  }
  tab(st);
  print_receiver_data_on(st);
}
734
// Print the array-type and loaded-element-type profiles.
void ArrayLoadData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArrayLoad", extra);
  st->cr();
  tab(st, true);
  st->print("array");
  _array.print_data_on(st);
  tab(st, true);
  st->print("element");
  _element.print_data_on(st);
}
745
// Print the branch counts, then the type profiles of both operands.
void ACmpData::print_data_on(outputStream* st, const char* extra) const {
  BranchData::print_data_on(st, extra);
  tab(st, true);
  st->print("left");
  _left.print_data_on(st);
  tab(st, true);
  st->print("right");
  _right.print_data_on(st);
}
755
756 // ==================================================================
757 // MethodData*
758 //
759 // A MethodData* holds information which has been collected about
760 // a method.
761
// Allocate and construct a MethodData in metaspace for `method`.
// Allocation may safepoint/GC, hence the no-locks assertion; can throw
// (TRAPS) on out-of-metaspace.
MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}
769
// Number of profiling cells a bytecode needs, no_profile_data when the
// bytecode is not profiled, or variable_cell_count when the size depends on
// the concrete site (switches; invokes with argument/return profiling).
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    // With TypeProfileCasts we record receiver types, else just a null-seen bit.
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_aaload:
    return ArrayLoadData::static_cell_count();
  case Bytecodes::_aastore:
    return ArrayStoreData::static_cell_count();
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    // Argument/return profiling makes the size site-dependent.
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_if_acmpne:
  case Bytecodes::_if_acmpeq:
    return ACmpData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Size depends on the number of cases at the concrete site.
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}
835
// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    // Bytecode is not profiled at all.
    return 0;
  }
  if (cell_count == variable_cell_count) {
    // Site-dependent size: resolve it from the concrete bytecode.
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}
880
// Whether `code` is a bytecode for which speculative-trap entries may be
// recorded. Note the deliberate structure: the listed cases fall into the
// COMPILER2 block (answering UseTypeSpeculation when C2 is active) and
// otherwise fall through into `default`, answering false.
bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false; // unreachable; keeps some compilers quiet
}
905
906 #if INCLUDE_JVMCI
907
// Placement-size operator new: fs_size includes room for the trailing
// speculation bytes, so allocate that instead of sizeof(FailedSpeculation).
// Non-throwing: returns nullptr on allocation failure.
void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}
911
// Copy the speculation bytes into the trailing storage allocated by the
// sized operator new above.
FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}
915
// A heuristic check to detect nmethods that outlive a failed speculations list.
// A low-order tag bit set in the list head indicates the list was freed;
// adding to a freed list is a fatal error. `nm` is only used to build a
// helpful crash message.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != nullptr) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != nullptr) {
        method->print_name(&st);
      } else {
        // JVMCI-compiled nmethods may only carry a JVMCI name.
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != nullptr) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}
940
// Appends a FailedSpeculation holding 'speculation' to the append-only list
// rooted at *failed_speculations_address, unless an equal speculation is
// already present. Insertion races are resolved with a CAS on the tail's
// null next-pointer, so no lock is required.
// Returns true if a new entry was appended; false on duplicate or OOM.
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      // Try to publish fs as the new tail entry.
      FailedSpeculation* old_fs = AtomicAccess::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
      // CAS lost: another thread appended first; fall through and examine
      // the entry that now occupies this slot.
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        // Duplicate found: discard the never-published allocation.
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}
977
978 void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
979 assert(failed_speculations_address != nullptr, "must be");
980 FailedSpeculation* fs = *failed_speculations_address;
981 while (fs != nullptr) {
982 FailedSpeculation* next = fs->next();
983 delete fs;
984 fs = next;
985 }
986
987 // Write an unaligned value to failed_speculations_address to denote
988 // that it is no longer a valid pointer. This is allows for the check
989 // in add_failed_speculation against adding to a freed failed
990 // speculations list.
991 long* head = (long*) failed_speculations_address;
992 (*head) = (*head) | 0x1;
993 }
994 #endif // INCLUDE_JVMCI
995
// Returns the number of spare DataLayout slots to append after the regular
// profile data, sized from 'empty_bc_count' (bytecodes that got no profile
// entry but may need a trap entry later) and, when 'needs_speculative_traps',
// a fixed reserve for SpeculativeTrapData. The JVMCI build reserves more
// aggressively than the non-JVMCI build.
int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count; // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}
1044
// Compute the size of the MethodData* necessary to store
// profiling information about a given method. Size is in bytes.
// NOTE: the layout computed here must match what initialize() actually
// builds (initialize() asserts the two agree).
int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
  int data_size = 0;
  BytecodeStream stream(method);
  Bytecodes::Code c;
  int empty_bc_count = 0; // number of bytecodes lacking data
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = compute_data_size(&stream);
    data_size += size_in_bytes;
    // JVMCI builds only count unprofiled bytecodes that can trap.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Add a cell to record information about modified arguments.
  int arg_size = method->size_of_parameters();
  object_size += DataLayout::compute_size_in_bytes(arg_size+1);

  // Reserve room for an area of the MDO dedicated to profiling of
  // parameters
  int args_cell = ParametersTypeData::compute_cell_count(method());
  if (args_cell > 0) {
    object_size += DataLayout::compute_size_in_bytes(args_cell);
  }

  // One BitData entry per exception handler, if handler profiling is on.
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
  }

  return object_size;
}
1083
1084 // Compute the size of the MethodData* necessary to store
1085 // profiling information about a given method. Size is in words
1086 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1087 int byte_size = compute_allocation_size_in_bytes(method);
1088 int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1089 return align_metadata_size(word_size);
1090 }
1091
// Initialize an individual data segment. Returns the size of
// the segment in bytes. Selects the ProfileData kind (tag) and cell count
// for the bytecode at the stream's current position and writes the header
// into the DataLayout at 'data_index'. Must stay in sync with
// compute_data_size()/bytecode_cell_count() so sizing and initialization
// agree (checked by the assert at the bottom).
int MethodData::initialize_data(BytecodeStream* stream,
                                int data_index) {
  int cell_count = -1;           // < 0 means: no profile entry for this bytecode
  u1 tag = DataLayout::no_tag;
  DataLayout* data_layout = data_layout_at(data_index);
  Bytecodes::Code c = stream->code();
  switch (c) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      cell_count = ReceiverTypeData::static_cell_count();
      tag = DataLayout::receiver_type_data_tag;
    } else {
      cell_count = BitData::static_cell_count();
      tag = DataLayout::bit_data_tag;
    }
    break;
  case Bytecodes::_aaload:
    cell_count = ArrayLoadData::static_cell_count();
    tag = DataLayout::array_load_data_tag;
    break;
  case Bytecodes::_aastore:
    cell_count = ArrayStoreData::static_cell_count();
    tag = DataLayout::array_store_data_tag;
    break;
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic: {
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    // Only use the bigger call-type entry if it actually holds more cells
    // than a plain counter entry would.
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    cell_count = JumpData::static_cell_count();
    tag = DataLayout::jump_data_tag;
    break;
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface: {
    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = VirtualCallTypeData::compute_cell_count(stream);
    } else {
      cell_count = virtual_call_data_cell_count;
    }
    // Same pattern as above: prefer the typed entry only when it is bigger.
    if (cell_count > virtual_call_data_cell_count) {
      tag = DataLayout::virtual_call_type_data_tag;
    } else {
      tag = DataLayout::virtual_call_data_tag;
    }
    break;
  }
  case Bytecodes::_invokedynamic: {
    // %%% should make a type profile for any invokedynamic that takes a ref argument
    int counter_data_cell_count = CounterData::static_cell_count();
    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
        profile_return_for_invoke(stream->method(), stream->bci())) {
      cell_count = CallTypeData::compute_cell_count(stream);
    } else {
      cell_count = counter_data_cell_count;
    }
    if (cell_count > counter_data_cell_count) {
      tag = DataLayout::call_type_data_tag;
    } else {
      tag = DataLayout::counter_data_tag;
    }
    break;
  }
  case Bytecodes::_ret:
    cell_count = RetData::static_cell_count();
    tag = DataLayout::ret_data_tag;
    break;
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    cell_count = BranchData::static_cell_count();
    tag = DataLayout::branch_data_tag;
    break;
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
    cell_count = ACmpData::static_cell_count();
    tag = DataLayout::acmp_data_tag;
    break;
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    // Variable-length: one cell group per switch target.
    cell_count = MultiBranchData::compute_cell_count(stream);
    tag = DataLayout::multi_branch_data_tag;
    break;
  default:
    // Bytecode gets no profile entry; leave cell_count at -1.
    break;
  }
  assert(tag == DataLayout::multi_branch_data_tag ||
         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
          (tag == DataLayout::call_type_data_tag ||
           tag == DataLayout::counter_data_tag ||
           tag == DataLayout::virtual_call_type_data_tag ||
           tag == DataLayout::virtual_call_data_tag)) ||
         cell_count == bytecode_cell_count(c), "cell counts must agree");
  if (cell_count >= 0) {
    assert(tag != DataLayout::no_tag, "bad tag");
    assert(bytecode_has_profile(c), "agree w/ BHP");
    data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
    return DataLayout::compute_size_in_bytes(cell_count);
  } else {
    assert(!bytecode_has_profile(c), "agree w/ !BHP");
    return 0;
  }
}
1225
1226 // Get the data at an arbitrary (sort of) data index.
1227 ProfileData* MethodData::data_at(int data_index) const {
1228 if (out_of_bounds(data_index)) {
1229 return nullptr;
1230 }
1231 DataLayout* data_layout = data_layout_at(data_index);
1232 return data_layout->data_in();
1233 }
1234
// Returns the number of value cells in this entry, determined by its tag.
// Fixed-size kinds answer statically; variable-length kinds go through a
// transient ProfileData wrapper to read the stored count. This list must
// cover every tag that can appear in an MDO.
int DataLayout::cell_count() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return 0;
  case DataLayout::bit_data_tag:
    return BitData::static_cell_count();
  case DataLayout::counter_data_tag:
    return CounterData::static_cell_count();
  case DataLayout::jump_data_tag:
    return JumpData::static_cell_count();
  case DataLayout::receiver_type_data_tag:
    return ReceiverTypeData::static_cell_count();
  case DataLayout::virtual_call_data_tag:
    return VirtualCallData::static_cell_count();
  case DataLayout::ret_data_tag:
    return RetData::static_cell_count();
  case DataLayout::branch_data_tag:
    return BranchData::static_cell_count();
  case DataLayout::multi_branch_data_tag:
    return ((new MultiBranchData(this))->cell_count());
  case DataLayout::arg_info_data_tag:
    return ((new ArgInfoData(this))->cell_count());
  case DataLayout::call_type_data_tag:
    return ((new CallTypeData(this))->cell_count());
  case DataLayout::virtual_call_type_data_tag:
    return ((new VirtualCallTypeData(this))->cell_count());
  case DataLayout::parameters_type_data_tag:
    return ((new ParametersTypeData(this))->cell_count());
  case DataLayout::speculative_trap_data_tag:
    return SpeculativeTrapData::static_cell_count();
  case DataLayout::array_store_data_tag:
    return ((new ArrayStoreData(this))->cell_count());
  case DataLayout::array_load_data_tag:
    return ((new ArrayLoadData(this))->cell_count());
  case DataLayout::acmp_data_tag:
    return ((new ACmpData(this))->cell_count());
  }
}
// Wraps this layout in the ProfileData subclass matching its tag.
// The wrapper appears to be resource-allocated (callers such as
// post_initialize/print_data_on establish a ResourceMark first).
ProfileData* DataLayout::data_in() {
  switch (tag()) {
  case DataLayout::no_tag:
  default:
    ShouldNotReachHere();
    return nullptr;
  case DataLayout::bit_data_tag:
    return new BitData(this);
  case DataLayout::counter_data_tag:
    return new CounterData(this);
  case DataLayout::jump_data_tag:
    return new JumpData(this);
  case DataLayout::receiver_type_data_tag:
    return new ReceiverTypeData(this);
  case DataLayout::virtual_call_data_tag:
    return new VirtualCallData(this);
  case DataLayout::ret_data_tag:
    return new RetData(this);
  case DataLayout::branch_data_tag:
    return new BranchData(this);
  case DataLayout::multi_branch_data_tag:
    return new MultiBranchData(this);
  case DataLayout::arg_info_data_tag:
    return new ArgInfoData(this);
  case DataLayout::call_type_data_tag:
    return new CallTypeData(this);
  case DataLayout::virtual_call_type_data_tag:
    return new VirtualCallTypeData(this);
  case DataLayout::parameters_type_data_tag:
    return new ParametersTypeData(this);
  case DataLayout::speculative_trap_data_tag:
    return new SpeculativeTrapData(this);
  case DataLayout::array_store_data_tag:
    return new ArrayStoreData(this);
  case DataLayout::array_load_data_tag:
    return new ArrayLoadData(this);
  case DataLayout::acmp_data_tag:
    return new ACmpData(this);
  }
}
1315
1316 // Iteration over data.
1317 ProfileData* MethodData::next_data(ProfileData* current) const {
1318 int current_index = dp_to_di(current->dp());
1319 int next_index = current_index + current->size_in_bytes();
1320 ProfileData* next = data_at(next_index);
1321 return next;
1322 }
1323
1324 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1325 int current_index = dp_to_di((address)current);
1326 int next_index = current_index + current->size_in_bytes();
1327 if (out_of_bounds(next_index)) {
1328 return nullptr;
1329 }
1330 DataLayout* next = data_layout_at(next_index);
1331 return next;
1332 }
1333
// Give each of the data entries a chance to perform specific
// data initialization.
void MethodData::post_initialize(BytecodeStream* stream) {
  ResourceMark rm;
  ProfileData* data;
  for (data = first_data(); is_valid(data); data = next_data(data)) {
    // Reposition the stream on this entry's bytecode before handing it over.
    stream->set_start(data->bci());
    stream->next();
    data->post_initialize(stream, this);
  }
  if (_parameters_type_data_di != no_parameters) {
    // The parameters area has no bci of its own, so it gets no stream.
    parameters_type_data()->post_initialize(nullptr, this);
  }
}
1348
// Initialize the MethodData* corresponding to a given method.
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  // The extra-data lock is not created here; it starts out null and is
  // set up elsewhere.
  _extra_data_lock = nullptr;
  initialize();
}
1358
#if INCLUDE_CDS
// Default constructor for CDS: performs no initialization beyond the
// sanity assert (no fields are set up here).
MethodData::MethodData() {
  // Used by cppVtables.cpp only
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
#endif
1365
// Reinitialize the storage of an existing MDO at a safepoint. Doing it this way will ensure it's
// not being accessed while the contents are being rewritten.
class VM_ReinitializeMDO: public VM_Operation {
 private:
  MethodData* _mdo;
 public:
  VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
  VMOp_Type type() const { return VMOp_ReinitializeMDO; }
  void doit() {
    // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
    // over a safepoint. This means that we don't actually need to acquire the lock.
    _mdo->initialize();
  }
  // Allow this operation to be requested from within another VM operation.
  bool allow_nested_vm_operations() const { return true; }
};
1381
// Rebuilds this MDO's contents inside a VM operation (safepoint) so that
// no other thread can observe the data mid-rewrite.
void MethodData::reinitialize() {
  VM_ReinitializeMDO op(this);
  VMThread::execute(&op);
}
1386
1387
// Lays out and initializes the whole MDO body:
//   [per-bytecode profile data][extra (trap) slots][arg-info]
//   [parameter types][exception-handler entries]
// The offsets recorded here must produce exactly the size that
// compute_allocation_size_in_bytes() predicts (asserted at the end).
void MethodData::initialize() {
  Thread* thread = Thread::current();
  NoSafepointVerifier no_safepoint; // init function atomic wrt GC
  ResourceMark rm(thread);

  init();

  // Go through the bytecodes and allocate and initialize the
  // corresponding data cells.
  int data_size = 0;
  int empty_bc_count = 0; // number of bytecodes lacking data
  _data[0] = 0; // apparently not set below.
  BytecodeStream stream(methodHandle(thread, method()));
  Bytecodes::Code c;
  bool needs_speculative_traps = false;
  while ((c = stream.next()) >= 0) {
    int size_in_bytes = initialize_data(&stream, data_size);
    data_size += size_in_bytes;
    // JVMCI builds only count unprofiled bytecodes that can trap.
    if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
  }
  _data_size = data_size;
  int object_size = in_bytes(data_offset()) + data_size;

  // Add some extra DataLayout cells (at least one) to track stray traps.
  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
  int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);

  // Let's zero the space for the extra data
  if (extra_size > 0) {
    Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
  }

  // Add a cell to record information about modified arguments.
  // Set up _args_modified array after traps cells so that
  // the code for traps cells works.
  DataLayout *dp = data_layout_at(data_size + extra_size);

  int arg_size = method()->size_of_parameters();
  dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);

  int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
  object_size += extra_size + arg_data_size;

  int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserved an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled)
  int parm_data_size = 0;
  if (parms_cell > 0) {
    parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
    object_size += parm_data_size;
    _parameters_type_data_di = data_size + extra_size + arg_data_size;
    DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
  } else {
    _parameters_type_data_di = no_parameters;
  }

  // Exception-handler entries follow the parameters area; one BitData per
  // handler, tagged with the handler's pc.
  _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
  if (ProfileExceptionHandlers && method()->has_exception_handler()) {
    int num_exception_handlers = method()->exception_table_length();
    object_size += num_exception_handlers * single_exception_handler_data_size();
    ExceptionTableElement* exception_handlers = method()->exception_table_start();
    for (int i = 0; i < num_exception_handlers; i++) {
      DataLayout *dp = exception_handler_data_at(i);
      dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
    }
  }

  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but at
  // least well-defined.
  _hint_di = first_di();

  post_initialize(&stream);

  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
  set_size(object_size);
}
1471
// Resets the scalar/counter state of the MDO (called from initialize() and
// thus also on reinitialize()). Does not touch the profile data cells.
void MethodData::init() {
  _compiler_counters = CompilerCounters(); // reset compiler counters
  _invocation_counter.init();
  _backedge_counter.init();
  _invocation_counter_start = 0;
  _backedge_counter_start = 0;

  // Set per-method invoke- and backedge mask.
  // 'scale' stays 1.0 unless a CompileThresholdScaling compile command
  // applies to this method.
  double scale = 1.0;
  methodHandle mh(Thread::current(), _method);
  CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
  _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
  _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;

  _tenure_traps = 0;
  _num_loops = 0;
  _num_blocks = 0;
  _would_profile = unknown;

#if INCLUDE_JVMCI
  _jvmci_ir_size = 0;
  _failed_speculations = nullptr;
#endif

  // Initialize escape flags.
  clear_escape_info();
}
1499
// Maturity is delegated to the compilation policy; the const_cast is needed
// because the policy query takes a non-const MethodData*.
bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(const_cast<MethodData*>(this));
}
1503
// Translate a bci to its corresponding data index (di).
// Returns the first entry whose bci is >= the requested one, or
// limit_data_position() if there is none. As a side effect, updates the
// search hint to the match (or to its predecessor on a near miss).
address MethodData::bci_to_dp(int bci) {
  ResourceMark rm;
  DataLayout* data = data_layout_before(bci);
  DataLayout* prev = nullptr;
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() >= bci) {
      if (data->bci() == bci) set_hint_di(dp_to_di((address)data));
      else if (prev != nullptr) set_hint_di(dp_to_di((address)prev));
      return (address)data;
    }
    prev = data;
  }
  return (address)limit_data_position();
}
1519
// Translate a bci to its corresponding data, or null.
// Searches the regular data section first (entries are bci-ordered, so the
// scan stops once a larger bci is seen), then falls back to the extra-data
// section. Requires the extra-data lock.
ProfileData* MethodData::bci_to_data(int bci) {
  check_extra_data_locked();

  DataLayout* data = data_layout_before(bci);
  for ( ; is_valid(data); data = next_data_layout(data)) {
    if (data->bci() == bci) {
      set_hint_di(dp_to_di((address)data));
      return data->data_in();
    } else if (data->bci() > bci) {
      break;
    }
  }
  // Look it up (without creating) among the extra trap entries.
  return bci_to_extra_data(bci, nullptr, false);
}
1535
1536 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1537 assert(ProfileExceptionHandlers, "not profiling");
1538 for (int i = 0; i < num_exception_handler_data(); i++) {
1539 DataLayout* exception_handler_data = exception_handler_data_at(i);
1540 if (exception_handler_data->bci() == bci) {
1541 return exception_handler_data;
1542 }
1543 }
1544 return nullptr;
1545 }
1546
1547 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1548 DataLayout* data = exception_handler_bci_to_data_helper(bci);
1549 return data != nullptr ? new BitData(data) : nullptr;
1550 }
1551
1552 BitData MethodData::exception_handler_bci_to_data(int bci) {
1553 DataLayout* data = exception_handler_bci_to_data_helper(bci);
1554 assert(data != nullptr, "invalid bci");
1555 return BitData(data);
1556 }
1557
1558 DataLayout* MethodData::next_extra(DataLayout* dp) {
1559 int nb_cells = 0;
1560 switch(dp->tag()) {
1561 case DataLayout::bit_data_tag:
1562 case DataLayout::no_tag:
1563 nb_cells = BitData::static_cell_count();
1564 break;
1565 case DataLayout::speculative_trap_data_tag:
1566 nb_cells = SpeculativeTrapData::static_cell_count();
1567 break;
1568 default:
1569 fatal("unexpected tag %d", dp->tag());
1570 }
1571 return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1572 }
1573
1574 ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
1575 check_extra_data_locked();
1576
1577 DataLayout* end = args_data_limit();
1578
1579 for (;; dp = next_extra(dp)) {
1580 assert(dp < end, "moved past end of extra data");
1581 // No need for "AtomicAccess::load_acquire" ops,
1582 // since the data structure is monotonic.
1583 switch(dp->tag()) {
1584 case DataLayout::no_tag:
1585 return nullptr;
1586 case DataLayout::arg_info_data_tag:
1587 dp = end;
1588 return nullptr; // ArgInfoData is at the end of extra data section.
1589 case DataLayout::bit_data_tag:
1590 if (m == nullptr && dp->bci() == bci) {
1591 return new BitData(dp);
1592 }
1593 break;
1594 case DataLayout::speculative_trap_data_tag:
1595 if (m != nullptr) {
1596 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1597 if (dp->bci() == bci) {
1598 assert(data->method() != nullptr, "method must be set");
1599 if (data->method() == m) {
1600 return data;
1601 }
1602 }
1603 }
1604 break;
1605 default:
1606 fatal("unexpected tag %d", dp->tag());
1607 }
1608 }
1609 return nullptr;
1610 }
1611
1612
// Translate a bci to its corresponding extra data, or null.
// With create_if_missing, allocates a new entry in the first free slot:
// a BitData when m == nullptr, otherwise a SpeculativeTrapData (which is
// twice as wide, hence the extra room check). Requires the extra-data lock.
ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
  check_extra_data_locked();

  // This code assumes an entry for a SpeculativeTrapData is 2 cells
  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
         "code needs to be adjusted");

  // Do not create one of these if method has been redefined.
  if (m != nullptr && m->is_old()) {
    return nullptr;
  }

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  // Find if already exists
  ProfileData* result = bci_to_extra_data_find(bci, m, dp);
  if (result != nullptr || dp >= end) {
    return result;
  }

  if (create_if_missing) {
    // Not found -> Allocate
    assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
    assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
    u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
    // SpeculativeTrapData is 2 slots. Make sure we have room.
    if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
      return nullptr;
    }
    // Build the header off to the side, then publish it into the slot with
    // a single store.
    DataLayout temp;
    temp.initialize(tag, checked_cast<u2>(bci), 0);

    dp->set_header(temp.header());
    assert(dp->tag() == tag, "sane");
    assert(dp->bci() == bci, "no concurrent allocation");
    if (tag == DataLayout::bit_data_tag) {
      return new BitData(dp);
    } else {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->set_method(m);
      return data;
    }
  }
  return nullptr;
}
1661
1662 ArgInfoData *MethodData::arg_info() {
1663 DataLayout* dp = extra_data_base();
1664 DataLayout* end = args_data_limit();
1665 for (; dp < end; dp = next_extra(dp)) {
1666 if (dp->tag() == DataLayout::arg_info_data_tag)
1667 return new ArgInfoData(dp);
1668 }
1669 return nullptr;
1670 }
1671
1672 // Printing
1673
// Prints a one-line header identifying the method, followed by the full
// profile data dump.
void MethodData::print_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
  st->cr();
  print_data_on(st);
}
1681
// Prints a compact one-line description (no trailing newline, no data dump).
void MethodData::print_value_on(outputStream* st) const {
  assert(is_methodData(), "should be method data");
  st->print("method data for ");
  method()->print_value_on(st);
}
1687
// Dumps all profile data: the parameters area, the regular per-bytecode
// entries, then the extra-data section.
void MethodData::print_data_on(outputStream* st) const {
  // Take the extra-data lock unless this thread already holds it.
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->print_data_on(st);
  }
  for ( ; is_valid(data); data = next_data(data)) {
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st, this);
  }

  st->print_cr("--- Extra data:");
  DataLayout* dp = extra_data_base();
  DataLayout* end = args_data_limit();
  for (;; dp = next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    // No need for "AtomicAccess::load_acquire" ops,
    // since the data structure is monotonic.
    switch(dp->tag()) {
    case DataLayout::no_tag:
      continue;  // empty slot: skip (continues the for loop, not the switch)
    case DataLayout::bit_data_tag:
      data = new BitData(dp);
      break;
    case DataLayout::speculative_trap_data_tag:
      data = new SpeculativeTrapData(dp);
      break;
    case DataLayout::arg_info_data_tag:
      data = new ArgInfoData(dp);
      dp = end; // ArgInfoData is at the end of extra data section.
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
    st->print("%d", dp_to_di(data->dp()));
    st->fill_to(6);
    data->print_data_on(st);
    // 'dp' was pushed to 'end' above once ArgInfoData has been printed.
    if (dp >= end) return;
  }
}
1732
1733 // Verification
1734
// Sanity-checks this MDO (currently only the type check; see
// verify_data_on below).
void MethodData::verify_on(outputStream* st) {
  guarantee(is_methodData(), "object must be method data");
  // guarantee(m->is_perm(), "should be in permspace");
  this->verify_data_on(st);
}
1740
// Placeholder: data verification has not been implemented.
void MethodData::verify_data_on(outputStream* st) {
  NEEDS_CLEANUP;
  // not yet implemented.
}
1745
1746 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1747 if (m->is_compiled_lambda_form()) {
1748 return true;
1749 }
1750
1751 Bytecode_invoke inv(m , bci);
1752 return inv.is_invokedynamic() || inv.is_invokehandle();
1753 }
1754
1755 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1756 Bytecode_invoke inv(m , bci);
1757 if (inv.is_invokevirtual()) {
1758 Symbol* klass = inv.klass();
1759 if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1760 klass == vmSymbols::sun_misc_Unsafe() ||
1761 klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1762 Symbol* name = inv.name();
1763 if (name->starts_with("get") || name->starts_with("put")) {
1764 return true;
1765 }
1766 }
1767 }
1768 return false;
1769 }
1770
// Argument-profiling mode: the ones digit of TypeProfileLevel.
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}
1774
// True when argument type profiling is enabled at all (mode in range and a
// positive argument limit).
bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
}
1778
1779 bool MethodData::profile_arguments_jsr292_only() {
1780 return profile_arguments_flag() == type_profile_jsr292;
1781 }
1782
1783 bool MethodData::profile_all_arguments() {
1784 return profile_arguments_flag() == type_profile_all;
1785 }
1786
1787 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1788 if (!profile_arguments()) {
1789 return false;
1790 }
1791
1792 if (profile_all_arguments()) {
1793 return true;
1794 }
1795
1796 if (profile_unsafe(m, bci)) {
1797 return true;
1798 }
1799
1800 assert(profile_arguments_jsr292_only(), "inconsistent");
1801 return profile_jsr292(m, bci);
1802 }
1803
1804 int MethodData::profile_return_flag() {
1805 return (TypeProfileLevel % 100) / 10;
1806 }
1807
1808 bool MethodData::profile_return() {
1809 return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1810 }
1811
1812 bool MethodData::profile_return_jsr292_only() {
1813 return profile_return_flag() == type_profile_jsr292;
1814 }
1815
1816 bool MethodData::profile_all_return() {
1817 return profile_return_flag() == type_profile_all;
1818 }
1819
1820 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1821 if (!profile_return()) {
1822 return false;
1823 }
1824
1825 if (profile_all_return()) {
1826 return true;
1827 }
1828
1829 assert(profile_return_jsr292_only(), "inconsistent");
1830 return profile_jsr292(m, bci);
1831 }
1832
1833 int MethodData::profile_parameters_flag() {
1834 return TypeProfileLevel / 100;
1835 }
1836
1837 bool MethodData::profile_parameters() {
1838 return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1839 }
1840
1841 bool MethodData::profile_parameters_jsr292_only() {
1842 return profile_parameters_flag() == type_profile_jsr292;
1843 }
1844
1845 bool MethodData::profile_all_parameters() {
1846 return profile_parameters_flag() == type_profile_all;
1847 }
1848
1849 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1850 if (!profile_parameters()) {
1851 return false;
1852 }
1853
1854 if (profile_all_parameters()) {
1855 return true;
1856 }
1857
1858 assert(profile_parameters_jsr292_only(), "inconsistent");
1859 return m->is_compiled_lambda_form();
1860 }
1861
1862 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1863 log_trace(aot, training)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
1864 it->push(&_method);
1865 if (_parameters_type_data_di != no_parameters) {
1866 parameters_type_data()->metaspace_pointers_do(it);
1867 }
1868 for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
1869 data->metaspace_pointers_do(it);
1870 }
1871 for (DataLayout* dp = extra_data_base();
1872 dp < extra_data_limit();
1873 dp = MethodData::next_extra(dp)) {
1874 if (dp->tag() == DataLayout::speculative_trap_data_tag) {
1875 ResourceMark rm;
1876 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1877 data->metaspace_pointers_do(it);
1878 } else if (dp->tag() == DataLayout::no_tag ||
1879 dp->tag() == DataLayout::arg_info_data_tag) {
1880 break;
1881 }
1882 }
1883 }
1884
1885 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1886 check_extra_data_locked();
1887
1888 if (shift == 0) {
1889 return;
1890 }
1891 if (!reset) {
1892 // Move all cells of trap entry at dp left by "shift" cells
1893 intptr_t* start = (intptr_t*)dp;
1894 intptr_t* end = (intptr_t*)next_extra(dp);
1895 for (intptr_t* ptr = start; ptr < end; ptr++) {
1896 *(ptr-shift) = *ptr;
1897 }
1898 } else {
1899 // Reset "shift" cells stopping at dp
1900 intptr_t* start = ((intptr_t*)dp) - shift;
1901 intptr_t* end = (intptr_t*)dp;
1902 for (intptr_t* ptr = start; ptr < end; ptr++) {
1903 *ptr = 0;
1904 }
1905 }
1906 }
1907
1908 // Check for entries that reference an unloaded method
1909 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1910 bool _always_clean;
1911 public:
1912 CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1913 bool is_live(Method* m) {
1914 if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
1915 return true; // TODO: treat as unloaded instead?
1916 }
1917 return !(_always_clean) && m->method_holder()->is_loader_alive();
1918 }
1919 };
1920
1921 // Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  // A method is live unless class redefinition has made it obsolete ("old").
  bool is_live(Method* m) { return !m->is_old(); }
};
1927
// Lazily create the lock that guards the extra-data section. The lock is
// published with a CAS so that concurrent first callers agree on a single
// instance; the loser of the race deletes its candidate lock.
Mutex* MethodData::extra_data_lock() {
  Mutex* lock = AtomicAccess::load_acquire(&_extra_data_lock);
  if (lock == nullptr) {
    // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint
    lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock");
    Mutex* old = AtomicAccess::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
1942
1943 // Remove SpeculativeTrapData entries that reference an unloaded or
1944 // redefined method
// Compacts the extra-data section in place: SpeculativeTrapData entries
// whose referenced method is dead (per the closure) or excluded are removed,
// and the surviving entries are shifted left to fill the gaps. The freed
// cells at the end are reset to no_tag by the final helper call.
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}
1988
1989 // Verify there's no unloaded or redefined method referenced by a
1990 // SpeculativeTrapData entry
// Debug-only check, run after clean_extra_data(): every remaining
// SpeculativeTrapData entry must reference a method the closure still
// considers live. No-op in product builds.
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
  check_extra_data_locked();

#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      // BitData entries reference no method; nothing to verify.
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // End of the live entries; the rest of the section is empty.
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}
2017
2018 void MethodData::clean_method_data(bool always_clean) {
2019 ResourceMark rm;
2020 for (ProfileData* data = first_data();
2021 is_valid(data);
2022 data = next_data(data)) {
2023 data->clean_weak_klass_links(always_clean);
2024 }
2025 ParametersTypeData* parameters = parameters_type_data();
2026 if (parameters != nullptr) {
2027 parameters->clean_weak_klass_links(always_clean);
2028 }
2029
2030 CleanExtraDataKlassClosure cl(always_clean);
2031
2032 // Lock to modify extra data, and prevent Safepoint from breaking the lock
2033 MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
2034
2035 clean_extra_data(&cl);
2036 verify_extra_data_clean(&cl);
2037 }
2038
2039 // This is called during redefinition to clean all "old" redefined
2040 // methods out of MethodData for all methods.
2041 void MethodData::clean_weak_method_links() {
2042 ResourceMark rm;
2043 CleanExtraDataMethodClosure cl;
2044
2045 // Lock to modify extra data, and prevent Safepoint from breaking the lock
2046 MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
2047
2048 clean_extra_data(&cl);
2049 verify_extra_data_clean(&cl);
2050 }
2051
// Release the contents of this MethodData when it is deallocated.
// loader_data is unused here; only C-heap-side structures need freeing.
void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
  release_C_heap_structures();
}
2055
// Free C-heap allocations owned by this MethodData. Currently only the
// JVMCI failed-speculation list lives in the C heap.
void MethodData::release_C_heap_structures() {
#if INCLUDE_JVMCI
  FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
#endif
}
2061
2062 #if INCLUDE_CDS
// CDS dump time: clear process-local state (lock pointer, JVMCI speculation
// list) that must not be written into the shared archive.
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
#if INCLUDE_JVMCI
  _failed_speculations = nullptr;
#endif
}
2069
// CDS runtime: nothing to restore eagerly — the extra-data lock is
// re-created lazily by extra_data_lock() on first use.
void MethodData::restore_unshareable_info(TRAPS) {
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
2073 #endif // INCLUDE_CDS
2074
2075 #ifdef ASSERT
// Debug-only: assert that the caller holds the extra-data lock (or that we
// are dumping a CDS archive, where single-threaded access makes the lock
// unnecessary), and that a JavaThread caller is in a no-safepoint scope.
void MethodData::check_extra_data_locked() const {
    // Cast const away, just to be able to verify the lock
    // Usually we only want non-const accesses on the lock,
    // so this here is an exception.
    MethodData* self = (MethodData*)this;
    assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
    assert(!Thread::current()->is_Java_thread() ||
           JavaThread::current()->is_in_no_safepoint_scope(),
           "JavaThread must have NoSafepointVerifier inside lock scope");
}
2086 #endif