12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_METHODDATA_HPP
26 #define SHARE_OOPS_METHODDATA_HPP
27
28 #include "interpreter/bytecodes.hpp"
29 #include "interpreter/invocationCounter.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "oops/oop.hpp"
33 #include "runtime/atomic.hpp"
34 #include "runtime/deoptimization.hpp"
35 #include "runtime/mutex.hpp"
36 #include "utilities/align.hpp"
37 #include "utilities/copy.hpp"
38
39 class BytecodeStream;
40
41 // The MethodData object collects counts and other profile information
42 // during zeroth-tier (interpreter) and third-tier (C1 with full profiling)
43 // execution.
44 //
45 // The profile is used later by compilation heuristics. Some heuristics
46 // enable use of aggressive (or "heroic") optimizations. An aggressive
47 // optimization often has a down-side, a corner case that it handles
48 // poorly, but which is thought to be rare. The profile provides
49 // evidence of this rarity for a given method or even BCI. It allows
50 // the compiler to back out of the optimization at places where it
51 // has historically been a poor choice. Other heuristics try to use
52 // specific information gathered about types observed at a given site.
186 return Atomic::load_acquire(&_header._struct._flags);
187 }
188
  // Bytecode index within the method that this DataLayout describes.
  u2 bci() const {
    return _header._struct._bci;
  }

  // Raw access to the whole header word as a single u8 (overlays the
  // _struct view used by the bci()/flags accessors).
  void set_header(u8 value) {
    _header._bits = value;
  }
  u8 header() {
    return _header._bits;
  }
  // Plain cell store; release_set_cell_at (defined elsewhere) is the
  // store-release counterpart, matching the load_acquire on the flags.
  void set_cell_at(int index, intptr_t value) {
    _cells[index] = value;
  }
  void release_set_cell_at(int index, intptr_t value);
  // Plain cell load (no ordering guarantees).
  intptr_t cell_at(int index) const {
    return _cells[index];
  }
206
  // Atomically set the given flag bit in the header's flags byte.
  // Returns true if this call set the bit, false if it was already set.
  // A CAS retry loop is needed because other bits in the same byte may
  // be updated concurrently by other threads.
  bool set_flag_at(u1 flag_number) {
    const u1 bit = 1 << flag_number;
    u1 compare_value;
    do {
      compare_value = _header._struct._flags;
      if ((compare_value & bit) == bit) {
        // already set.
        return false;
      }
    } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, static_cast<u1>(compare_value | bit)));
    return true;
  }
219
220 bool clear_flag_at(u1 flag_number) {
221 const u1 bit = 1 << flag_number;
222 u1 compare_value;
223 u1 exchange_value;
224 do {
225 compare_value = _header._struct._flags;
329 ShouldNotReachHere();
330 return -1;
331 }
332
  // Return the size of this data, computed from the concrete
  // subclass's (virtual) cell_count().
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }
337
protected:
  // Low-level accessors for underlying data.
  // All typed accessors funnel through set_intptr_at/intptr_at so the
  // bounds assert against cell_count() is applied uniformly.
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  void release_set_intptr_at(int index, intptr_t value);
  intptr_t intptr_at(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_uint_at(int index, uint value);
  uint uint_at(int index) const {
    return (uint)intptr_at(index);
  }
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_int_at(int index, int value);
  int int_at(int index) const {
    return (int)intptr_at(index);
  }
  // Like int_at but without the cell_count() bounds assert; for callers
  // that have validated the index themselves.
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }
  // Oops are stored in cells as raw pointer bits.
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, cast_from_oop<intptr_t>(value));
  }
  oop oop_at(int index) const {
    return cast_to_oop(intptr_at(index));
  }

  // Forward flag manipulation to the underlying DataLayout header.
  void set_flag_at(u1 flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(u1 flag_number) const {
    return data()->flag_at(flag_number);
  }

  // two convenient imports for use by subclasses:
  static ByteSize cell_offset(int index) {
    return DataLayout::cell_offset(index);
  }
  static u1 flag_number_to_constant(u1 flag_number) {
    return DataLayout::flag_number_to_constant(flag_number);
  }

  // Wraps an existing DataLayout; does not take ownership.
  ProfileData(DataLayout* data) {
    _data = data;
  }
391
  // Checked downcasts: assert the dynamic kind in debug builds; in
  // product builds (asserts compiled out) return nullptr on mismatch.
  VirtualCallTypeData* as_VirtualCallTypeData() const {
    assert(is_VirtualCallTypeData(), "wrong type");
    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
  }
  ParametersTypeData* as_ParametersTypeData() const {
    assert(is_ParametersTypeData(), "wrong type");
    return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
  }
  SpeculativeTrapData* as_SpeculativeTrapData() const {
    assert(is_SpeculativeTrapData(), "wrong type");
    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
  }


  // Subclass specific initialization
  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {}

  // CI translation: ProfileData can represent both MethodDataOop data
  // as well as CIMethodData data. This function is provided for translating
  // an oop in a ProfileData to the ci equivalent. Generally speaking,
  // most ProfileData don't require any translation, so we provide the null
  // translation here, and the required translators are in the ci subclasses.
  virtual void translate_from(const ProfileData* data) {}

  // Subclasses override this; reaching the base implementation means a
  // concrete ProfileData kind forgot to provide a printer.
  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
    ShouldNotReachHere();
  }

  void print_data_on(outputStream* st, const MethodData* md) const;

  // Shared formatting helpers for subclass printers.
  void print_shared(outputStream* st, const char* name, const char* extra) const;
  void tab(outputStream* st, bool first = false) const;
};
508
509 // BitData
510 //
511 // A BitData holds a flag or two in its header.
512 class BitData : public ProfileData {
837 }
838
  // stack slot for entry i
  uint stack_slot(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->uint_at(stack_slot_offset(i));
  }

  // set stack slot for entry i
  void set_stack_slot(int i, uint num) {
    assert(i >= 0 && i < _number_of_entries, "oob");
    _pd->set_uint_at(stack_slot_offset(i), num);
  }

  // type for entry i (a tagged intptr_t; see TypeEntries::with_status)
  intptr_t type(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->intptr_at(type_offset_in_cells(i));
  }

  // set type for entry i
  void set_type(int i, intptr_t k) {
    assert(i >= 0 && i < _number_of_entries, "oob");
    _pd->set_intptr_at(type_offset_in_cells(i), k);
  }

  // Size in bytes of the cells used per profiled argument.
  static ByteSize per_arg_size() {
    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
  }

  // Number of cells used per profiled argument.
  static int per_arg_count() {
    return per_arg_cell_count;
  }

  // Offset of the type cell of entry i, for code generation.
  ByteSize type_offset(int i) const {
    return DataLayout::cell_offset(type_offset_in_cells(i));
  }

  // GC support
  void clean_weak_klass_links(bool always_clean);

  void print_data_on(outputStream* st) const;
879 };
880
// Type entry used for return from a call. A single cell to record the
// type.
class ReturnTypeEntry : public TypeEntries {

private:
  enum {
    cell_count = 1  // one cell holds the observed return type
  };

public:
  ReturnTypeEntry(int base_off)
    : TypeEntries(base_off) {}

  // Start out with no type recorded yet.
  void post_initialize() {
    set_type(type_none());
  }

  // The recorded type, as a tagged intptr_t (see TypeEntries).
  intptr_t type() const {
    return _pd->intptr_at(_base_off);
  }

  void set_type(intptr_t k) {
    _pd->set_intptr_at(_base_off, k);
  }

  static int static_cell_count() {
    return cell_count;
  }

  static ByteSize size() {
    return in_ByteSize(cell_count * DataLayout::cell_size);
  }

  // Offset of the type cell, for code generation.
  ByteSize type_offset() {
    return DataLayout::cell_offset(_base_off);
  }

  // GC support
  void clean_weak_klass_links(bool always_clean);

  void print_data_on(outputStream* st) const;
};
923
924 // Entries to collect type information at a call: contains arguments
925 // (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
926 // number of cells. Because the number of cells for the return type is
927 // smaller than the number of cells for the type of an arguments, the
928 // number of cells is used to tell how many arguments are profiled and
929 // whether a return value is profiled. See has_arguments() and
930 // has_return().
931 class TypeEntriesAtCall {
932 private:
  // Cell offsets of the per-argument stack-slot/type entries, shifted
  // past the header cells that TypeEntriesAtCall itself occupies.
  static int stack_slot_local_offset(int i) {
    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
  }

  static int argument_type_local_offset(int i) {
    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
  }
940
1092 }
1093
  // Offset of the type cell for argument i, for code generation.
  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  // Offset of the return-type cell, for code generation.
  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    // Only clean the sections that are actually present in this record.
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1113 };
1114
1115 // ReceiverTypeData
1116 //
1117 // A ReceiverTypeData is used to access profiling information about a
1118 // dynamic type check. It consists of a series of (Klass*, count)
1119 // pairs which are used to store a type profile for the receiver of
1120 // the check, the associated count is incremented every time the type
1121 // is seen. A per ReceiverTypeData counter is incremented on type
1122 // overflow (when there's no more room for a not yet profiled Klass*).
1123 //
1124 class ReceiverTypeData : public CounterData {
1125 friend class VMStructs;
1126 friend class JVMCIVMStructs;
1127 protected:
1128 enum {
1129 receiver0_offset = counter_cell_count,
1130 count0_offset,
1131 receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1202 //
1203 set_count(0);
1204 set_receiver(row, nullptr);
1205 set_receiver_count(row, 0);
1206 }
1207
  // Code generation support: cell offsets of the (Klass*, count) pair
  // for a given receiver row.
  static ByteSize receiver_offset(uint row) {
    return cell_offset(receiver_cell_index(row));
  }
  static ByteSize receiver_count_offset(uint row) {
    return cell_offset(receiver_count_cell_index(row));
  }
  // Size of a whole ReceiverTypeData record.
  static ByteSize receiver_type_data_size() {
    return cell_offset(static_cell_count());
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean);

  void print_receiver_data_on(outputStream* st) const;
  void print_data_on(outputStream* st, const char* extra = nullptr) const;
1224 };
1225
1226 // VirtualCallData
1227 //
1228 // A VirtualCallData is used to access profiling information about a
1229 // virtual call. For now, it has nothing more than a ReceiverTypeData.
1230 class VirtualCallData : public ReceiverTypeData {
public:
  // Accepts both layout tags: virtual_call_type_data reuses this
  // representation for its receiver rows.
  VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  }

  virtual bool is_VirtualCallData() const { return true; }
1238
1239 static int static_cell_count() {
1240 // At this point we could add more profile state, e.g., for arguments.
1241 // But for now it's the same size as the base record type.
1367
  // Offset of the type cell for argument i, for code generation.
  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  // Offset of the return-type cell, for code generation.
  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    // Clean the inherited receiver rows, then any argument/return type cells.
    ReceiverTypeData::clean_weak_klass_links(always_clean);
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1388 };
1389
1390 // RetData
1391 //
1392 // A RetData is used to access profiling information for a ret bytecode.
1393 // It is composed of a count of the number of times that the ret has
1394 // been executed, followed by a series of triples of the form
1395 // (bci, count, di) which count the number of times that some bci was the
1396 // target of the ret and cache a corresponding data displacement.
1397 class RetData : public CounterData {
protected:
  // Cell layout of one (bci, count, displacement) row, placed after the
  // counter cells inherited from CounterData.
  enum {
    bci0_offset = counter_cell_count,
    count0_offset,
    displacement0_offset,
    ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
  };
1405
1406 void set_bci(uint row, int bci) {
1550 // and an array start.
1551 class ArrayData : public ProfileData {
1552 friend class VMStructs;
1553 friend class JVMCIVMStructs;
1554 protected:
1555 friend class DataLayout;
1556
  // Layout: one length cell followed by the array elements.
  enum {
    array_len_off_set,
    array_start_off_set
  };

  // Element accessors, biased past the length cell.
  uint array_uint_at(int index) const {
    int aindex = index + array_start_off_set;
    return uint_at(aindex);
  }
  int array_int_at(int index) const {
    int aindex = index + array_start_off_set;
    return int_at(aindex);
  }
  oop array_oop_at(int index) const {
    int aindex = index + array_start_off_set;
    return oop_at(aindex);
  }
  void array_set_int_at(int index, int value) {
    int aindex = index + array_start_off_set;
    set_int_at(aindex, value);
  }

  // Code generation support for subclasses.
  static ByteSize array_element_offset(int index) {
    return cell_offset(array_start_off_set + index);
  }

public:
  ArrayData(DataLayout* layout) : ProfileData(layout) {}

  virtual bool is_ArrayData() const { return true; }

  // -1: the cell count is per-instance (depends on array_len), not static.
  static int static_cell_count() {
    return -1;
  }
1592
1593 int array_len() const {
1766
  // Number of profiled parameters; the backing array holds
  // per_arg_count() cells for each one.
  int number_of_parameters() const {
    return array_len() / TypeStackSlotEntries::per_arg_count();
  }

  const TypeStackSlotEntries* parameters() const { return &_parameters; }

  uint stack_slot(int i) const {
    return _parameters.stack_slot(i);
  }

  // Record klass k for parameter i, merging with the previously recorded
  // state via TypeEntries::with_status.
  void set_type(int i, Klass* k) {
    intptr_t current = _parameters.type(i);
    _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
  }

  virtual void clean_weak_klass_links(bool always_clean) {
    _parameters.clean_weak_klass_links(always_clean);
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;

  // Code generation support.
  static ByteSize stack_slot_offset(int i) {
    return cell_offset(stack_slot_local_offset(i));
  }

  static ByteSize type_offset(int i) {
    return cell_offset(type_local_offset(i));
  }
};
1796
1797 // SpeculativeTrapData
1798 //
1799 // A SpeculativeTrapData is used to record traps due to type
1800 // speculation. It records the root of the compilation: that type
1801 // speculation is wrong in the context of one compilation (for
1802 // method1) doesn't mean it's wrong in the context of another one (for
1803 // method2). Type speculation could have more/different data in the
1804 // context of the compilation of method2 and it's worthwhile to try an
1805 // optimization that failed for compilation of method1 in the context
1836 }
1837
  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessor: the root method of the compilation that trapped.
  Method* method() const {
    return (Method*)intptr_at(speculative_trap_method);
  }

  void set_method(Method* m) {
    // NOTE(review): old (redefined) methods are rejected here — presumably
    // because their lifetime is not safe to cache; confirm against callers.
    assert(!m->is_old(), "cannot add old methods");
    set_intptr_at(speculative_trap_method, (intptr_t)m);
  }

  static ByteSize method_offset() {
    return cell_offset(speculative_trap_method);
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1857 };
1858
1859 // MethodData*
1860 //
1861 // A MethodData* holds information which has been collected about
1862 // a method. Its layout looks like this:
1863 //
1864 // -----------------------------
1865 // | header |
1866 // | klass |
1867 // -----------------------------
1868 // | method |
1869 // | size of the MethodData* |
1870 // -----------------------------
1871 // | Data entries... |
1872 // | (variable size) |
1873 // | |
1874 // . .
1875 // . .
1945
1946 class MethodData : public Metadata {
1947 friend class VMStructs;
1948 friend class JVMCIVMStructs;
1949 friend class ProfileData;
1950 friend class TypeEntriesAtCall;
1951 friend class ciMethodData;
1952
  // If you add a new field that points to any metaspace object, you
  // must add this field to MethodData::metaspace_pointers_do().

  // Back pointer to the Method*
  Method* _method;

  // Size of this oop in bytes
  int _size;

  // Cached hint for bci_to_dp and bci_to_data
  int _hint_di;

  // See extra_data_lock() / check_extra_data_locked().
  Mutex _extra_data_lock;

  MethodData(const methodHandle& method);
public:
  static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);

  virtual bool is_methodData() const { return true; }
  void initialize();

  // Whole-method sticky bits and flags
  enum {
    _trap_hist_limit = Deoptimization::Reason_TRAP_HISTORY_LENGTH,
    _trap_hist_mask = max_jubyte,
    _extra_data_count = 4 // extra DataLayout headers, for trap history
  }; // Public flag values
1980
  // Compiler-related counters.
  class CompilerCounters {
    friend class VMStructs;
    friend class JVMCIVMStructs;

    uint _nof_decompiles; // count of all nmethod removals
    uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits
    uint _nof_overflow_traps; // trap count, excluding _trap_hist
    union {
      intptr_t _align;
      // JVMCI separates trap history for OSR compilations from normal compilations
      u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
    } _trap_hist;

  public:
    CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0) {
#ifndef ZERO
      // Some Zero platforms do not have expected alignment, and do not use
      // this code. static_assert would still fire and fail for them.
      static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
#endif
      // Zero the whole trap-history array word by word.
      uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
      Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
    }

    // Return (uint)-1 for overflow.
    // A saturated byte (0xFF) wraps to 0 after the +1, so the trailing -1
    // yields (uint)-1; otherwise the stored byte is returned unchanged.
    uint trap_count(int reason) const {
      assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
      return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
    }
2011
2012 uint inc_trap_count(int reason) {
2013 // Count another trap, anywhere in this method.
2014 assert(reason >= 0, "must be single trap");
2015 assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2016 uint cnt1 = 1 + _trap_hist._array[reason];
2247 return _backedge_counter_start;
2248 }
2249
  // Counter growth since the last reset_start_counters().
  int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
  int backedge_count_delta() { return backedge_count() - backedge_count_start(); }

  // Snapshot the current counters so later deltas are relative to now.
  void reset_start_counters() {
    _invocation_counter_start = invocation_count();
    _backedge_counter_start = backedge_count();
  }

  InvocationCounter* invocation_counter() { return &_invocation_counter; }
  InvocationCounter* backedge_counter() { return &_backedge_counter; }

#if INCLUDE_JVMCI
  FailedSpeculation** get_failed_speculations_address() {
    return &_failed_speculations;
  }
#endif

  void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
  bool would_profile() const { return _would_profile != no_profile; }

  int num_loops() const { return _num_loops; }
  void set_num_loops(short n) { _num_loops = n; }
  int num_blocks() const { return _num_blocks; }
  void set_num_blocks(short n) { _num_blocks = n; }

  bool is_mature() const; // consult mileage and ProfileMaturityPercentage
  static int mileage_of(Method* m);

  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  enum EscapeFlag {
    estimated = 1 << 0,
    return_local = 1 << 1,
    return_allocated = 1 << 2,
    allocated_escapes = 1 << 3,
    unknown_modified = 1 << 4
  };

  void print_value_on(outputStream* st) const;

  // printing support for method data
  void print_data_on(outputStream* st) const;

  const char* internal_name() const { return "{method data}"; }

  // verification
  void verify_on(outputStream* st);
  void verify_data_on(outputStream* st);

  // Static queries for which kinds of profiling are enabled.
  static bool profile_parameters_for_method(const methodHandle& m);
  static bool profile_arguments();
  static bool profile_arguments_jsr292_only();
  static bool profile_return();
  static bool profile_parameters();
  static bool profile_return_jsr292_only();

  void clean_method_data(bool always_clean);
  void clean_weak_method_links();
  Mutex* extra_data_lock() { return &_extra_data_lock; }
  // Debug-only check that the caller holds extra_data_lock(); no-op in product.
  void check_extra_data_locked() const NOT_DEBUG_RETURN;
2508 };
2509
2510 #endif // SHARE_OOPS_METHODDATA_HPP
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_OOPS_METHODDATA_HPP
26 #define SHARE_OOPS_METHODDATA_HPP
27
28 #include "interpreter/bytecodes.hpp"
29 #include "interpreter/invocationCounter.hpp"
30 #include "oops/metadata.hpp"
31 #include "oops/method.hpp"
32 #include "runtime/atomic.hpp"
33 #include "runtime/deoptimization.hpp"
34 #include "runtime/mutex.hpp"
35 #include "utilities/align.hpp"
36 #include "utilities/copy.hpp"
37
38 class BytecodeStream;
39
40 // The MethodData object collects counts and other profile information
41 // during zeroth-tier (interpreter) and third-tier (C1 with full profiling)
42 // execution.
43 //
44 // The profile is used later by compilation heuristics. Some heuristics
45 // enable use of aggressive (or "heroic") optimizations. An aggressive
46 // optimization often has a down-side, a corner case that it handles
47 // poorly, but which is thought to be rare. The profile provides
48 // evidence of this rarity for a given method or even BCI. It allows
49 // the compiler to back out of the optimization at places where it
50 // has historically been a poor choice. Other heuristics try to use
51 // specific information gathered about types observed at a given site.
185 return Atomic::load_acquire(&_header._struct._flags);
186 }
187
  // Bytecode index within the method that this DataLayout describes.
  u2 bci() const {
    return _header._struct._bci;
  }

  // Raw access to the whole header word as a single u8.
  void set_header(u8 value) {
    _header._bits = value;
  }
  u8 header() {
    return _header._bits;
  }
  // Plain cell accessors; release_set_cell_at is the store-release variant.
  void set_cell_at(int index, intptr_t value) {
    _cells[index] = value;
  }
  void release_set_cell_at(int index, intptr_t value);
  intptr_t cell_at(int index) const {
    return _cells[index];
  }
  // Address of a cell, for callers that need to operate on the cell's
  // storage directly (see ProfileData::intptr_at_adr).
  intptr_t* cell_at_adr(int index) const {
    return const_cast<intptr_t*>(&_cells[index]);
  }

  // Atomically set the given flag bit. Returns true if this call set it,
  // false if it was already set. The CAS retry loop tolerates concurrent
  // updates to other bits in the same flags byte.
  bool set_flag_at(u1 flag_number) {
    const u1 bit = 1 << flag_number;
    u1 compare_value;
    do {
      compare_value = _header._struct._flags;
      if ((compare_value & bit) == bit) {
        // already set.
        return false;
      }
    } while (compare_value != Atomic::cmpxchg(&_header._struct._flags, compare_value, static_cast<u1>(compare_value | bit)));
    return true;
  }
221
222 bool clear_flag_at(u1 flag_number) {
223 const u1 bit = 1 << flag_number;
224 u1 compare_value;
225 u1 exchange_value;
226 do {
227 compare_value = _header._struct._flags;
331 ShouldNotReachHere();
332 return -1;
333 }
334
  // Return the size of this data, computed from the concrete
  // subclass's (virtual) cell_count().
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }

protected:
  // Low-level accessors for underlying data.
  // All typed accessors funnel through set_intptr_at/intptr_at so the
  // bounds assert against cell_count() is applied uniformly.
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  void release_set_intptr_at(int index, intptr_t value);
  intptr_t intptr_at(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  // Bounds-checked address of a cell (see DataLayout::cell_at_adr).
  intptr_t* intptr_at_adr(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at_adr(index);
  }
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_uint_at(int index, uint value);
  uint uint_at(int index) const {
    return (uint)intptr_at(index);
  }
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_int_at(int index, int value);
  int int_at(int index) const {
    return (int)intptr_at(index);
  }
  // Like int_at but without the cell_count() bounds assert.
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }

  // Forward flag manipulation to the underlying DataLayout header.
  void set_flag_at(u1 flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(u1 flag_number) const {
    return data()->flag_at(flag_number);
  }

  // two convenient imports for use by subclasses:
  static ByteSize cell_offset(int index) {
    return DataLayout::cell_offset(index);
  }
  static u1 flag_number_to_constant(u1 flag_number) {
    return DataLayout::flag_number_to_constant(flag_number);
  }

  // Wraps an existing DataLayout; does not take ownership.
  ProfileData(DataLayout* data) {
    _data = data;
  }
391
  // Checked downcasts: assert the dynamic kind in debug builds; in
  // product builds (asserts compiled out) return nullptr on mismatch.
  VirtualCallTypeData* as_VirtualCallTypeData() const {
    assert(is_VirtualCallTypeData(), "wrong type");
    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : nullptr;
  }
  ParametersTypeData* as_ParametersTypeData() const {
    assert(is_ParametersTypeData(), "wrong type");
    return is_ParametersTypeData() ? (ParametersTypeData*)this : nullptr;
  }
  SpeculativeTrapData* as_SpeculativeTrapData() const {
    assert(is_SpeculativeTrapData(), "wrong type");
    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : nullptr;
  }


  // Subclass specific initialization
  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {}

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {}

  // CI translation: ProfileData can represent both MethodDataOop data
  // as well as CIMethodData data. This function is provided for translating
  // an oop in a ProfileData to the ci equivalent. Generally speaking,
  // most ProfileData don't require any translation, so we provide the null
  // translation here, and the required translators are in the ci subclasses.
  virtual void translate_from(const ProfileData* data) {}

  // Subclasses override this; reaching the base implementation means a
  // concrete ProfileData kind forgot to provide a printer.
  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const {
    ShouldNotReachHere();
  }

  void print_data_on(outputStream* st, const MethodData* md) const;

  // Shared formatting helpers for subclass printers.
  void print_shared(outputStream* st, const char* name, const char* extra) const;
  void tab(outputStream* st, bool first = false) const;
510 };
511
512 // BitData
513 //
514 // A BitData holds a flag or two in its header.
515 class BitData : public ProfileData {
840 }
841
  // stack slot for entry i
  uint stack_slot(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->uint_at(stack_slot_offset(i));
  }

  // set stack slot for entry i
  void set_stack_slot(int i, uint num) {
    assert(i >= 0 && i < _number_of_entries, "oob");
    _pd->set_uint_at(stack_slot_offset(i), num);
  }

  // type for entry i (a tagged intptr_t; see TypeEntries::with_status)
  intptr_t type(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->intptr_at(type_offset_in_cells(i));
  }

  // address of the type cell for entry i
  intptr_t* type_adr(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->intptr_at_adr(type_offset_in_cells(i));
  }

  // set type for entry i
  void set_type(int i, intptr_t k) {
    assert(i >= 0 && i < _number_of_entries, "oob");
    _pd->set_intptr_at(type_offset_in_cells(i), k);
  }

  // Size in bytes of the cells used per profiled argument.
  static ByteSize per_arg_size() {
    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
  }

  // Number of cells used per profiled argument.
  static int per_arg_count() {
    return per_arg_cell_count;
  }

  // Offset of the type cell of entry i, for code generation.
  ByteSize type_offset(int i) const {
    return DataLayout::cell_offset(type_offset_in_cells(i));
  }

  // GC support
  void clean_weak_klass_links(bool always_clean);

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it);

  void print_data_on(outputStream* st) const;
890 };
891
// Type entry used for return from a call. A single cell to record the
// type.
class ReturnTypeEntry : public TypeEntries {

private:
  enum {
    cell_count = 1  // one cell holds the observed return type
  };

public:
  ReturnTypeEntry(int base_off)
    : TypeEntries(base_off) {}

  // Start out with no type recorded yet.
  void post_initialize() {
    set_type(type_none());
  }

  // The recorded type, as a tagged intptr_t (see TypeEntries).
  intptr_t type() const {
    return _pd->intptr_at(_base_off);
  }

  // Address of the type cell.
  intptr_t* type_adr() const {
    return _pd->intptr_at_adr(_base_off);
  }

  void set_type(intptr_t k) {
    _pd->set_intptr_at(_base_off, k);
  }

  static int static_cell_count() {
    return cell_count;
  }

  static ByteSize size() {
    return in_ByteSize(cell_count * DataLayout::cell_size);
  }

  // Offset of the type cell, for code generation.
  ByteSize type_offset() {
    return DataLayout::cell_offset(_base_off);
  }

  // GC support
  void clean_weak_klass_links(bool always_clean);

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it);

  void print_data_on(outputStream* st) const;
};
941
942 // Entries to collect type information at a call: contains arguments
943 // (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
944 // number of cells. Because the number of cells for the return type is
945 // smaller than the number of cells for the type of an arguments, the
946 // number of cells is used to tell how many arguments are profiled and
947 // whether a return value is profiled. See has_arguments() and
948 // has_return().
949 class TypeEntriesAtCall {
950 private:
  // Cell offsets of the per-argument stack-slot/type entries, shifted
  // past the header cells that TypeEntriesAtCall itself occupies.
  static int stack_slot_local_offset(int i) {
    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
  }

  static int argument_type_local_offset(int i) {
    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
  }
958
1110 }
1111
  // Offset of the type cell for argument i, for code generation.
  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  // Offset of the return-type cell, for code generation.
  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    // Only clean the sections that are actually present in this record.
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {
    // Mirror the structure of clean_weak_klass_links above.
    if (has_arguments()) {
      _args.metaspace_pointers_do(it);
    }
    if (has_return()) {
      _ret.metaspace_pointers_do(it);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1141 };
1142
1143 // ReceiverTypeData
1144 //
1145 // A ReceiverTypeData is used to access profiling information about a
1146 // dynamic type check. It consists of a series of (Klass*, count)
1147 // pairs which are used to store a type profile for the receiver of
1148 // the check, the associated count is incremented every time the type
1149 // is seen. A per ReceiverTypeData counter is incremented on type
1150 // overflow (when there's no more room for a not yet profiled Klass*).
1151 //
1152 class ReceiverTypeData : public CounterData {
1153 friend class VMStructs;
1154 friend class JVMCIVMStructs;
1155 protected:
1156 enum {
1157 receiver0_offset = counter_cell_count,
1158 count0_offset,
1159 receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
1230 //
1231 set_count(0);
1232 set_receiver(row, nullptr);
1233 set_receiver_count(row, 0);
1234 }
1235
1236 // Code generation support
  // Byte offset of the receiver Klass* cell for the given row (code-gen support).
  static ByteSize receiver_offset(uint row) {
    return cell_offset(receiver_cell_index(row));
  }
  // Byte offset of the count cell paired with the given receiver row.
  static ByteSize receiver_count_offset(uint row) {
    return cell_offset(receiver_count_cell_index(row));
  }
  // Total byte size of a ReceiverTypeData record (all static cells).
  static ByteSize receiver_type_data_size() {
    return cell_offset(static_cell_count());
  }
1246
1247 // GC support
1248 virtual void clean_weak_klass_links(bool always_clean);
1249
1250 // CDS support
1251 virtual void metaspace_pointers_do(MetaspaceClosure* it);
1252
1253 void print_receiver_data_on(outputStream* st) const;
1254 void print_data_on(outputStream* st, const char* extra = nullptr) const;
1255 };
1256
1257 // VirtualCallData
1258 //
1259 // A VirtualCallData is used to access profiling information about a
1260 // virtual call. For now, it has nothing more than a ReceiverTypeData.
1261 class VirtualCallData : public ReceiverTypeData {
1262 public:
  // A VirtualCallData may overlay either a plain virtual-call layout or a
  // virtual-call-with-type-profile layout, so accept both tags.
  VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  }
1267
  virtual bool is_VirtualCallData() const { return true; } // type discriminator override
1269
1270 static int static_cell_count() {
1271 // At this point we could add more profile state, e.g., for arguments.
1272 // But for now it's the same size as the base record type.
1398
  // Byte offset of the profiled type cell for argument i (code-gen support).
  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }
1402
  // Byte offset of the profiled return-type cell (code-gen support).
  ByteSize return_type_offset() {
    return _ret.type_offset();
  }
1406
1407 // GC support
1408 virtual void clean_weak_klass_links(bool always_clean) {
1409 ReceiverTypeData::clean_weak_klass_links(always_clean);
1410 if (has_arguments()) {
1411 _args.clean_weak_klass_links(always_clean);
1412 }
1413 if (has_return()) {
1414 _ret.clean_weak_klass_links(always_clean);
1415 }
1416 }
1417
1418 // CDS support
1419 virtual void metaspace_pointers_do(MetaspaceClosure* it) {
1420 ReceiverTypeData::metaspace_pointers_do(it);
1421 if (has_arguments()) {
1422 _args.metaspace_pointers_do(it);
1423 }
1424 if (has_return()) {
1425 _ret.metaspace_pointers_do(it);
1426 }
1427 }
1428
1429 virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1430 };
1431
1432 // RetData
1433 //
1434 // A RetData is used to access profiling information for a ret bytecode.
1435 // It is composed of a count of the number of times that the ret has
1436 // been executed, followed by a series of triples of the form
1437 // (bci, count, di) which count the number of times that some bci was the
1438 // target of the ret and cache a corresponding data displacement.
1439 class RetData : public CounterData {
1440 protected:
1441 enum {
1442 bci0_offset = counter_cell_count,
1443 count0_offset,
1444 displacement0_offset,
1445 ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
1446 };
1447
1448 void set_bci(uint row, int bci) {
1592 // and an array start.
1593 class ArrayData : public ProfileData {
1594 friend class VMStructs;
1595 friend class JVMCIVMStructs;
1596 protected:
1597 friend class DataLayout;
1598
1599 enum {
1600 array_len_off_set,
1601 array_start_off_set
1602 };
1603
1604 uint array_uint_at(int index) const {
1605 int aindex = index + array_start_off_set;
1606 return uint_at(aindex);
1607 }
1608 int array_int_at(int index) const {
1609 int aindex = index + array_start_off_set;
1610 return int_at(aindex);
1611 }
1612 void array_set_int_at(int index, int value) {
1613 int aindex = index + array_start_off_set;
1614 set_int_at(aindex, value);
1615 }
1616
  // Code generation support for subclasses.
  // Byte offset of array element 'index' within the enclosing DataLayout.
  static ByteSize array_element_offset(int index) {
    return cell_offset(array_start_off_set + index);
  }
1621
1622 public:
  ArrayData(DataLayout* layout) : ProfileData(layout) {} // overlay an existing layout
1624
  virtual bool is_ArrayData() const { return true; } // type discriminator override
1626
  // Array data is variable-length; -1 signals that the cell count is
  // per-instance rather than static.
  static int static_cell_count() {
    return -1;
  }
1630
1631 int array_len() const {
1804
  // Number of profiled parameters; each parameter consumes
  // per_arg_count() cells of the underlying array.
  int number_of_parameters() const {
    return array_len() / TypeStackSlotEntries::per_arg_count();
  }
1808
  const TypeStackSlotEntries* parameters() const { return &_parameters; } // raw access to the slot/type table
1810
  // Stack slot recorded for parameter i.
  uint stack_slot(int i) const {
    return _parameters.stack_slot(i);
  }
1814
1815 void set_type(int i, Klass* k) {
1816 intptr_t current = _parameters.type(i);
1817 _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
1818 }
1819
  // GC support: forward to the parameter slot/type table.
  virtual void clean_weak_klass_links(bool always_clean) {
    _parameters.clean_weak_klass_links(always_clean);
  }
1823
1824 // CDS support
  // CDS support: forward to the parameter slot/type table.
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {
    _parameters.metaspace_pointers_do(it);
  }
1828
1829 virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1830
  // Byte offset of parameter i's stack-slot cell (code-gen support).
  static ByteSize stack_slot_offset(int i) {
    return cell_offset(stack_slot_local_offset(i));
  }
1834
  // Byte offset of parameter i's profiled-type cell (code-gen support).
  static ByteSize type_offset(int i) {
    return cell_offset(type_local_offset(i));
  }
1838 };
1839
1840 // SpeculativeTrapData
1841 //
1842 // A SpeculativeTrapData is used to record traps due to type
1843 // speculation. It records the root of the compilation: that type
1844 // speculation is wrong in the context of one compilation (for
1845 // method1) doesn't mean it's wrong in the context of another one (for
1846 // method2). Type speculation could have more/different data in the
1847 // context of the compilation of method2 and it's worthwhile to try an
1848 // optimization that failed for compilation of method1 in the context
1879 }
1880
  // Fixed-size record: the virtual cell count is just the static one.
  virtual int cell_count() const {
    return static_cell_count();
  }
1884
  // Direct accessor
  // The method that was the root of the compilation in which the
  // speculation failed (see the class comment above).
  Method* method() const {
    return (Method*)intptr_at(speculative_trap_method);
  }
1889
  // Record the compilation-root method. Old (redefined) methods are
  // rejected by the assert.
  void set_method(Method* m) {
    assert(!m->is_old(), "cannot add old methods");
    set_intptr_at(speculative_trap_method, (intptr_t)m);
  }
1894
  // Byte offset of the method cell (code-gen support).
  static ByteSize method_offset() {
    return cell_offset(speculative_trap_method);
  }
1898
1899 // CDS support
1900 virtual void metaspace_pointers_do(MetaspaceClosure* it);
1901
1902 virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
1903 };
1904
1905 // MethodData*
1906 //
1907 // A MethodData* holds information which has been collected about
1908 // a method. Its layout looks like this:
1909 //
1910 // -----------------------------
1911 // | header |
1912 // | klass |
1913 // -----------------------------
1914 // | method |
1915 // | size of the MethodData* |
1916 // -----------------------------
1917 // | Data entries... |
1918 // | (variable size) |
1919 // | |
1920 // . .
1921 // . .
1991
1992 class MethodData : public Metadata {
1993 friend class VMStructs;
1994 friend class JVMCIVMStructs;
1995 friend class ProfileData;
1996 friend class TypeEntriesAtCall;
1997 friend class ciMethodData;
1998
1999 // If you add a new field that points to any metaspace object, you
2000 // must add this field to MethodData::metaspace_pointers_do().
2001
2002 // Back pointer to the Method*
2003 Method* _method;
2004
2005 // Size of this oop in bytes
2006 int _size;
2007
2008 // Cached hint for bci_to_dp and bci_to_data
2009 int _hint_di;
2010
2011 Mutex* volatile _extra_data_lock; // FIXME: CDS support
2012
2013 MethodData(const methodHandle& method);
2014 public:
2015 MethodData();
2016
2017 static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
2018
2019 virtual bool is_methodData() const { return true; }
2020 void initialize();
2021
  // Whole-method sticky bits and flags
  enum {
    _trap_hist_limit = Deoptimization::Reason_TRAP_HISTORY_LENGTH, // one u1 counter per deopt reason
    _trap_hist_mask = max_jubyte, // mask applied when reading the u1 counters (see trap_count)
    _extra_data_count = 4 // extra DataLayout headers, for trap history
  }; // Public flag values
2028
2029 // Compiler-related counters.
2030 class CompilerCounters {
2031 friend class VMStructs;
2032 friend class JVMCIVMStructs;
2033
2034 uint _nof_decompiles; // count of all nmethod removals
2035 uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits
2036 uint _nof_overflow_traps; // trap count, excluding _trap_hist
2037 uint __gap;
2038 union {
2039 intptr_t _align;
2040 // JVMCI separates trap history for OSR compilations from normal compilations
2041 u1 _array[JVMCI_ONLY(2 *) MethodData::_trap_hist_limit];
2042 } _trap_hist;
2043
2044 public:
    // Zero all scalar counters, then clear the trap-history bytes
    // word-by-word (the union with _align keeps them word-aligned).
    CompilerCounters() : _nof_decompiles(0), _nof_overflow_recompiles(0), _nof_overflow_traps(0), __gap(0) {
#ifndef ZERO
      // Some Zero platforms do not have expected alignment, and do not use
      // this code. static_assert would still fire and fail for them.
      static_assert(sizeof(_trap_hist) % HeapWordSize == 0, "align");
#endif
      uint size_in_words = sizeof(_trap_hist) / HeapWordSize;
      Copy::zero_to_words((HeapWord*) &_trap_hist, size_in_words);
    }
2054
    // Return (uint)-1 for overflow.
    uint trap_count(int reason) const {
      assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
      // A saturated counter holds _trap_hist_mask (255): (255+1) & mask == 0,
      // so the expression yields -1, which converts to (uint)-1.
      return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
    }
2060
2061 uint inc_trap_count(int reason) {
2062 // Count another trap, anywhere in this method.
2063 assert(reason >= 0, "must be single trap");
2064 assert((uint)reason < ARRAY_SIZE(_trap_hist._array), "oob");
2065 uint cnt1 = 1 + _trap_hist._array[reason];
2296 return _backedge_counter_start;
2297 }
2298
  // Counter growth since the last reset_start_counters() snapshot.
  int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
  int backedge_count_delta() { return backedge_count() - backedge_count_start(); }
2301
  // Snapshot the current counters as the baseline for the *_delta() queries.
  void reset_start_counters() {
    _invocation_counter_start = invocation_count();
    _backedge_counter_start = backedge_count();
  }
2306
  // Direct access to the underlying InvocationCounter objects.
  InvocationCounter* invocation_counter() { return &_invocation_counter; }
  InvocationCounter* backedge_counter() { return &_backedge_counter; }
2309
2310 #if INCLUDE_JVMCI
  // Address of the JVMCI failed-speculations list head, so the compiler
  // can update it in place.
  FailedSpeculation** get_failed_speculations_address() {
    return &_failed_speculations;
  }
2314 #endif
2315
2316 #if INCLUDE_CDS
2317 void remove_unshareable_info();
2318 void restore_unshareable_info(TRAPS);
2319 #endif
2320
  // Tri-state _would_profile collapsed to a boolean interface.
  void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
  bool would_profile() const { return _would_profile != no_profile; }
2323
  // Loop and block counts for this method (stored narrow, read as int).
  int num_loops() const { return _num_loops; }
  void set_num_loops(short n) { _num_loops = n; }
  int num_blocks() const { return _num_blocks; }
  void set_num_blocks(short n) { _num_blocks = n; }
2328
2329 bool is_mature() const; // consult mileage and ProfileMaturityPercentage
2330 static int mileage_of(Method* m);
2331
  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  // NOTE(review): flag semantics are defined by the escape-analysis code
  // that sets and reads them — confirm against those users before relying
  // on the names alone.
  enum EscapeFlag {
    estimated = 1 << 0,
    return_local = 1 << 1,
    return_allocated = 1 << 2,
    allocated_escapes = 1 << 3,
    unknown_modified = 1 << 4
  };
2340
2540 void print_value_on(outputStream* st) const;
2541
2542 // printing support for method data
2543 void print_data_on(outputStream* st) const;
2544
2545 const char* internal_name() const { return "{method data}"; }
2546
2547 // verification
2548 void verify_on(outputStream* st);
2549 void verify_data_on(outputStream* st);
2550
2551 static bool profile_parameters_for_method(const methodHandle& m);
2552 static bool profile_arguments();
2553 static bool profile_arguments_jsr292_only();
2554 static bool profile_return();
2555 static bool profile_parameters();
2556 static bool profile_return_jsr292_only();
2557
2558 void clean_method_data(bool always_clean);
2559 void clean_weak_method_links();
2560 Mutex* extra_data_lock();
2561 void check_extra_data_locked() const NOT_DEBUG_RETURN;
2562 };
2563
2564 #endif // SHARE_OOPS_METHODDATA_HPP
|