// The field's type (TOS), offset, holder klass, and index within that class can all be acquired
// together and are used to populate this structure. These entries are contained
// within the ConstantPoolCache and are accessed with indices added to the bytecode after
// rewriting.

// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.

// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
// the C++ compiler from potentially inserting random values in unused gaps.
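// (A rough sketch of the reasoning, assuming the LP64 layout below: the declared fields
// add up to 20 bytes while the struct is padded to 24 for pointer alignment, and the
// explicit, zero-initialized u4 _padding takes the place of those tail bytes so they do
// not carry arbitrary contents into the dumped archive.)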

//class InstanceKlass;
class ResolvedFieldEntry {
  friend class VMStructs;

  InstanceKlass* _field_holder; // Field holder klass
  int _field_offset;            // Field offset in bytes
  u2 _field_index;              // Index into field information in holder InstanceKlass
  u2 _cpool_index;              // Constant pool index
  u1 _tos_state;                // TOS state
  u1 _flags;                    // Flags: [0000|00|is_final|is_volatile]
  u1 _get_code, _put_code;      // Get and Put bytecodes of the field
#ifdef _LP64
  u4 _padding;
#endif

public:
  ResolvedFieldEntry(u2 cpi) :
    _field_holder(nullptr),
    _field_offset(0),
    _field_index(0),
    _cpool_index(cpi),
    _tos_state(0),
    _flags(0),
    _get_code(0),
    _put_code(0)
#ifdef _LP64
    , _padding(0)
#endif
    {}

  ResolvedFieldEntry() :
    ResolvedFieldEntry(0) {}

  // Bit shift to get flags
  // Note: Only two flags exist at the moment but more could be added
  enum {
    is_volatile_shift = 0,
    is_final_shift    = 1, // unused
  };
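
  // Illustrative encoding of the two flags above (values made up): a volatile, non-final
  // field would have _flags == (1 << is_volatile_shift) == 0x01, and a field that is both
  // final and volatile would have _flags == 0x03.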

  // Getters
  InstanceKlass* field_holder() const { return _field_holder; }
  int field_offset()            const { return _field_offset; }
  u2 field_index()              const { return _field_index; }
  u2 constant_pool_index()      const { return _cpool_index; }
  u1 tos_state()                const { return _tos_state; }
  u1 get_code()                 const { return AtomicAccess::load_acquire(&_get_code); }
  u1 put_code()                 const { return AtomicAccess::load_acquire(&_put_code); }
  bool is_final()               const { return (_flags & (1 << is_final_shift)) != 0; }
  bool is_volatile()            const { return (_flags & (1 << is_volatile_shift)) != 0; }
  bool is_resolved(Bytecodes::Code code) const {
    switch (code) {
      case Bytecodes::_getstatic:
      case Bytecodes::_getfield:
        return (get_code() == code);
      case Bytecodes::_putstatic:
      case Bytecodes::_putfield:
        return (put_code() == code);
      default:
        ShouldNotReachHere();
        return false;
    }
  }
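
  // For illustration only (the names below are assumptions, not part of this class): a
  // caller holding a ResolvedFieldEntry* entry might guard its use of the cached data
  // like this:
  //
  //   if (entry->is_resolved(Bytecodes::_getfield)) {
  //     int offset = entry->field_offset(); // safe: published before _get_code in fill_in()
  //   }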

  // Printing
  void print_on(outputStream* st) const;

  void set_flags(bool is_final_flag, bool is_volatile_flag) {
    int new_flags = (is_final_flag << is_final_shift) | static_cast<int>(is_volatile_flag);
    _flags = checked_cast<u1>(new_flags);
    assert(is_final() == is_final_flag, "Must be");
    assert(is_volatile() == is_volatile_flag, "Must be");
  }
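
  // Note (illustrative values): the argument order is (final, volatile) even though
  // is_volatile occupies bit 0, so set_flags(/*is_final_flag*/ true, /*is_volatile_flag*/ false)
  // yields _flags == 0x02.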

  inline void set_bytecode(u1* code, u1 new_code) {
#ifdef ASSERT
    // Read once.
    volatile Bytecodes::Code c = (Bytecodes::Code)*code;
    assert(c == 0 || c == new_code || new_code == 0, "update must be consistent");
#endif
    AtomicAccess::release_store(code, new_code);
  }

  // Populate the structure with resolution information
  void fill_in(InstanceKlass* klass, int offset, u2 index, u1 tos_state, u1 b1, u1 b2) {
    _field_holder = klass;
    _field_offset = offset;
    _field_index = index;
    _tos_state = tos_state;

    // These must be set after the other fields: the release store in set_bytecode()
    // pairs with the load_acquire in get_code()/put_code(), so a reader that observes a
    // resolved bytecode also observes the fields written above.
    set_bytecode(&_get_code, b1);
    set_bytecode(&_put_code, b2);
  }
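
  // Illustrative call only (names and values are assumptions, not taken from the actual
  // resolution code):
  //
  //   entry->fill_in(ik, off, idx, tos, Bytecodes::_getfield, Bytecodes::_putfield);
  //
  // A get or put code left at 0 simply reads as "not resolved" via is_resolved() above.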

  // CDS
#if INCLUDE_CDS
  void remove_unshareable_info();
  void mark_and_relocate();
#endif

  // Offsets
  static ByteSize field_holder_offset() { return byte_offset_of(ResolvedFieldEntry, _field_holder); }
  static ByteSize field_offset_offset() { return byte_offset_of(ResolvedFieldEntry, _field_offset); }
  static ByteSize field_index_offset()  { return byte_offset_of(ResolvedFieldEntry, _field_index); }
  static ByteSize get_code_offset()     { return byte_offset_of(ResolvedFieldEntry, _get_code); }
  static ByteSize put_code_offset()     { return byte_offset_of(ResolvedFieldEntry, _put_code); }
  static ByteSize type_offset()         { return byte_offset_of(ResolvedFieldEntry, _tos_state); }
  static ByteSize flags_offset()        { return byte_offset_of(ResolvedFieldEntry, _flags); }

};

#endif //SHARE_OOPS_RESOLVEDFIELDENTRY_HPP
// The field's type (TOS), offset, holder klass, and index within that class can all be acquired
// together and are used to populate this structure. These entries are contained
// within the ConstantPoolCache and are accessed with indices added to the bytecode after
// rewriting.

// Field bytecodes start with a constant pool index as their operand, which is then rewritten to
// a "field index", which is an index into the array of ResolvedFieldEntry.

// The explicit paddings are necessary for generating deterministic CDS archives. They prevent
// the C++ compiler from potentially inserting random values in unused gaps.

//class InstanceKlass;
class ResolvedFieldEntry {
  friend class VMStructs;

  InstanceKlass* _field_holder; // Field holder klass
  int _field_offset;            // Field offset in bytes
  u2 _field_index;              // Index into field information in holder InstanceKlass
  u2 _cpool_index;              // Constant pool index
  u1 _tos_state;                // TOS state
  u1 _flags;                    // Flags: [000|has_null_marker|is_null_free_inline_type|is_flat|is_final|is_volatile]
  u1 _get_code, _put_code;      // Get and Put bytecodes of the field
#ifdef _LP64
  u4 _padding;
#endif

public:
  ResolvedFieldEntry(u2 cpi) :
    _field_holder(nullptr),
    _field_offset(0),
    _field_index(0),
    _cpool_index(cpi),
    _tos_state(0),
    _flags(0),
    _get_code(0),
    _put_code(0)
#ifdef _LP64
    , _padding(0)
#endif
    {}

  ResolvedFieldEntry() :
    ResolvedFieldEntry(0) {}

  // Bit shift to get flags
  enum {
    is_volatile_shift              = 0,
    is_final_shift                 = 1, // unused
    is_flat_shift                  = 2,
    is_null_free_inline_type_shift = 3,
    has_null_marker_shift          = 4,
    max_flag_shift                 = has_null_marker_shift
  };
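
  // Illustrative encoding of the flags above (values made up): a volatile, flat field
  // would have _flags == (1 << is_volatile_shift) | (1 << is_flat_shift) == 0x05, with
  // the top three bits unused as shown in the _flags comment.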

  // Getters
  InstanceKlass* field_holder() const { return _field_holder; }
  int field_offset()            const { return _field_offset; }
  u2 field_index()              const { return _field_index; }
  u2 constant_pool_index()      const { return _cpool_index; }
  u1 tos_state()                const { return _tos_state; }
  u1 get_code()                 const { return AtomicAccess::load_acquire(&_get_code); }
  u1 put_code()                 const { return AtomicAccess::load_acquire(&_put_code); }
  bool is_volatile()            const { return (_flags & (1 << is_volatile_shift)) != 0; }
  bool is_final()               const { return (_flags & (1 << is_final_shift)) != 0; }
  bool is_flat()                const { return (_flags & (1 << is_flat_shift)) != 0; }
  bool is_null_free_inline_type() const { return (_flags & (1 << is_null_free_inline_type_shift)) != 0; }
  bool has_null_marker()        const { return (_flags & (1 << has_null_marker_shift)) != 0; }
  bool is_resolved(Bytecodes::Code code) const {
    switch (code) {
      case Bytecodes::_getstatic:
      case Bytecodes::_getfield:
        return (get_code() == code);
      case Bytecodes::_putstatic:
      case Bytecodes::_putfield:
        return (put_code() == code);
      default:
        ShouldNotReachHere();
        return false;
    }
  }

  // Printing
  void print_on(outputStream* st) const;

  void set_flags(bool is_volatile_flag,
                 bool is_final_flag,
                 bool is_flat_flag,
                 bool is_null_free_inline_type_flag,
                 bool has_null_marker_flag) {
    int new_flags =
      ((is_volatile_flag ? 1 : 0)              << is_volatile_shift) |
      ((is_final_flag ? 1 : 0)                 << is_final_shift) |
      ((is_flat_flag ? 1 : 0)                  << is_flat_shift) |
      ((is_null_free_inline_type_flag ? 1 : 0) << is_null_free_inline_type_shift) |
      ((has_null_marker_flag ? 1 : 0)          << has_null_marker_shift);
    _flags = checked_cast<u1>(new_flags);
    assert(is_volatile() == is_volatile_flag, "Must be");
    assert(is_final() == is_final_flag, "Must be");
    assert(is_flat() == is_flat_flag, "Must be");
    assert(is_null_free_inline_type() == is_null_free_inline_type_flag, "Must be");
    assert(has_null_marker() == has_null_marker_flag, "Must be");
  }
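
  // Illustrative only: set_flags(/*volatile*/ true, /*final*/ false, /*flat*/ false,
  // /*null free*/ false, /*null marker*/ false) yields _flags == 0x01; the argument
  // order follows the bit order defined in the enum above.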

  inline void set_bytecode(u1* code, u1 new_code) {
#ifdef ASSERT
    // Read once.
    volatile Bytecodes::Code c = (Bytecodes::Code)*code;
    assert(c == 0 || c == new_code || new_code == 0, "update must be consistent");
#endif
    AtomicAccess::release_store(code, new_code);
  }

  // Populate the structure with resolution information
  void fill_in(InstanceKlass* klass, int offset, u2 index, u1 tos_state, u1 b1, u1 b2) {
    _field_holder = klass;
    _field_offset = offset;
    _field_index = index;
    _tos_state = tos_state;

    // These must be set after the other fields: the release store in set_bytecode()
    // pairs with the load_acquire in get_code()/put_code(), so a reader that observes a
    // resolved bytecode also observes the fields written above.
    set_bytecode(&_get_code, b1);
    set_bytecode(&_put_code, b2);
    assert(is_valid(), "invalid");
  }

  // CDS
#if INCLUDE_CDS
  void remove_unshareable_info();
  void mark_and_relocate();
#endif

  // Offsets
  static ByteSize field_holder_offset() { return byte_offset_of(ResolvedFieldEntry, _field_holder); }
  static ByteSize field_offset_offset() { return byte_offset_of(ResolvedFieldEntry, _field_offset); }
  static ByteSize field_index_offset()  { return byte_offset_of(ResolvedFieldEntry, _field_index); }
  static ByteSize get_code_offset()     { return byte_offset_of(ResolvedFieldEntry, _get_code); }
  static ByteSize put_code_offset()     { return byte_offset_of(ResolvedFieldEntry, _put_code); }
  static ByteSize type_offset()         { return byte_offset_of(ResolvedFieldEntry, _tos_state); }
  static ByteSize flags_offset()        { return byte_offset_of(ResolvedFieldEntry, _flags); }

  // Debug help
  bool is_valid() const;
};

#endif //SHARE_OOPS_RESOLVEDFIELDENTRY_HPP