1 /*
2 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_INLINEKLASS_HPP
26 #define SHARE_VM_OOPS_INLINEKLASS_HPP
27
28 #include "oops/instanceKlass.hpp"
29 #include "oops/layoutKind.hpp"
30 #include "oops/oopsHierarchy.hpp"
31 #include "utilities/exceptions.hpp"
32 #include "utilities/globalDefinitions.hpp"
33
34 template <typename T>
35 class Array;
36 class ClassFileParser;
37 template <typename T>
38 class GrowableArray;
39 class Method;
40 class RegisterMap;
41 class SigEntry;
42
43 // An InlineKlass is a specialized InstanceKlass for concrete value classes
44 // (abstract value classes are represented by InstanceKlass)
45
class InlineKlass: public InstanceKlass {
  friend class VMStructs;
  friend class InstanceKlass;

 public:
  static const KlassKind Kind = InlineKlassKind;

  // The member fields of the InlineKlass.
  //
  // All Klass objects have vtables starting at offset `sizeof(InstanceKlass)`.
  //
  // This has the effect that sub-klasses of InstanceKlass can't have their own
  // C++ fields, because those would overlap with the vtables (or some of the
  // other dynamically-sized sections).
  //
  // To work around this we stamp out the block members *after* all
  // dynamically-sized sections belonging to the InstanceKlass part of the
  // object.
  //
  // InlineKlass object layout:
  // +-----------------------+
  // | sizeof(InstanceKlass) |
  // +-----------------------+ <= InstanceKlass:header_size()
  // | vtable                |
  // +-----------------------+
  // | other sections        |
  // +-----------------------+ <= end_of_instance_klass()
  // | InlineKlass::Members  |
  // +-----------------------+
  //
  // All access goes through members()/the public accessors below; the static
  // *_offset() functions near the bottom expose field offsets *within* this
  // block so generated code can load them.
  class Members {
    friend class InlineKlass;

    // Addresses used for inline type calling convention
    Array<SigEntry>* _extended_sig;
    Array<VMRegPair>* _return_regs;

    // Entry points used when packing/unpacking a value's fields for the
    // scalarized calling convention (see the public accessors below and
    // initialize_calling_convention()).
    address _pack_handler;
    address _pack_handler_jobject;
    address _unpack_handler;

    // Offset of the pre-allocated "null reset" value; 0 means not yet
    // initialized (null_reset_value_offset() asserts on that).
    int _null_reset_value_offset;
    int _payload_offset; // offset of the beginning of the payload in a heap buffered instance
    int _payload_size_in_bytes; // size of payload layout
    int _payload_alignment; // alignment required for payload
    int _null_free_non_atomic_size_in_bytes; // size of null-free non-atomic flat layout
    int _null_free_non_atomic_alignment; // alignment requirement for null-free non-atomic layout
    int _null_free_atomic_size_in_bytes; // size and alignment requirement for a null-free atomic layout, -1 if no atomic flat layout is possible
    int _nullable_atomic_size_in_bytes; // size and alignment requirement for a nullable layout (always atomic), -1 if no nullable flat layout is possible
    int _nullable_non_atomic_size_in_bytes; // size and alignment requirement for a nullable non-atomic layout, -1 if not available
    int _null_marker_offset; // expressed as an offset from the beginning of the object for a heap buffered value
                             // payload_offset must be subtracted to get the offset from the beginning of the payload

    // Out-of-line constructor; initializes the block.
    Members();
  };

  InlineKlass();

 private:

  // Constructor
  InlineKlass(const ClassFileParser& parser);

  // Calculates where the members are supposed to be placed
  address calculate_members_address() const;

  // Returns the Members block stamped out after the InstanceKlass sections.
  // _adr_inline_klass_members (an InstanceKlass field) caches its address;
  // it must have been set before any accessor below is used.
  Members& members() {
    assert(_adr_inline_klass_members != nullptr, "Should have been initialized");
    return *reinterpret_cast<Members*>(_adr_inline_klass_members);
  }

  // Const overload: forwards to the non-const version and re-adds constness.
  inline const Members& members() const {
    InlineKlass* ik = const_cast<InlineKlass*>(this);
    return const_cast<const Members&>(ik->members());
  }

 public:

  // "Empty inline type" flag, stored in the shared _misc_flags word.
  bool is_empty_inline_type() const { return _misc_flags.is_empty_inline_type(); }
  void set_is_empty_inline_type() { _misc_flags.set_is_empty_inline_type(true); }

  // Members access functions

  const Array<SigEntry>* extended_sig() const {return members()._extended_sig; }
  void set_extended_sig(Array<SigEntry>* extended_sig) { members()._extended_sig = extended_sig; }

  const Array<VMRegPair>* return_regs() const { return members()._return_regs; }
  void set_return_regs(Array<VMRegPair>* return_regs) { members()._return_regs = return_regs; }

  // pack and unpack handlers for inline types return

  address pack_handler() const { return members()._pack_handler; }
  void set_pack_handler(address pack_handler) { members()._pack_handler = pack_handler; }

  address pack_handler_jobject() const { return members()._pack_handler_jobject; }
  void set_pack_handler_jobject(address pack_handler_jobject) { members()._pack_handler_jobject = pack_handler_jobject; }

  address unpack_handler() const { return members()._unpack_handler; }
  void set_unpack_handler(address unpack_handler) { members()._unpack_handler = unpack_handler; }

  // Offset of the null reset value; must not be read before it has been set
  // (0 is used as the "uninitialized" sentinel).
  int null_reset_value_offset() {
    int offset = members()._null_reset_value_offset;
    assert(offset != 0, "must not be called if not initialized");
    return offset;
  }
  void set_null_reset_value_offset(int offset) { members()._null_reset_value_offset = offset; }

  // Offset of the payload inside a heap-buffered instance; 0 means "not yet
  // initialized", hence the assert.
  int payload_offset() const {
    int offset = members()._payload_offset;
    assert(offset != 0, "Must be initialized before use");
    return offset;
  }
  void set_payload_offset(int offset) { members()._payload_offset = offset; }

  int payload_size_in_bytes() const { return members()._payload_size_in_bytes; }
  void set_payload_size_in_bytes(int payload_size) { members()._payload_size_in_bytes = payload_size; }

  int payload_alignment() const { return members()._payload_alignment; }
  void set_payload_alignment(int alignment) { members()._payload_alignment = alignment; }

  // For each of the four flat layout variants below, a size of -1 means that
  // variant is not available for this class (see the has_*_layout() tests).

  int null_free_non_atomic_size_in_bytes() const { return members()._null_free_non_atomic_size_in_bytes; }
  void set_null_free_non_atomic_size_in_bytes(int size) { members()._null_free_non_atomic_size_in_bytes = size; }
  bool has_null_free_non_atomic_layout() const { return null_free_non_atomic_size_in_bytes() != -1; }

  int null_free_non_atomic_alignment() const { return members()._null_free_non_atomic_alignment; }
  void set_null_free_non_atomic_alignment(int alignment) { members()._null_free_non_atomic_alignment = alignment; }

  int null_free_atomic_size_in_bytes() const { return members()._null_free_atomic_size_in_bytes; }
  void set_null_free_atomic_size_in_bytes(int size) { members()._null_free_atomic_size_in_bytes = size; }
  bool has_null_free_atomic_layout() const { return null_free_atomic_size_in_bytes() != -1; }

  int nullable_atomic_size_in_bytes() const { return members()._nullable_atomic_size_in_bytes; }
  void set_nullable_atomic_size_in_bytes(int size) { members()._nullable_atomic_size_in_bytes = size; }
  bool has_nullable_atomic_layout() const { return nullable_atomic_size_in_bytes() != -1; }

  int nullable_non_atomic_size_in_bytes() const { return members()._nullable_non_atomic_size_in_bytes; }
  void set_nullable_non_atomic_size_in_bytes(int size) { members()._nullable_non_atomic_size_in_bytes = size; }
  bool has_nullable_non_atomic_layout() const { return nullable_non_atomic_size_in_bytes() != -1; }

  int null_marker_offset() const { return members()._null_marker_offset; }
  void set_null_marker_offset(int offset) { members()._null_marker_offset = offset; }
  // Null marker offset relative to the start of the payload (the stored
  // offset is relative to the start of a heap-buffered object).
  int null_marker_offset_in_payload() const { return null_marker_offset() - payload_offset(); }

  // True if at least one flat layout with a null marker exists.
  bool supports_nullable_layouts() const {
    return has_nullable_non_atomic_layout() || has_nullable_atomic_layout();
  }

  // Null-marker helpers below operate on a raw payload address.
  // Convention (see the setters): marker byte == 0 means "payload is null",
  // non-zero (1) means "payload holds a value".

  jbyte* null_marker_address(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    return (jbyte*)payload + null_marker_offset_in_payload();
  }

  bool is_payload_marked_as_null(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    return *null_marker_address(payload) == 0;
  }

  void mark_payload_as_non_null(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    *null_marker_address(payload) = 1;
  }

  void mark_payload_as_null(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    *null_marker_address(payload) = 0;
  }

  // True if this class can be laid out with the given LayoutKind.
  bool is_layout_supported(LayoutKind lk);

  // Alignment / size for a given layout kind (see the per-layout fields above).
  int layout_alignment(LayoutKind kind) const;
  int layout_size_in_bytes(LayoutKind kind) const;

#if INCLUDE_CDS
  void remove_unshareable_info() override;
#endif

 private:
  // Collects the scalarized field signature starting at base_off; used by the
  // calling convention setup. NOTE(review): exact contract defined in the .cpp.
  int collect_fields(GrowableArray<SigEntry>* sig, int base_off = 0, int null_marker_offset = -1);

  // Releases the pack/unpack handler blobs (presumably on class unloading —
  // see deallocate_contents()/cleanup()).
  void cleanup_blobs();

 public:
  // Type testing
  bool is_inline_klass_slow() const override { return true; }

  // Casting from Klass*

  static InlineKlass* cast(Klass* k) {
    return const_cast<InlineKlass*>(cast(const_cast<const Klass*>(k)));
  }

  // Checked downcast: asserts k is a non-null inline klass.
  static const InlineKlass* cast(const Klass* k) {
    assert(k != nullptr, "k should not be null");
    assert(k->is_inline_klass(), "cast to InlineKlass");
    return static_cast<const InlineKlass*>(k);
  }

  // Allocates a stand alone value in the Java heap
  // initialized to default value (cleared memory)
  instanceOop allocate_instance(TRAPS);

  // Address of the payload inside the heap-buffered instance o.
  address payload_addr(oop o) const;

  // Flattening-in-array queries (definitions in the .cpp).
  bool maybe_flat_in_array();
  bool is_always_flat_in_array();

  // True if the payload contains at least one oop (has a nonstatic oop map).
  bool contains_oops() const { return nonstatic_oop_map_count() > 0; }
  int nonstatic_oop_count();

  // Methods to copy payload between containers
  //
  // Methods taking a LayoutKind argument expect that both the source and the destination
  // layouts are compatible with the one specified in argument (alignment, size, presence
  // of a null marker). Reminder: the BUFFERED layout, used in values buffered in heap,
  // is compatible with all the other layouts.

  void write_value_to_addr(oop src, void* dst, LayoutKind lk, TRAPS);
  oop read_payload_from_addr(const oop src, size_t offset, LayoutKind lk, TRAPS);
  void copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized);

  // oop iterate raw inline type data pointer (where oop_addr may not be an oop, but backing/array-element)
  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure);

  // Bounded variant: only visits oops within [lo, hi).
  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, void* lo, void* hi);

  // calling convention support
  void initialize_calling_convention(TRAPS);

  bool can_be_passed_as_fields() const;
  bool can_be_returned_as_fields(bool init = false) const;
  void save_oop_fields(const RegisterMap& map, GrowableArray<Handle>& handles) const;
  void restore_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
  oop realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS);
  static InlineKlass* returned_inline_klass(const RegisterMap& reg_map, bool* return_oop = nullptr, Method* method = nullptr);

  // Offset of the Members block within the InlineKlass object
  // (delegates to the InstanceKlass-provided accessor).
  static ByteSize adr_members_offset() {
    return InstanceKlass::adr_inline_klass_members_offset();
  }

  // pack and unpack handlers. Need to be loadable from generated code
  // so at a fixed offset from the base of the klass pointer.
  // Note: the offsets below are relative to the start of the Members block
  // (combine with adr_members_offset() to reach them from the klass base).
  static ByteSize pack_handler_offset() {
    return byte_offset_of(Members, _pack_handler);
  }

  static ByteSize pack_handler_jobject_offset() {
    return byte_offset_of(Members, _pack_handler_jobject);
  }

  static ByteSize unpack_handler_offset() {
    return byte_offset_of(Members, _unpack_handler);
  }

  static ByteSize null_reset_value_offset_offset() {
    return byte_offset_of(Members, _null_reset_value_offset);
  }

  static ByteSize payload_offset_offset() {
    return byte_offset_of(Members, _payload_offset);
  }

  static ByteSize null_marker_offset_offset() {
    return byte_offset_of(Members, _null_marker_offset);
  }

  // The pre-allocated value used to reset fields/elements to "null".
  oop null_reset_value();
  void set_null_reset_value(oop val);

  // Class-unloading support.
  void deallocate_contents(ClassLoaderData* loader_data);
  static void cleanup(InlineKlass* ik) ;

  // Verification
  void verify_on(outputStream* st) override;
  void oop_verify_on(oop obj, outputStream* st) override;

};
324
325 #endif // SHARE_VM_OOPS_INLINEKLASS_HPP