1 /*
  2  * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_VM_OOPS_INLINEKLASS_HPP
 26 #define SHARE_VM_OOPS_INLINEKLASS_HPP
 27 
 28 #include "oops/inlineOop.hpp"
 29 #include "oops/instanceKlass.hpp"
 30 #include "oops/layoutKind.hpp"
 31 #include "oops/oopsHierarchy.hpp"
 32 #include "oops/valuePayload.hpp"
 33 #include "runtime/handles.hpp"
 34 #include "utilities/exceptions.hpp"
 35 #include "utilities/globalDefinitions.hpp"
 36 
 37 template <typename T>
 38 class Array;
 39 class ClassFileParser;
 40 template <typename T>
 41 class GrowableArray;
 42 class Method;
 43 class RegisterMap;
 44 class SigEntry;
 45 
 46 // An InlineKlass is a specialized InstanceKlass for concrete value classes
 47 // (abstract value classes are represented by InstanceKlass)
 48 
class InlineKlass: public InstanceKlass {
  friend class VMStructs;
  friend class InstanceKlass;

 public:
  static const KlassKind Kind = InlineKlassKind;

  // The member fields of the InlineKlass.
  //
  // All Klass objects have vtables starting at offset `sizeof(InstanceKlass)`.
  //
  // This has the effect that sub-klasses of InstanceKlass can't have their own
  // C++ fields, because those would overlap with the vtables (or some of the
  // other dynamically-sized sections).
  //
  // To work around this we stamp out the block members *after* all
  // dynamically-sized sections belonging to the InstanceKlass part of the
  // object.
  //
  // InlineKlass object layout:
  //   +-----------------------+
  //   | sizeof(InstanceKlass) |
  //   +-----------------------+ <= InstanceKlass::header_size()
  //   | vtable                |
  //   +-----------------------+
  //   | other sections        |
  //   +-----------------------+ <= end_of_instance_klass()
  //   | InlineKlass::Members  |
  //   +-----------------------+
  //
  class Members {
    friend class InlineKlass;

    // Addresses used for inline type calling convention
    Array<SigEntry>* _extended_sig;
    Array<VMRegPair>* _return_regs;

    // Generated stubs used when inline types are returned in scalarized form;
    // these must be reachable from compiled code (see the static *_offset()
    // helpers further below).
    address _pack_handler;
    address _pack_handler_jobject;
    address _unpack_handler;

    int _null_reset_value_offset;  // offset of the "null reset" value; 0 while uninitialized (asserted in null_reset_value_offset())
    int _payload_offset;           // offset of the beginning of the payload in a heap buffered instance
    int _payload_size_in_bytes;    // size of payload layout
    int _payload_alignment;        // alignment required for payload
    int _null_free_non_atomic_size_in_bytes; // size of null-free non-atomic flat layout
    int _null_free_non_atomic_alignment;     // alignment requirement for null-free non-atomic layout
    int _null_free_atomic_size_in_bytes;     // size and alignment requirement for a null-free atomic layout, -1 if no atomic flat layout is possible
    int _nullable_atomic_size_in_bytes;      // size and alignment requirement for a nullable layout (always atomic), -1 if no nullable flat layout is possible
    int _nullable_non_atomic_size_in_bytes;  // size and alignment requirement for a nullable non-atomic layout, -1 if not available
    int _null_marker_offset;       // expressed as an offset from the beginning of the object for a heap buffered value
                                   // payload_offset must be subtracted to get the offset from the beginning of the payload

    /* When we can't intrinsify the substitutability check, we can still avoid the call to isSubstitutable at runtime if the value object is small enough.
     * If all the fields are contained at once in a single long, we can load such a long from both operands, use a bitwise mask to remove the extra bits
     * (from header, padding...), and compare these masked longs.
     *
     * This doesn't always apply, for instance, if there are oops among the fields, we shouldn't carelessly load and compare: the GC might move the object in between.
     * To signal this fast path cannot be done on this current class, simply put 0 in _fast_acmp_mask.
     *
     * We also should take care of not loading further than the object, even if it means reading part of the header. For this reason, we can't use _payload_offset,
     * but we need our special offset. The offset doesn't need to be aligned on word boundary, or anything else.
     */
    int _fast_acmp_offset;    // if < 0, fast acmp doesn't apply
    int64_t _fast_acmp_mask;  // can be 0 for empty value classes

    Members();

    void print_on(outputStream* st) const;
  };

  InlineKlass();

 private:

  // Constructor
  InlineKlass(const ClassFileParser& parser);

  // Calculates where the members are supposed to be placed
  address calculate_members_address() const;

  // Returns the Members block stamped out past the InstanceKlass part (see
  // the layout diagram above). _adr_inline_klass_members must have been set
  // before any of the accessors below may be used.
  Members& members() {
    assert(_adr_inline_klass_members != nullptr, "Should have been initialized");
    return *reinterpret_cast<Members*>(_adr_inline_klass_members);
  }

  // Const overload: forwards to the non-const accessor and re-applies
  // constness to the result.
  inline const Members& members() const {
    InlineKlass* ik = const_cast<InlineKlass*>(this);
    return const_cast<const Members&>(ik->members());
  }

 public:

  bool is_empty_inline_type() const   { return _misc_flags.is_empty_inline_type(); }
  void set_is_empty_inline_type()     { _misc_flags.set_is_empty_inline_type(true); }

  // Members access functions

  const Array<SigEntry>* extended_sig() const                 {return members()._extended_sig; }
  void set_extended_sig(Array<SigEntry>* extended_sig)        { members()._extended_sig = extended_sig; }

  const Array<VMRegPair>* return_regs() const                 { return members()._return_regs; }
  void set_return_regs(Array<VMRegPair>* return_regs)         { members()._return_regs = return_regs; }

  // pack and unpack handlers for inline types return

  address pack_handler() const                                { return members()._pack_handler; }
  void set_pack_handler(address pack_handler)                 { members()._pack_handler = pack_handler; }

  address pack_handler_jobject() const                        { return members()._pack_handler_jobject; }
  void set_pack_handler_jobject(address pack_handler_jobject) { members()._pack_handler_jobject = pack_handler_jobject; }

  address unpack_handler() const                              { return members()._unpack_handler; }
  void set_unpack_handler(address unpack_handler)             { members()._unpack_handler = unpack_handler; }

  // A zero offset means "not yet initialized" — callers must not read it
  // before set_null_reset_value_offset() has run.
  int null_reset_value_offset() const {
    int offset = members()._null_reset_value_offset;
    assert(offset != 0, "must not be called if not initialized");
    return offset;
  }
  void set_null_reset_value_offset(int offset)                { members()._null_reset_value_offset = offset; }

  int payload_offset() const {
    int offset = members()._payload_offset;
    assert(offset != 0, "Must be initialized before use");
    return offset;
  }
  void set_payload_offset(int offset)                         { members()._payload_offset = offset; }

  int payload_size_in_bytes() const                           { return members()._payload_size_in_bytes; }
  void set_payload_size_in_bytes(int payload_size)            { members()._payload_size_in_bytes = payload_size; }

  int payload_alignment() const                               { return members()._payload_alignment; }
  void set_payload_alignment(int alignment)                   { members()._payload_alignment = alignment; }

  // For each optional flat layout, a size of -1 means that layout does not
  // exist for this klass (see the has_*_layout() predicates).
  int null_free_non_atomic_size_in_bytes() const              { return members()._null_free_non_atomic_size_in_bytes; }
  void set_null_free_non_atomic_size_in_bytes(int size)       { members()._null_free_non_atomic_size_in_bytes = size; }
  bool has_null_free_non_atomic_layout() const                { return null_free_non_atomic_size_in_bytes() != -1; }

  int null_free_non_atomic_alignment() const                  { return members()._null_free_non_atomic_alignment; }
  void set_null_free_non_atomic_alignment(int alignment)      { members()._null_free_non_atomic_alignment = alignment; }

  int null_free_atomic_size_in_bytes() const                  { return members()._null_free_atomic_size_in_bytes; }
  void set_null_free_atomic_size_in_bytes(int size)           { members()._null_free_atomic_size_in_bytes = size; }
  bool has_null_free_atomic_layout() const                    { return null_free_atomic_size_in_bytes() != -1; }

  int nullable_atomic_size_in_bytes() const                   { return members()._nullable_atomic_size_in_bytes; }
  void set_nullable_atomic_size_in_bytes(int size)            { members()._nullable_atomic_size_in_bytes = size; }
  bool has_nullable_atomic_layout() const                     { return nullable_atomic_size_in_bytes() != -1; }

  int nullable_non_atomic_size_in_bytes() const               { return members()._nullable_non_atomic_size_in_bytes; }
  void set_nullable_non_atomic_size_in_bytes(int size)        { members()._nullable_non_atomic_size_in_bytes = size; }
  bool has_nullable_non_atomic_layout() const                 { return nullable_non_atomic_size_in_bytes() != -1; }

  int null_marker_offset() const                              { return members()._null_marker_offset; }
  void set_null_marker_offset(int offset)                     { members()._null_marker_offset = offset; }
  int null_marker_offset_in_payload() const                   { return null_marker_offset() - payload_offset(); }

  int fast_acmp_offset() const                                { return members()._fast_acmp_offset; }
  void set_fast_acmp_offset(int offset)                       { members()._fast_acmp_offset = offset; }

  int64_t fast_acmp_mask() const                              { return members()._fast_acmp_mask; }
  void set_fast_acmp_mask(int64_t mask)                       { members()._fast_acmp_mask = mask; }

  bool supports_nullable_layouts() const {
    return has_nullable_non_atomic_layout() || has_nullable_atomic_layout();
  }

  // Null-marker access. The marker is a single byte inside the payload:
  // 0 means the payload encodes null, non-zero (written as 1 here) means a
  // value is present. Only meaningful when a nullable layout exists.
  jbyte* null_marker_address(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    return (jbyte*)payload + null_marker_offset_in_payload();
  }

  bool is_payload_marked_as_null(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    return *null_marker_address(payload) == 0;
  }

  void mark_payload_as_non_null(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    *null_marker_address(payload) = 1;
  }

  void mark_payload_as_null(address payload) {
    assert(supports_nullable_layouts(), " Must do");
    *null_marker_address(payload) = 0;
  }

  inline bool layout_has_null_marker(LayoutKind lk) const;

  inline bool is_layout_supported(LayoutKind lk) const;

  inline int layout_alignment(LayoutKind kind) const;
  inline int layout_size_in_bytes(LayoutKind kind) const;

#if INCLUDE_CDS
  void remove_unshareable_info() override;
#endif

 private:
  // Appends this klass' fields to 'sig' (used to build the extended signature
  // for the scalarized calling convention) — NOTE(review): exact semantics of
  // base_off/null_marker_offset live in the .cpp; confirm there.
  int collect_fields(GrowableArray<SigEntry>* sig, int base_off = 0, int null_marker_offset = -1);

  // NOTE(review): presumably releases the generated pack/unpack handler
  // blobs — confirm against the implementation.
  void cleanup_blobs();

 public:
  // Type testing
  bool is_inline_klass_slow() const override { return true; }

  // Casting from Klass*

  static InlineKlass* cast(Klass* k) {
    return const_cast<InlineKlass*>(cast(const_cast<const Klass*>(k)));
  }

  static const InlineKlass* cast(const Klass* k) {
    assert(k != nullptr, "k should not be null");
    assert(k->is_inline_klass(), "cast to InlineKlass");
    return static_cast<const InlineKlass*>(k);
  }

  // Allocates a stand alone value in the Java heap
  // initialized to default value (cleared memory)
  inlineOop allocate_instance(TRAPS);

  // Start address of the payload inside the heap-buffered instance 'o'
  address payload_addr(oop o) const;

  // Whether instances of this klass may / must be flattened in arrays
  // (policy implemented in the .cpp)
  bool maybe_flat_in_array();
  bool is_always_flat_in_array();

  bool contains_oops() const { return nonstatic_oop_map_count() > 0; }
  int nonstatic_oop_count();

  // oop iterate raw inline type data pointer (where oop_addr may not be an oop, but backing/array-element)
  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized(const address oop_addr, OopClosureType* closure);

  template <typename T, class OopClosureType>
  inline void oop_iterate_specialized_bounded(const address oop_addr, OopClosureType* closure, uintptr_t lo, uintptr_t hi);

  // Support for the scalarized calling convention
  void initialize_calling_convention(TRAPS);

  bool can_be_passed_as_fields() const;
  bool can_be_returned_as_fields(bool init = false) const;
  void save_oop_fields(const RegisterMap& map, GrowableArray<Handle>& handles) const;
  void restore_oop_results(RegisterMap& map, GrowableArray<Handle>& handles) const;
  oop realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS);
  static InlineKlass* returned_inline_klass(const RegisterMap& reg_map, bool* return_oop = nullptr, Method* method = nullptr);

  // Offset of the pointer to the Members block (shared with InstanceKlass)
  static ByteSize adr_members_offset() {
    return InstanceKlass::adr_inline_klass_members_offset();
  }

  // pack and unpack handlers. Need to be loadable from generated code
  // so at a fixed offset from the base of the klass pointer.
  // Note: all offsets below are relative to the start of the Members block,
  // which itself is reached via adr_members_offset().
  static ByteSize pack_handler_offset() {
    return byte_offset_of(Members, _pack_handler);
  }

  static ByteSize pack_handler_jobject_offset() {
    return byte_offset_of(Members, _pack_handler_jobject);
  }

  static ByteSize unpack_handler_offset() {
    return byte_offset_of(Members, _unpack_handler);
  }

  static ByteSize null_reset_value_offset_offset() {
    return byte_offset_of(Members, _null_reset_value_offset);
  }

  static ByteSize payload_offset_offset() {
    return byte_offset_of(Members, _payload_offset);
  }

  static ByteSize null_marker_offset_offset() {
    return byte_offset_of(Members, _null_marker_offset);
  }

  static ByteSize fast_acmp_offset_offset() {
    return byte_offset_of(Members, _fast_acmp_offset);
  }

  static ByteSize fast_acmp_mask_offset() {
    return byte_offset_of(Members, _fast_acmp_mask);
  }

  // The "null reset" value stored at null_reset_value_offset() —
  // NOTE(review): presumably the instance used to reset nullable flat fields
  // to null; confirm against the implementation.
  oop null_reset_value() const;
  void set_null_reset_value(oop val);

  void deallocate_contents(ClassLoaderData* loader_data);
  static void cleanup(InlineKlass* ik) ;

  void print_on(outputStream* st) const override;

  // Verification
  void verify_on(outputStream* st) override;
  void oop_verify_on(oop obj, outputStream* st) override;
};
348 
349 #endif // SHARE_VM_OOPS_INLINEKLASS_HPP