/*
 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/access.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/refArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stringUtils.hpp"

InlineKlass::Members::Members()
  : _extended_sig(nullptr),
    _return_regs(nullptr),
    _pack_handler(nullptr),
    _pack_handler_jobject(nullptr),
    _unpack_handler(nullptr),
    _null_reset_value_offset(0),
    _payload_offset(-1),
    _payload_size_in_bytes(-1),
    _payload_alignment(-1),
    _non_atomic_size_in_bytes(-1),
    _non_atomic_alignment(-1),
    _atomic_size_in_bytes(-1),
    _nullable_size_in_bytes(-1),
    _null_marker_offset(-1) {
}

InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}

// Constructor
InlineKlass::InlineKlass(const ClassFileParser& parser)
  : InstanceKlass(parser, InlineKlass::Kind, markWord::inline_type_prototype()) {
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");

  // Set up the offset to the members of this klass
  _adr_inline_klass_members = calculate_members_address();

  // Placement install the members
  new (_adr_inline_klass_members) Members();

  // Sanity check construction of the members
  assert(pack_handler() == nullptr, "pack handler not null");
}

address InlineKlass::calculate_members_address() const {
  // The members are placed after all other contents inherited from the InstanceKlass
  return end_of_instance_klass();
}

oop InlineKlass::null_reset_value() {
  assert(is_initialized() || is_being_initialized() || is_in_error_state(), "null reset value is set at the beginning of initialization");
  oop val = java_mirror()->obj_field_acquire(null_reset_value_offset());
  assert(val != nullptr, "Sanity check");
  return val;
}

void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}

instanceOop InlineKlass::allocate_instance(TRAPS) {
  instanceOop oop = InstanceKlass::allocate_instance(CHECK_NULL);
  assert(oop->mark().is_inline_type(), "Expected inline type");
  return oop;
}

int InlineKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_size_in_bytes();
      break;
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
      break;
    case LayoutKind::BUFFERED:
      return payload_size_in_bytes();
      break;
    default:
      ShouldNotReachHere();
  }
}

int InlineKlass::layout_alignment(LayoutKind kind) const {
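  // Note: atomic flat layouts must be naturally aligned so that the whole
  // payload can be read and written with a single atomic access, hence
  // their alignment below equals their size.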
  switch(kind) {
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_alignment();
      break;
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
      break;
    case LayoutKind::BUFFERED:
      return payload_alignment();
      break;
    default:
      ShouldNotReachHere();
  }
}

bool InlineKlass::is_layout_supported(LayoutKind lk) {
  switch(lk) {
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT:
      return has_non_atomic_layout();
      break;
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
      return has_atomic_layout();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      return has_nullable_atomic_layout();
      break;
    case LayoutKind::BUFFERED:
      return true;
      break;
    default:
      ShouldNotReachHere();
  }
}

void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)src)) {
        // Copy the null_reset_value to dest
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        }
      } else {
        // Copy has to be performed, even if this is an empty value, because of the null marker
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
    }
    break;
    case LayoutKind::BUFFERED:
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT: {
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
    }
    break;
    default:
      ShouldNotReachHere();
  }
}

oop InlineKlass::read_payload_from_addr(const oop src, size_t offset, LayoutKind lk, TRAPS) {
  assert(src != nullptr, "Must be");
  assert(is_layout_supported(lk), "Unsupported layout");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null(cast_from_oop<address>(src) + offset)) {
        return nullptr;
      }
    } // Fallthrough
    case LayoutKind::BUFFERED:
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT: {
      Handle obj_h(THREAD, src);
      oop res = allocate_instance(CHECK_NULL);
      copy_payload_to_addr((void*)(cast_from_oop<address>(obj_h()) + offset), payload_addr(res), lk, false);

      // After copying, re-check if the payload is now marked as null. Another
      // thread could have marked the src object as null after the initial check
      // but before the copy operation, causing the null-marker to be marked in
      // the destination. In this case, discard the allocated object and
      // return nullptr.
      if (LayoutKindHelper::is_nullable_flat(lk)) {
        if (is_payload_marked_as_null(payload_addr(res))) {
          return nullptr;
        }
      }

      return res;
    }
    break;
    default:
      ShouldNotReachHere();
  }
}

void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (!LayoutKindHelper::is_nullable_flat(lk)) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    // Writing null to a nullable flat field/element is usually done by writing
    // the whole pre-allocated null_reset_value at the payload address to ensure
    // that the null marker and all potential oops are reset to "zeros".
    // However, the null_reset_value is allocated during class initialization.
    // If the current value of the field is null, it is possible that the class
    // of the field has not been initialized yet and thus the null_reset_value
    // might not be available yet.
    // Writing null over an already null value should not trigger class initialization.
    // The solution is to detect the null-over-null case and return immediately,
    // since writing null over null is a no-op from a field modification point of view.
    if (is_payload_marked_as_null((address)dst)) return;
    src_addr = payload_addr(null_reset_value());
  } else {
    src_addr = payload_addr(src);
    if (LayoutKindHelper::is_nullable_flat(lk)) {
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, true /* dest_is_initialized */);
}

// Arrays of inline types

bool InlineKlass::maybe_flat_in_array() {
  if (!UseArrayFlattening) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }
  // No flat layout?
  if (!has_nullable_atomic_layout() && !has_atomic_layout() && !has_non_atomic_layout()) {
    return false;
  }
  return true;
}

bool InlineKlass::is_always_flat_in_array() {
  if (!UseArrayFlattening) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }

  // An instance is always flat in an array if we have all layouts. Note that this could change in the future when the
  // flattening policies are updated or if new APIs are added that allow the creation of reference arrays directly.
  return has_nullable_atomic_layout() && has_atomic_layout() && has_non_atomic_layout();
}

// Inline type arguments are not passed by reference; instead, each
// field of the inline type is passed as an argument. This helper
// function collects the fields (recursively flattening any flat
// fields) in a list. Included with each field's type is its offset
// in the inline type: the i2c and c2i adapters need that to load or
// store fields. Finally, the list of fields is sorted in order of
// increasing offset: the adapters and the compiled code need to
// agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_METADATA
// and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the inline
// type. If there's an embedded inline type in the list, it also starts
// with a T_METADATA and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (inline types passed as reference: iterate on the list until a
// T_METADATA, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the inline
// types is an argument: drop all T_METADATA/T_VOID from the list).
//
// Value classes could also have fields in abstract super value classes.
// Use a HierarchicalFieldStream to get them as well.
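//
// For illustration only (hypothetical classes and made-up offsets): a value
// class Point with int fields x and y would yield
//   [ T_METADATA(Point), T_INT(x, 8), T_INT(y, 12), T_VOID(Point) ]
// and a value class with a flat, nullable Point field would nest that
// sequence between its own T_METADATA/T_VOID delimiters, with the null
// marker entry added just before the nested closing T_VOID.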
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off, int null_marker_offset) {
  int count = 0;
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  for (TopDownHierarchicalNonStaticFieldStreamBase fs(this); !fs.done(); fs.next()) {
    assert(!fs.access_flags().is_static(), "TopDownHierarchicalNonStaticFieldStreamBase should not let static fields pass.");
    int offset = base_off + fs.offset() - (base_off > 0 ? payload_offset() : 0);
    InstanceKlass* field_holder = fs.field_descriptor().field_holder();
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve klass of flat field and recursively collect fields
      int field_null_marker_offset = -1;
      if (!fs.is_null_free_inline_type()) {
        field_null_marker_offset = base_off + fs.null_marker_offset() - (base_off > 0 ? payload_offset() : 0);
      }
      Klass* vk = field_holder->get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, offset, field_null_marker_offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.name(), offset);
      count += type2size[bt];
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? payload_offset() : 0);
  // Null markers are not real fields; add them manually at the end of the flat fields (C2 relies on this)
  if (null_marker_offset != -1) {
    SigEntry::add_null_marker(sig, name(), null_marker_offset);
    count++;
  }
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    if (*PrintInlineKlassFields != '\0') {
      const char* class_name_str = _name->as_C_string();
      if (StringUtils::class_list_match(PrintInlineKlassFields, class_name_str)) {
        ttyLocker ttyl;
        tty->print_cr("Fields of InlineKlass: %s", class_name_str);
        for (const SigEntry& entry : sig_vk) {
          tty->print(" %s: %s+%d", entry._name->as_C_string(), type2name(entry._bt), entry._offset);
          if (entry._null_marker) {
            tty->print(" (null marker)");
          }
400 tty->print_cr("");
        }
      }
    }
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    set_extended_sig(extended_sig);
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        set_return_regs(return_regs);
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        if (buffered_blob == nullptr) {
          THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Out of space in CodeCache for adapters");
        }
        set_pack_handler(buffered_blob->pack_fields());
        set_pack_handler_jobject(buffered_blob->pack_fields_jobject());
        set_unpack_handler(buffered_blob->unpack_fields());
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}

void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, members()._extended_sig);
    set_extended_sig(nullptr);
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, members()._return_regs);
    set_return_regs(nullptr);
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}

void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    set_pack_handler(nullptr);
    set_pack_handler_jobject(nullptr);
    set_unpack_handler(nullptr);
  }
}

// Can this inline type be passed as multiple values?
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}

// Can this inline type be returned as multiple values?
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
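  // regs->at(0) corresponds to the leading T_METADATA entry (the klass
  // pointer in the first return register), so field values start at index 1.
  // A T_VOID entry following a T_LONG/T_DOUBLE stands for the second half
  // of that value and therefore also advances j.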
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop o = *(oop*)loc;
      assert(oopDesc::is_oop_or_null(o), "Bad oop value: " PTR_FORMAT, p2i(o));
      handles.push(Handle(thread, o));
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
511 assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != nullptr, "inconsistent");

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(k == handles.length(), "missed a handle?");
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
      case T_BOOLEAN: {
        new_vt->bool_field_put(off, *(jboolean*)loc);
        break;
      }
      case T_CHAR: {
        new_vt->char_field_put(off, *(jchar*)loc);
        break;
      }
      case T_BYTE: {
        new_vt->byte_field_put(off, *(jbyte*)loc);
        break;
      }
      case T_SHORT: {
        new_vt->short_field_put(off, *(jshort*)loc);
        break;
      }
      case T_INT: {
        new_vt->int_field_put(off, *(jint*)loc);
        break;
      }
      case T_LONG: {
#ifdef _LP64
        // Copy the raw 64-bit value; only implemented for 64-bit platforms
        new_vt->long_field_put(off, *(jlong*)loc);
#else
        Unimplemented();
#endif
        break;
      }
      case T_OBJECT:
      case T_ARRAY: {
        Handle handle = handles.at(k++);
        new_vt->obj_field_put(off, handle());
        break;
      }
      case T_FLOAT: {
        new_vt->float_field_put(off, *(jfloat*)loc);
        break;
      }
      case T_DOUBLE: {
        new_vt->double_field_put(off, *(jdouble*)loc);
        break;
      }
      default:
        ShouldNotReachHere();
    }
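    // Poison the just-consumed register location to catch accidental reuse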
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check if we return an inline type in scalarized form, i.e. check if either
// - The return value is a tagged InlineKlass pointer, or
// - The return value is an inline type oop that is also returned in scalarized form
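// A tagged value has its least significant bit set: when returning fields,
// compiled code places the InlineKlass pointer, tagged with bit 0, in the
// first return register.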
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map, bool* return_oop, Method* method) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    if (return_oop != nullptr) {
      // Not returning an oop
      *return_oop = false;
    }
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  oop o = cast_to_oop(ptr);
  assert(oopDesc::is_oop_or_null(o), "Bad oop return: " PTR_FORMAT, ptr);
  if (return_oop != nullptr && o != nullptr && o->is_inline_type()) {
    // Check if inline type is also returned in scalarized form
    InlineKlass* vk_val = InlineKlass::cast(o->klass());
    InlineKlass* vk_sig = method->returns_inline_type();
    if (vk_val->can_be_returned_as_fields() && vk_sig != nullptr) {
      assert(vk_val == vk_sig, "Unexpected return value");
      return vk_val;
    }
  }
  return nullptr;
}

// CDS support
#if INCLUDE_CDS

void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // Update the members pointer so it points into the "buffered" copy of this class.
  _adr_inline_klass_members = calculate_members_address();
  ArchivePtrMarker::mark_pointer(&_adr_inline_klass_members);

  set_extended_sig(nullptr);
  set_return_regs(nullptr);
  set_pack_handler(nullptr);
  set_pack_handler_jobject(nullptr);
  set_unpack_handler(nullptr);

  assert(pack_handler() == nullptr, "pack handler not null");
}

#endif // INCLUDE_CDS

// Verification

void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}

void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}