1 /*
2 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/archiveUtils.hpp"
26 #include "cds/cdsConfig.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "code/codeCache.hpp"
29 #include "gc/shared/barrierSet.hpp"
30 #include "gc/shared/collectedHeap.inline.hpp"
31 #include "gc/shared/gcLocker.inline.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "logging/log.hpp"
34 #include "memory/metadataFactory.hpp"
35 #include "memory/metaspaceClosure.hpp"
36 #include "oops/access.hpp"
37 #include "oops/arrayKlass.hpp"
38 #include "oops/compressedOops.inline.hpp"
39 #include "oops/fieldStreams.inline.hpp"
40 #include "oops/flatArrayKlass.hpp"
41 #include "oops/inlineKlass.inline.hpp"
42 #include "oops/instanceKlass.inline.hpp"
43 #include "oops/method.hpp"
44 #include "oops/objArrayKlass.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "oops/refArrayKlass.hpp"
47 #include "runtime/fieldDescriptor.inline.hpp"
48 #include "runtime/handles.inline.hpp"
49 #include "runtime/safepointVerifiers.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "runtime/signature.hpp"
52 #include "runtime/thread.inline.hpp"
53 #include "utilities/copy.hpp"
54 #include "utilities/stringUtils.hpp"
55
// Constructor used during class file parsing. The markWord prototype is set
// to the inline-type prototype so every instance of this klass is created
// with the inline-type bit already set in its header.
InlineKlass::InlineKlass(const ClassFileParser& parser)
  : InstanceKlass(parser, InlineKlass::Kind, markWord::inline_type_prototype()) {
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");
}
62
// Default constructor: only used when materializing klasses for CDS
// (archive dumping or reading from shared spaces); no field initialization
// happens here.
InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}
66
// One-time setup of the InlineKlassFixedBlock: caches its address and resets
// every slot to a "not yet computed" state (nullptr for addresses/arrays,
// 0 or -1 for offsets and sizes). The layout sizes/offsets are filled in
// later during class layout computation.
void InlineKlass::init_fixed_block() {
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  // Addresses used for inline type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  // -1 means "layout not available / not computed"; 0 for the mirror offset.
  set_null_reset_value_offset(0);
  set_payload_offset(-1);
  set_payload_size_in_bytes(-1);
  set_payload_alignment(-1);
  set_non_atomic_size_in_bytes(-1);
  set_non_atomic_alignment(-1);
  set_atomic_size_in_bytes(-1);
  set_nullable_size_in_bytes(-1);
  set_null_marker_offset(-1);
}
86
// Stores the "null reset" value (an all-zero instance of this klass) into the
// java mirror. It is used to overwrite nullable flat fields/elements when
// null is written to them, resetting both the null marker and any oops.
void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}
94
95 instanceOop InlineKlass::allocate_instance(TRAPS) {
96 int size = size_helper(); // Query before forming handle.
97
98 instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
99 assert(oop->mark().is_inline_type(), "Expected inline type");
100 return oop;
101 }
102
103 instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
104 int size = size_helper(); // Query before forming handle.
105
106 instanceOop oop = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
107 assert(oop->mark().is_inline_type(), "Expected inline type");
108 return oop;
109 }
110
111 int InlineKlass::nonstatic_oop_count() {
112 int oops = 0;
113 int map_count = nonstatic_oop_map_count();
114 OopMapBlock* block = start_of_nonstatic_oop_maps();
115 OopMapBlock* end = block + map_count;
116 while (block != end) {
117 oops += block->count();
118 block++;
119 }
120 return oops;
121 }
122
123 int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
124 switch(kind) {
125 case LayoutKind::NON_ATOMIC_FLAT:
126 assert(has_non_atomic_layout(), "Layout not available");
127 return non_atomic_size_in_bytes();
128 break;
129 case LayoutKind::ATOMIC_FLAT:
130 assert(has_atomic_layout(), "Layout not available");
131 return atomic_size_in_bytes();
132 break;
133 case LayoutKind::NULLABLE_ATOMIC_FLAT:
134 assert(has_nullable_atomic_layout(), "Layout not available");
135 return nullable_atomic_size_in_bytes();
136 break;
137 case LayoutKind::BUFFERED:
138 return payload_size_in_bytes();
139 break;
140 default:
141 ShouldNotReachHere();
142 }
143 }
144
145 int InlineKlass::layout_alignment(LayoutKind kind) const {
146 switch(kind) {
147 case LayoutKind::NON_ATOMIC_FLAT:
148 assert(has_non_atomic_layout(), "Layout not available");
149 return non_atomic_alignment();
150 break;
151 case LayoutKind::ATOMIC_FLAT:
152 assert(has_atomic_layout(), "Layout not available");
153 return atomic_size_in_bytes();
154 break;
155 case LayoutKind::NULLABLE_ATOMIC_FLAT:
156 assert(has_nullable_atomic_layout(), "Layout not available");
157 return nullable_atomic_size_in_bytes();
158 break;
159 case LayoutKind::BUFFERED:
160 return payload_alignment();
161 break;
162 default:
163 ShouldNotReachHere();
164 }
165 }
166
167 bool InlineKlass::is_layout_supported(LayoutKind lk) {
168 switch(lk) {
169 case LayoutKind::NON_ATOMIC_FLAT:
170 return has_non_atomic_layout();
171 break;
172 case LayoutKind::ATOMIC_FLAT:
173 return has_atomic_layout();
174 break;
175 case LayoutKind::NULLABLE_ATOMIC_FLAT:
176 return has_nullable_atomic_layout();
177 break;
178 case LayoutKind::BUFFERED:
179 return true;
180 break;
181 default:
182 ShouldNotReachHere();
183 }
184 }
185
// Copies a payload of this klass from src to dst according to layout kind lk.
// For NULLABLE_ATOMIC_FLAT the null marker is handled here: a null source is
// propagated either by marking dst as null (no oops) or by copying the
// null_reset value (so embedded oops are also reset). dest_is_initialized
// selects the GC decorator: IS_DEST_UNINITIALIZED skips pre-write barriers
// when dst has never held valid oops.
void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)src)) {
        if (!contains_oops()) {
          // No embedded oops: writing the null marker on dst is sufficient.
          mark_payload_as_null((address)dst);
          return;
        }
        // copy null_reset value to dest
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        }
      } else {
        // Copy has to be performed, even if this is an empty value, because of the null marker
        mark_payload_as_non_null((address)src);
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
    }
    break;
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      // These layouts have no null marker; empty values have nothing to copy.
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
    }
    break;
    default:
      ShouldNotReachHere();
  }
}
228
229 oop InlineKlass::read_payload_from_addr(const oop src, int offset, LayoutKind lk, TRAPS) {
230 assert(src != nullptr, "Must be");
231 assert(is_layout_supported(lk), "Unsupported layout");
232 switch(lk) {
233 case LayoutKind::NULLABLE_ATOMIC_FLAT: {
234 if (is_payload_marked_as_null((address)((char*)(oopDesc*)src + offset))) {
235 return nullptr;
236 }
237 } // Fallthrough
238 case LayoutKind::BUFFERED:
239 case LayoutKind::ATOMIC_FLAT:
240 case LayoutKind::NON_ATOMIC_FLAT: {
241 Handle obj_h(THREAD, src);
242 oop res = allocate_instance_buffer(CHECK_NULL);
243 copy_payload_to_addr((void*)(cast_from_oop<char*>(obj_h()) + offset), payload_addr(res), lk, false);
244 if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
245 if(is_payload_marked_as_null(payload_addr(res))) {
246 return nullptr;
247 }
248 }
249 return res;
250 }
251 break;
252 default:
253 ShouldNotReachHere();
254 }
255 }
256
// Writes the value `src` (a buffered instance of this klass, or null) to the
// flat destination `dst` using layout kind lk. Writing null is only legal for
// NULLABLE_ATOMIC_FLAT; otherwise a NullPointerException is thrown.
void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (lk != LayoutKind::NULLABLE_ATOMIC_FLAT) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    // Writing null to a nullable flat field/element is usually done by writing
    // the whole pre-allocated null_reset_value at the payload address to ensure
    // that the null marker and all potential oops are reset to "zeros".
    // However, the null_reset_value is allocated during class initialization.
    // If the current value of the field is null, it is possible that the class
    // of the field has not been initialized yet and thus the null_reset_value
    // might not be available yet.
    // Writing null over an already null value should not trigger class initialization.
    // The solution is to detect null being written over null cases and return immediately
    // (writing null over null is a no-op from a field modification point of view)
    if (is_payload_marked_as_null((address)dst)) return;
    src_addr = payload_addr(null_reset_value());
  } else {
    src_addr = payload_addr(src);
    if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
      // Make sure the copied payload carries a non-null marker.
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized);
}
283
284 // Arrays of...
285
286 bool InlineKlass::maybe_flat_in_array() {
287 if (!UseArrayFlattening) {
288 return false;
289 }
290 // Too many embedded oops
291 if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
292 return false;
293 }
294 // No flat layout?
295 if (!has_nullable_atomic_layout() && !has_atomic_layout() && !has_non_atomic_layout()) {
296 return false;
297 }
298 return true;
299 }
300
301 // Inline type arguments are not passed by reference, instead each
302 // field of the inline type is passed as an argument. This helper
303 // function collects the flat field (recursively)
304 // in a list. Included with the field's type is
305 // the offset of each field in the inline type: i2c and c2i adapters
306 // need that to load or store fields. Finally, the list of fields is
307 // sorted in order of increasing offsets: the adapters and the
308 // compiled code need to agree upon the order of fields.
309 //
310 // The list of basic types that is returned starts with a T_METADATA
311 // and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
312 // delimiters. Every entry between the two is a field of the inline
313 // type. If there's an embedded inline type in the list, it also starts
314 // with a T_METADATA and ends with a T_VOID. This is so we can
315 // generate a unique fingerprint for the method's adapters and we can
316 // generate the list of basic types from the interpreter point of view
317 // (inline types passed as reference: iterate on the list until a
318 // T_METADATA, drop everything until and including the closing
319 // T_VOID) or the compiler point of view (each field of the inline
320 // types is an argument: drop all T_METADATA/T_VOID from the list).
321 //
322 // Value classes could also have fields in abstract super value classes.
323 // Use a HierarchicalFieldStream to get them as well.
// Appends this klass' scalarized field list to sig (see the block comment
// above for the overall encoding). base_off is the offset of this (possibly
// embedded) value inside the enclosing container; null_marker_offset is -1
// for null-free embeddings. Returns the number of argument slots added.
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off, int null_marker_offset) {
  int count = 0;
  // Opening T_METADATA delimiter for this value.
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  for (TopDownHierarchicalNonStaticFieldStreamBase fs(this); !fs.done(); fs.next()) {
    assert(!fs.access_flags().is_static(), "TopDownHierarchicalNonStaticFieldStreamBase should not let static fields pass.");
    // For an embedded value (base_off > 0) the payload_offset() bias must be
    // subtracted so field offsets are relative to the embedding position.
    int offset = base_off + fs.offset() - (base_off > 0 ? payload_offset() : 0);
    InstanceKlass* field_holder = fs.field_descriptor().field_holder();
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve klass of flat field and recursively collect fields
      int field_null_marker_offset = -1;
      if (!fs.is_null_free_inline_type()) {
        field_null_marker_offset = base_off + fs.null_marker_offset() - (base_off > 0 ? payload_offset() : 0);
      }
      Klass* vk = field_holder->get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, offset, field_null_marker_offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.name(), offset);
      count += type2size[bt];
    }
  }
  // Offset recorded with the closing T_VOID delimiter.
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? payload_offset() : 0);
  // Null markers are no real fields, add them manually at the end (C2 relies on this) of the flat fields
  if (null_marker_offset != -1) {
    SigEntry::add_null_marker(sig, name(), null_marker_offset);
    count++;
  }
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}
356
// Computes and caches the scalarized calling convention for this klass:
// the extended signature, the return registers, and the pack/unpack adapter
// handlers. Only done when scalarized argument passing or returns are enabled.
void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    // Optional diagnostic dump of the collected fields, filtered by the
    // PrintInlineKlassFields class-name pattern.
    if (*PrintInlineKlassFields != '\0') {
      const char* class_name_str = _name->as_C_string();
      if (StringUtils::class_list_match(PrintInlineKlassFields, class_name_str)) {
        ttyLocker ttyl;
        tty->print_cr("Fields of InlineKlass: %s", class_name_str);
        for (const SigEntry& entry : sig_vk) {
          tty->print(" %s: %s+%d", entry._name->as_C_string(), type2name(entry._bt), entry._offset);
          if (entry._null_marker) {
            tty->print(" (null marker)");
          }
          tty->print_cr("");
        }
      }
    }
    // Publish the extended signature in metadata so it survives the ResourceMark.
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      // One extra leading slot for the T_METADATA tag.
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      // total <= 0 means the platform cannot return this value in registers.
      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        if (buffered_blob == nullptr) {
          THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Out of space in CodeCache for adapters");
        }
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_pack_handler_jobject()) = buffered_blob->pack_fields_jobject();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    // If neither scalarized passing nor returning ended up possible, the
    // extended signature is not needed; release it again.
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}
416
// Releases metadata owned by this klass (extended signature, return register
// array, adapter blobs) before delegating to InstanceKlass for the rest.
// Called when the klass is freed, e.g. on class loader unloading.
void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
    // Clear the slot so no stale pointer remains.
    *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
    *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}
429
// Static cleanup hook: releases the adapter blobs owned by ik.
void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}
433
434 void InlineKlass::cleanup_blobs() {
435 if (pack_handler() != nullptr) {
436 CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
437 assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
438 BufferBlob::free((BufferBlob*)buffered_blob);
439 *((address*)adr_pack_handler()) = nullptr;
440 *((address*)adr_pack_handler_jobject()) = nullptr;
441 *((address*)adr_unpack_handler()) = nullptr;
442 }
443 }
444
// Can this inline type be passed as multiple values (scalarized arguments)?
// Currently driven solely by the InlineTypePassFieldsAsArgs flag.
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}
449
// Can this inline type be returned as multiple values (scalarized return)?
// With init == true (used while the calling convention is being computed),
// only the flag is consulted; otherwise the return registers must also have
// been successfully set up.
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}
454
// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  // j starts at 1: regs->at(0) corresponds to the leading T_METADATA tag of
  // the extended signature (see initialize_calling_convention).
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop o = *(oop*)loc;
      assert(oopDesc::is_oop_or_null(o), "Bad oop value: " PTR_FORMAT, p2i(o));
      handles.push(Handle(thread, o));
    }
    if (bt == T_METADATA) {
      // Delimiters do not consume a register entry (except the leading one).
      continue;
    }
    // T_VOID consumes a register entry only as the second half of a
    // long/double pair; stand-alone T_VOID delimiters do not.
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}
483
484 // Update oop fields in registers from handles after a safepoint
485 void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
486 assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
487 const Array<SigEntry>* sig_vk = extended_sig();
488 const Array<VMRegPair>* regs = return_regs();
489 assert(regs != nullptr, "inconsistent");
490
491 int j = 1;
492 int k = 0;
493 for (int i = 0; i < sig_vk->length(); i++) {
494 BasicType bt = sig_vk->at(i)._bt;
495 if (bt == T_OBJECT || bt == T_ARRAY) {
496 VMRegPair pair = regs->at(j);
497 address loc = reg_map.location(pair.first(), nullptr);
498 *(oop*)loc = handles.at(k++)();
499 }
500 if (bt == T_METADATA) {
501 continue;
502 }
503 if (bt == T_VOID &&
504 sig_vk->at(i-1)._bt != T_LONG &&
505 sig_vk->at(i-1)._bt != T_DOUBLE) {
506 continue;
507 }
508 j++;
509 }
510 assert(k == handles.length(), "missed a handle?");
511 assert(j == regs->length(), "missed a field?");
512 }
513
// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  // j indexes the return registers (starting at 1 to skip the leading
  // T_METADATA slot), k indexes the handles saved across the safepoint.
  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      // Delimiter, no register consumed here.
      continue;
    }
    if (bt == T_VOID) {
      // Second half of a long/double consumes the pair's extra register slot.
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
      case T_BOOLEAN: {
        new_vt->bool_field_put(off, *(jboolean*)loc);
        break;
      }
      case T_CHAR: {
        new_vt->char_field_put(off, *(jchar*)loc);
        break;
      }
      case T_BYTE: {
        new_vt->byte_field_put(off, *(jbyte*)loc);
        break;
      }
      case T_SHORT: {
        new_vt->short_field_put(off, *(jshort*)loc);
        break;
      }
      case T_INT: {
        new_vt->int_field_put(off, *(jint*)loc);
        break;
      }
      case T_LONG: {
#ifdef _LP64
        // NOTE(review): stores the 64-bit long payload via double_field_put,
        // i.e. a raw 64-bit copy reinterpreted as jdouble — presumably
        // intentional bit-copying; confirm no platform alters NaN bit patterns
        // on a double store.
        new_vt->double_field_put(off,  *(jdouble*)loc);
#else
        Unimplemented();
#endif
        break;
      }
      case T_OBJECT:
      case T_ARRAY: {
        // Oops were saved in handles across the safepoint; take them from there.
        Handle handle = handles.at(k++);
        new_vt->obj_field_put(off, handle());
        break;
      }
      case T_FLOAT: {
        new_vt->float_field_put(off, *(jfloat*)loc);
        break;
      }
      case T_DOUBLE: {
        new_vt->double_field_put(off, *(jdouble*)loc);
        break;
      }
      default:
        ShouldNotReachHere();
    }
    // Poison the consumed register location to catch stale reads in debug runs.
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}
592
// Check if we return an inline type in scalarized form, i.e. check if either
// - The return value is a tagged InlineKlass pointer, or
// - The return value is an inline type oop that is also returned in scalarized form
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map, bool* return_oop, Method* method) {
  // Locate the register used for a single T_METADATA return value.
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  // Bit 0 distinguishes a tagged InlineKlass* from an ordinary oop.
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    if (return_oop != nullptr) {
      // Not returning an oop
      *return_oop = false;
    }
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  oop o = cast_to_oop(ptr);
  assert(oopDesc::is_oop_or_null(o), "Bad oop return: " PTR_FORMAT, ptr);
  if (return_oop != nullptr && o != nullptr && o->is_inline_type()) {
    // Check if inline type is also returned in scalarized form
    InlineKlass* vk_val = InlineKlass::cast(o->klass());
    InlineKlass* vk_sig = method->returns_inline_type();
    if (vk_val->can_be_returned_as_fields() && vk_sig != nullptr) {
      assert(vk_val == vk_sig, "Unexpected return value");
      return vk_val;
    }
  }
  return nullptr;
}
630
631 // CDS support
632 #if INCLUDE_CDS
// CDS: iterate metaspace pointers; InlineKlass adds nothing beyond
// InstanceKlass (the fixed block lives inside the klass itself).
void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
  InstanceKlass::metaspace_pointers_do(it);
}
636
// CDS: strip state that must not be archived (calling-convention metadata and
// runtime-generated handler addresses) and re-point the fixed block at the
// buffered copy of this klass.
void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // update it to point to the "buffered" copy of this class.
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  ArchivePtrMarker::mark_pointer((address*)&_adr_inlineklass_fixed_block);

  // Clear everything that is recomputed at runtime (see init_fixed_block /
  // initialize_calling_convention).
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
}
651
// CDS: drop the java mirror; no InlineKlass-specific mirror state to handle.
void InlineKlass::remove_java_mirror() {
  InstanceKlass::remove_java_mirror();
}
655
// CDS: restore runtime state after loading from the archive. The calling
// convention is not restored here; it is recomputed lazily elsewhere.
void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) {
  InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
}
659 #endif // CDS
660 // oop verify
661
// Klass verification: in addition to InstanceKlass checks, the markWord
// prototype must carry the inline-type bit.
void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}
666
// Instance verification: every live instance of an inline klass must have the
// inline-type bit set in its header.
void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}