/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/access.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/refArrayKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"
#include "utilities/stringUtils.hpp"

InlineKlass::Members::Members()
  : _extended_sig(nullptr),
    _return_regs(nullptr),
    _pack_handler(nullptr),
    _pack_handler_jobject(nullptr),
    _unpack_handler(nullptr),
    _null_reset_value_offset(0),
    _payload_offset(-1),
    _payload_size_in_bytes(-1),
    _payload_alignment(-1),
    _non_atomic_size_in_bytes(-1),
    _non_atomic_alignment(-1),
    _atomic_size_in_bytes(-1),
    _nullable_size_in_bytes(-1),
    _null_marker_offset(-1) {
}

InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}

// Constructor
InlineKlass::InlineKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InlineKlass::Kind, markWord::inline_type_prototype()) {
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");

  // Set up the offset to the members of this klass
  _adr_inline_klass_members = calculate_members_address();

  // Placement install the members
  new (_adr_inline_klass_members) Members();

  // Sanity check construction of the members
  assert(pack_handler() == nullptr, "pack handler not null");
}

address InlineKlass::calculate_members_address() const {
  // The members are placed after all other contents inherited from the InstanceKlass
  return end_of_instance_klass();
}

oop InlineKlass::null_reset_value() {
  assert(is_initialized() || is_being_initialized() || is_in_error_state(), "null reset value is set at the beginning of initialization");
  oop val = java_mirror()->obj_field_acquire(null_reset_value_offset());
  assert(val != nullptr, "Sanity check");
  return val;
}

void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "Sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}

instanceOop InlineKlass::allocate_instance(TRAPS) {
  instanceOop oop = InstanceKlass::allocate_instance(CHECK_NULL);
  assert(oop->mark().is_inline_type(), "Expected inline type");
  return oop;
}

int InlineKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_size_in_bytes();
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::BUFFERED:
      return payload_size_in_bytes();
    default:
      ShouldNotReachHere();
  }
}

int InlineKlass::layout_alignment(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_alignment();
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      // Atomic layouts are aligned on their size so the payload can be read and written atomically
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::BUFFERED:
      return payload_alignment();
    default:
      ShouldNotReachHere();
  }
}

bool InlineKlass::is_layout_supported(LayoutKind lk) {
  switch(lk) {
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT:
      return has_non_atomic_layout();
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
      return has_atomic_layout();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      return has_nullable_atomic_layout();
    case LayoutKind::BUFFERED:
      return true;
    default:
      ShouldNotReachHere();
  }
}

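// Note on dest_is_initialized (used by the copy routines below): when the
// destination has not been initialized yet (e.g. a freshly allocated buffer),
// the copy is performed with the IS_DEST_UNINITIALIZED decorator so that GC
// barriers that care about the previous value (such as SATB pre-write
// barriers) know there is no valid old value to process.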
void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)src)) {
        if (!contains_oops()) {
          mark_payload_as_null((address)dst);
          return;
        }
        // Copy the null_reset value to dest
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        }
      } else {
        // The copy has to be performed, even if this is an empty value, because of the null marker.
        // Set the null marker in the source payload first so that the copy propagates a valid marker.
        mark_payload_as_non_null((address)src);
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
      break;
    }
    case LayoutKind::BUFFERED:
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT: {
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

oop InlineKlass::read_payload_from_addr(const oop src, size_t offset, LayoutKind lk, TRAPS) {
  assert(src != nullptr, "Must be");
  assert(is_layout_supported(lk), "Unsupported layout");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)(cast_from_oop<char*>(src) + offset))) {
        return nullptr;
      }
    } // Fallthrough
    case LayoutKind::BUFFERED:
    case LayoutKind::NULL_FREE_ATOMIC_FLAT:
    case LayoutKind::NULL_FREE_NON_ATOMIC_FLAT: {
      Handle obj_h(THREAD, src);
      oop res = allocate_instance(CHECK_NULL);
      copy_payload_to_addr((void*)(cast_from_oop<char*>(obj_h()) + offset), payload_addr(res), lk, false);
      if (LayoutKindHelper::is_nullable_flat(lk)) {
        // Re-check the null marker on the copy: the field may have been set to
        // null concurrently between the check above and the copy
        if (is_payload_marked_as_null(payload_addr(res))) {
          return nullptr;
        }
      }
      return res;
    }
    default:
      ShouldNotReachHere();
  }
}

void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (!LayoutKindHelper::is_nullable_flat(lk)) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    // Writing null to a nullable flat field/element is usually done by writing
    // the whole pre-allocated null_reset_value at the payload address, to ensure
    // that the null marker and all potential oops are reset to "zeros".
    // However, the null_reset_value is allocated during class initialization.
    // If the current value of the field is null, it is possible that the class
    // of the field has not been initialized yet, and thus the null_reset_value
    // might not be available yet.
    // Writing null over an already null value should not trigger class initialization,
    // so such writes are detected and return immediately (writing null over null
    // is a no-op from a field modification point of view).
    if (is_payload_marked_as_null((address)dst)) return;
    src_addr = payload_addr(null_reset_value());
  } else {
    src_addr = payload_addr(src);
    if (LayoutKindHelper::is_nullable_flat(lk)) {
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized);
}

// Arrays of...

bool InlineKlass::maybe_flat_in_array() {
  if (!UseArrayFlattening) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }
  // No flat layout?
  if (!has_nullable_atomic_layout() && !has_atomic_layout() && !has_non_atomic_layout()) {
    return false;
  }
  return true;
}

bool InlineKlass::is_always_flat_in_array() {
  if (!UseArrayFlattening) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }

  // An instance is always flat in an array if we have all layouts. Note that this could change in the future when the
  // flattening policies are updated or if new APIs are added that allow the creation of reference arrays directly.
  return has_nullable_atomic_layout() && has_atomic_layout() && has_non_atomic_layout();
}

// Inline type arguments are not passed by reference. Instead, each
// field of the inline type is passed as an argument. This helper
// function collects the fields of the inline type (recursively)
// in a list. Included with the field's type is
// the offset of each field in the inline type: i2c and c2i adapters
// need that to load or store fields. Finally, the list of fields is
// sorted in order of increasing offsets: the adapters and the
// compiled code need to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_METADATA
// and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the inline
// type. If there's an embedded inline type in the list, it also starts
// with a T_METADATA and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (inline types passed as reference: iterate on the list until a
// T_METADATA, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the inline
// types is an argument: drop all T_METADATA/T_VOID entries from the list).
//
// Value classes can also have fields in abstract super value classes.
// Use a HierarchicalFieldStream to get them as well.
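//
// For illustration (hypothetical class, made-up offsets): a value class Point
// with two int fields x and y would produce a list like
//   T_METADATA (Point)          <- opening delimiter
//   T_INT      (x, offset 4)
//   T_INT      (y, offset 8)
//   T_VOID     (Point)          <- closing delimiter
// The interpreter view collapses this to a single reference argument, while
// the compiled-code view drops the delimiters and passes x and y as two
// separate int arguments.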
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off, int null_marker_offset) {
  int count = 0;
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  for (TopDownHierarchicalNonStaticFieldStreamBase fs(this); !fs.done(); fs.next()) {
    assert(!fs.access_flags().is_static(), "TopDownHierarchicalNonStaticFieldStreamBase should not let static fields pass.");
    int offset = base_off + fs.offset() - (base_off > 0 ? payload_offset() : 0);
    InstanceKlass* field_holder = fs.field_descriptor().field_holder();
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve the klass of the flat field and recursively collect its fields
      int field_null_marker_offset = -1;
      if (!fs.is_null_free_inline_type()) {
        field_null_marker_offset = base_off + fs.null_marker_offset() - (base_off > 0 ? payload_offset() : 0);
      }
      Klass* vk = field_holder->get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, offset, field_null_marker_offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.name(), offset);
      count += type2size[bt];
    }
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? payload_offset() : 0);
  // Null markers are not real fields; add them manually at the end of the flat fields (C2 relies on this ordering)
  if (null_marker_offset != -1) {
    SigEntry::add_null_marker(sig, name(), null_marker_offset);
    count++;
  }
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    if (*PrintInlineKlassFields != '\0') {
      const char* class_name_str = _name->as_C_string();
      if (StringUtils::class_list_match(PrintInlineKlassFields, class_name_str)) {
        ttyLocker ttyl;
        tty->print_cr("Fields of InlineKlass: %s", class_name_str);
        for (const SigEntry& entry : sig_vk) {
          tty->print("  %s: %s+%d", entry._name->as_C_string(), type2name(entry._bt), entry._offset);
          if (entry._null_marker) {
            tty->print(" (null marker)");
          }
          tty->cr();
        }
      }
    }
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    set_extended_sig(extended_sig);
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        set_return_regs(return_regs);
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        if (buffered_blob == nullptr) {
          THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Out of space in CodeCache for adapters");
        }
        set_pack_handler(buffered_blob->pack_fields());
        set_pack_handler_jobject(buffered_blob->pack_fields_jobject());
        set_unpack_handler(buffered_blob->unpack_fields());
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      // Clear the pointer so deallocate_contents() does not free the array a second time
      set_extended_sig(nullptr);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}

void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, members()._extended_sig);
    set_extended_sig(nullptr);
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, members()._return_regs);
    set_return_regs(nullptr);
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}

void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    set_pack_handler(nullptr);
    set_pack_handler_jobject(nullptr);
    set_unpack_handler(nullptr);
  }
}

// Can this inline type be passed as multiple values?
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}

// Can this inline type be returned as multiple values?
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop o = *(oop*)loc;
      assert(oopDesc::is_oop_or_null(o), "Bad oop value: " PTR_FORMAT, p2i(o));
      handles.push(Handle(thread, o));
    }
    if (bt == T_METADATA) {
      // Delimiters do not consume a register
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      // A T_VOID that does not complete a T_LONG/T_DOUBLE pair is a closing
      // delimiter and does not consume a register either
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(InlineTypeReturnedAsFields, "inline types should be returned as fields here");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != nullptr, "inconsistent");

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(k == handles.length(), "missed a handle?");
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      // Bit-copy the 64-bit value; the double store covers the full 8 bytes
      new_vt->double_field_put(off, *(jdouble*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off, *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    // Poison the now-consumed register location
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check if we return an inline type in scalarized form, i.e. check if either
// - The return value is a tagged InlineKlass pointer, or
// - The return value is an inline type oop that is also returned in scalarized form
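//
// For illustration: with InlineTypeReturnedAsFields, compiled code that returns
// a value in scalarized form puts the fields in registers and places the
// InlineKlass pointer with its lowest bit set in the return register. Since
// klass pointers are aligned, callers can distinguish this tagged pointer from
// a plain oop by testing bit 0, as done below.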
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map, bool* return_oop, Method* method) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    if (return_oop != nullptr) {
      // Not returning an oop
      *return_oop = false;
    }
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  oop o = cast_to_oop(ptr);
  assert(oopDesc::is_oop_or_null(o), "Bad oop return: " PTR_FORMAT, ptr);
  if (return_oop != nullptr && o != nullptr && o->is_inline_type()) {
    // Check if the inline type is also returned in scalarized form
    InlineKlass* vk_val = InlineKlass::cast(o->klass());
    InlineKlass* vk_sig = method->returns_inline_type();
    if (vk_val->can_be_returned_as_fields() && vk_sig != nullptr) {
      assert(vk_val == vk_sig, "Unexpected return value");
      return vk_val;
    }
  }
  return nullptr;
}

// CDS support
#if INCLUDE_CDS

void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // Update _adr_inline_klass_members to point to the "buffered" copy of this class
  _adr_inline_klass_members = calculate_members_address();
  ArchivePtrMarker::mark_pointer(&_adr_inline_klass_members);

  set_extended_sig(nullptr);
  set_return_regs(nullptr);
  set_pack_handler(nullptr);
  set_pack_handler_jobject(nullptr);
  set_unpack_handler(nullptr);

  assert(pack_handler() == nullptr, "pack handler not null");
}

#endif // INCLUDE_CDS

// Verification

void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}

void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}