1 /*
  2  * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "cds/archiveUtils.hpp"
 26 #include "cds/cdsConfig.hpp"
 27 #include "classfile/vmSymbols.hpp"
 28 #include "code/codeCache.hpp"
 29 #include "gc/shared/barrierSet.hpp"
 30 #include "gc/shared/collectedHeap.inline.hpp"
 31 #include "gc/shared/gcLocker.inline.hpp"
 32 #include "interpreter/interpreter.hpp"
 33 #include "logging/log.hpp"
 34 #include "memory/metaspaceClosure.hpp"
 35 #include "memory/metadataFactory.hpp"
 36 #include "oops/access.hpp"
 37 #include "oops/arrayKlass.hpp"
 38 #include "oops/compressedOops.inline.hpp"
 39 #include "oops/fieldStreams.inline.hpp"
 40 #include "oops/flatArrayKlass.hpp"
 41 #include "oops/inlineKlass.inline.hpp"
 42 #include "oops/instanceKlass.inline.hpp"
 43 #include "oops/method.hpp"
 44 #include "oops/oop.inline.hpp"
 45 #include "oops/objArrayKlass.hpp"
 46 #include "oops/refArrayKlass.hpp"
 47 #include "runtime/fieldDescriptor.inline.hpp"
 48 #include "runtime/handles.inline.hpp"
 49 #include "runtime/safepointVerifiers.hpp"
 50 #include "runtime/sharedRuntime.hpp"
 51 #include "runtime/signature.hpp"
 52 #include "runtime/thread.inline.hpp"
 53 #include "utilities/copy.hpp"
 54 #include "utilities/stringUtils.hpp"
 55 
// Constructor
// Regular constructor, invoked by the class file parser. Passing the
// inline type prototype mark word to InstanceKlass ensures instances of
// this klass are born with the inline-type bit set in their header.
InlineKlass::InlineKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InlineKlass::Kind, markWord::inline_type_prototype()) {
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");
}
 62 
// Default constructor: only used when dumping to or restoring from a CDS
// archive; performs no initialization beyond asserting the CDS context.
InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}
 66 
// Initializes the InlineKlass fixed block (located via
// inlineklass_static_block()): clears the calling-convention metadata
// (extended signature, return registers, pack/unpack handlers) and resets
// the cached layout geometry to "not computed" sentinels.
void InlineKlass::init_fixed_block() {
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  // Addresses used for inline type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  set_null_reset_value_offset(0);
  // -1 means the corresponding layout/offset has not been computed
  // (or is not available) yet.
  set_payload_offset(-1);
  set_payload_size_in_bytes(-1);
  set_payload_alignment(-1);
  set_non_atomic_size_in_bytes(-1);
  set_non_atomic_alignment(-1);
  set_atomic_size_in_bytes(-1);
  set_nullable_size_in_bytes(-1);
  set_null_marker_offset(-1);
}
 86 
// Stores the pre-allocated "null reset" value for this klass into the java
// mirror at null_reset_value_offset(). This value is used to overwrite
// nullable flat fields/elements when null is written to them (see
// write_value_to_addr / copy_payload_to_addr).
void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}
 94 
 95 instanceOop InlineKlass::allocate_instance(TRAPS) {
 96   int size = size_helper();  // Query before forming handle.
 97 
 98   instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
 99   assert(oop->mark().is_inline_type(), "Expected inline type");
100   return oop;
101 }
102 
103 instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
104   int size = size_helper();  // Query before forming handle.
105 
106   instanceOop oop = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
107   assert(oop->mark().is_inline_type(), "Expected inline type");
108   return oop;
109 }
110 
111 int InlineKlass::nonstatic_oop_count() {
112   int oops = 0;
113   int map_count = nonstatic_oop_map_count();
114   OopMapBlock* block = start_of_nonstatic_oop_maps();
115   OopMapBlock* end = block + map_count;
116   while (block != end) {
117     oops += block->count();
118     block++;
119   }
120   return oops;
121 }
122 
123 int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
124   switch(kind) {
125     case LayoutKind::NON_ATOMIC_FLAT:
126       assert(has_non_atomic_layout(), "Layout not available");
127       return non_atomic_size_in_bytes();
128       break;
129     case LayoutKind::ATOMIC_FLAT:
130       assert(has_atomic_layout(), "Layout not available");
131       return atomic_size_in_bytes();
132       break;
133     case LayoutKind::NULLABLE_ATOMIC_FLAT:
134       assert(has_nullable_atomic_layout(), "Layout not available");
135       return nullable_atomic_size_in_bytes();
136       break;
137     case LayoutKind::BUFFERED:
138       return payload_size_in_bytes();
139       break;
140     default:
141       ShouldNotReachHere();
142   }
143 }
144 
145 int InlineKlass::layout_alignment(LayoutKind kind) const {
146   switch(kind) {
147     case LayoutKind::NON_ATOMIC_FLAT:
148       assert(has_non_atomic_layout(), "Layout not available");
149       return non_atomic_alignment();
150       break;
151     case LayoutKind::ATOMIC_FLAT:
152       assert(has_atomic_layout(), "Layout not available");
153       return atomic_size_in_bytes();
154       break;
155     case LayoutKind::NULLABLE_ATOMIC_FLAT:
156       assert(has_nullable_atomic_layout(), "Layout not available");
157       return nullable_atomic_size_in_bytes();
158       break;
159     case LayoutKind::BUFFERED:
160       return payload_alignment();
161       break;
162     default:
163       ShouldNotReachHere();
164   }
165 }
166 
167 bool InlineKlass::is_layout_supported(LayoutKind lk) {
168   switch(lk) {
169     case LayoutKind::NON_ATOMIC_FLAT:
170       return has_non_atomic_layout();
171       break;
172     case LayoutKind::ATOMIC_FLAT:
173       return has_atomic_layout();
174       break;
175     case LayoutKind::NULLABLE_ATOMIC_FLAT:
176       return has_nullable_atomic_layout();
177       break;
178     case LayoutKind::BUFFERED:
179       return true;
180       break;
181     default:
182       ShouldNotReachHere();
183   }
184 }
185 
// Copies a payload of layout 'lk' from 'src' to 'dst' through the access
// API, so required GC barriers and atomicity guarantees for the layout are
// applied. For a NULLABLE_ATOMIC_FLAT source marked as null, the destination
// is either just marked null (no oops to reset) or overwritten with the
// pre-allocated null_reset_value so embedded oops are zeroed consistently.
// 'dest_is_initialized' selects the IS_DEST_UNINITIALIZED decorator, which
// affects barriers on destinations that never held valid data.
void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
    if (is_payload_marked_as_null((address)src)) {
        if (!contains_oops()) {
          // No oops to reset: marking the destination null is sufficient.
          mark_payload_as_null((address)dst);
          return;
        }
        // copy null_reset value to dest
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        }
      } else {
        // Copy has to be performed, even if this is an empty value, because of the null marker
        // NOTE(review): this canonicalizes the null marker by writing to the
        // *source* payload — confirm callers expect src to be mutated here.
        mark_payload_as_non_null((address)src);
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
    }
    break;
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      // Empty values carry no data and (unlike the nullable case) no null
      // marker, so there is nothing to copy.
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
    }
    break;
    default:
      ShouldNotReachHere();
  }
}
228 
// Reads the flat payload of layout 'lk' located at 'offset' inside heap
// object 'src' and returns it as a freshly allocated buffered instance, or
// nullptr if a nullable payload is marked null. The buffer allocation may
// GC, so 'src' is protected by a Handle and re-read afterwards.
oop InlineKlass::read_payload_from_addr(const oop src, int offset, LayoutKind lk, TRAPS) {
  assert(src != nullptr, "Must be");
  assert(is_layout_supported(lk), "Unsupported layout");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      // Fast path: payload already marked null, no buffer allocation needed.
      if (is_payload_marked_as_null((address)((char*)(oopDesc*)src + offset))) {
        return nullptr;
      }
    } // Fallthrough
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      Handle obj_h(THREAD, src);  // keep src alive across the allocation
      oop res = allocate_instance_buffer(CHECK_NULL);
      copy_payload_to_addr((void*)(cast_from_oop<char*>(obj_h()) + offset), payload_addr(res), lk, false);
      // Re-check the copied null marker: the payload may have become null
      // between the fast-path check above and the copy (presumably guards
      // against racing writers — confirm).
      if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
        if(is_payload_marked_as_null(payload_addr(res))) {
          return nullptr;
        }
      }
      return res;
    }
    break;
    default:
      ShouldNotReachHere();
  }
}
256 
// Writes value 'src' (possibly null) into the flat destination 'dst' of
// layout 'lk'. Throws NullPointerException when null is written to a layout
// that cannot encode null (anything but NULLABLE_ATOMIC_FLAT).
void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (lk != LayoutKind::NULLABLE_ATOMIC_FLAT) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    // Writing null to a nullable flat field/element is usually done by writing
    // the whole pre-allocated null_reset_value at the payload address to ensure
    // that the null marker and all potential oops are reset to "zeros".
    // However, the null_reset_value is allocated during class initialization.
    // If the current value of the field is null, it is possible that the class
    // of the field has not been initialized yet and thus the null_reset_value
    // might not be available yet.
    // Writing null over an already null value should not trigger class initialization.
    // The solution is to detect null being written over null cases and return immediately
    // (writing null over null is a no-op from a field modification point of view)
    if (is_payload_marked_as_null((address)dst)) return;
    src_addr = payload_addr(null_reset_value());
  } else {
    src_addr = payload_addr(src);
    if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
      // Make sure the null marker copied into dst reads as non-null.
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized);
}
283 
284 // Arrays of...
285 
286 bool InlineKlass::maybe_flat_in_array() {
287   if (!UseArrayFlattening) {
288     return false;
289   }
290   // Too many embedded oops
291   if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
292     return false;
293   }
294   // No flat layout?
295   if (!has_nullable_atomic_layout() && !has_atomic_layout() && !has_non_atomic_layout()) {
296     return false;
297   }
298   return true;
299 }
300 
301 // Inline type arguments are not passed by reference, instead each
302 // field of the inline type is passed as an argument. This helper
303 // function collects the flat field (recursively)
304 // in a list. Included with the field's type is
305 // the offset of each field in the inline type: i2c and c2i adapters
306 // need that to load or store fields. Finally, the list of fields is
307 // sorted in order of increasing offsets: the adapters and the
308 // compiled code need to agree upon the order of fields.
309 //
310 // The list of basic types that is returned starts with a T_METADATA
311 // and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
312 // delimiters. Every entry between the two is a field of the inline
313 // type. If there's an embedded inline type in the list, it also starts
314 // with a T_METADATA and ends with a T_VOID. This is so we can
315 // generate a unique fingerprint for the method's adapters and we can
316 // generate the list of basic types from the interpreter point of view
317 // (inline types passed as reference: iterate on the list until a
318 // T_METADATA, drop everything until and including the closing
319 // T_VOID) or the compiler point of view (each field of the inline
320 // types is an argument: drop all T_METADATA/T_VOID from the list).
321 //
322 // Value classes could also have fields in abstract super value classes.
323 // Use a HierarchicalFieldStream to get them as well.
// Collects the scalarized field list of this inline klass into 'sig' (see
// the block comment above for the overall encoding). 'base_off' is the
// offset of this (possibly nested) value inside the outermost holder;
// 'null_marker_offset' is -1 for null-free embeddings. Returns the number
// of argument slots the collected fields occupy.
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off, int null_marker_offset) {
  int count = 0;
  // Opening T_METADATA delimiter for this value.
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  for (TopDownHierarchicalNonStaticFieldStreamBase fs(this); !fs.done(); fs.next()) {
    assert(!fs.access_flags().is_static(), "TopDownHierarchicalNonStaticFieldStreamBase should not let static fields pass.");
    // Rebase the field offset: for a nested value (base_off > 0) the payload
    // does not include the object header, hence the payload_offset() bias.
    int offset = base_off + fs.offset() - (base_off > 0 ? payload_offset() : 0);
    InstanceKlass* field_holder = fs.field_descriptor().field_holder();
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve klass of flat field and recursively collect fields
      int field_null_marker_offset = -1;
      if (!fs.is_null_free_inline_type()) {
        field_null_marker_offset = base_off + fs.null_marker_offset() - (base_off > 0 ? payload_offset() : 0);
      }
      Klass* vk = field_holder->get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, offset, field_null_marker_offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt,  fs.name(), offset);
      count += type2size[bt];
    }
    if (field_holder != this) {
      // Inherited field, add an empty wrapper to this to distinguish it from a "local" field
      // with a different offset and avoid false adapter sharing. TODO 8348547 Is this sufficient?
      SigEntry::add_entry(sig, T_METADATA, name(), base_off);
      SigEntry::add_entry(sig, T_VOID, name(), offset);
    }
  }
  // Offset recorded in the closing T_VOID delimiter below.
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? payload_offset() : 0);
  // Null markers are no real fields, add them manually at the end (C2 relies on this) of the flat fields
  if (null_marker_offset != -1) {
    SigEntry::add_null_marker(sig, name(), null_marker_offset);
    count++;
  }
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}
362 
// Computes and installs the scalarized calling convention metadata for this
// klass: the extended signature, and — when this klass can be returned as
// fields — the return register array and the pack/unpack adapter blob.
// No-op unless scalarized returns or arguments are enabled.
void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    // Optional diagnostic dump, gated on the PrintInlineKlassFields class
    // list flag (empty string means disabled).
    if (*PrintInlineKlassFields != '\0') {
      const char* class_name_str = _name->as_C_string();
      if (StringUtils::class_list_match(PrintInlineKlassFields, class_name_str)) {
        ttyLocker ttyl;
        tty->print_cr("Fields of InlineKlass: %s", class_name_str);
        for (const SigEntry& entry : sig_vk) {
          tty->print("  %s: %s+%d", entry._name->as_C_string(), type2name(entry._bt), entry._offset);
          if (entry._null_marker) {
            tty->print(" (null marker)");
          }
          tty->print_cr("");
        }
      }
    }
    // Persist the extended signature into metaspace.
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      // Slot 0 holds a leading T_METADATA tag (the klass pointer), hence +1.
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      // total <= 0 means the platform cannot return this shape in registers.
      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        // Generate the pack/unpack adapters and publish their entry points.
        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_pack_handler_jobject()) = buffered_blob->pack_fields_jobject();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    // If neither convention ended up applicable, release the signature again.
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}
419 
// Releases the metaspace arrays and code blobs owned by this klass, then
// delegates to InstanceKlass for the rest of the teardown. Pointers are
// nulled after freeing to keep the fixed block consistent.
void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
    *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
    *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}
432 
// Static cleanup hook: releases the buffered inline type adapter blob of
// 'ik', if one was generated.
void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}
436 
// Frees the buffered inline type adapter blob (located via the pack handler
// entry point) and clears all three handler addresses. pack_handler() being
// null means no blob was ever generated.
void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = nullptr;
    *((address*)adr_pack_handler_jobject()) = nullptr;
    *((address*)adr_unpack_handler()) = nullptr;
  }
}
447 
448 // Can this inline type be passed as multiple values?
// Can this inline type be passed as multiple values?
// Currently gated solely on the global flag.
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}
452 
453 // Can this inline type be returned as multiple values?
// Can this inline type be returned as multiple values?
// With init=true (during initialize_calling_convention) only the flag is
// consulted; afterwards the return register array must also exist.
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}
457 
458 // Create handles for all oop fields returned in registers that are going to be live across a safepoint
// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  // Register index starts at 1: slot 0 holds the leading T_METADATA tag
  // (see initialize_calling_convention), which is not a field.
  int j = 1;

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop o = *(oop*)loc;
      assert(oopDesc::is_oop_or_null(o), "Bad oop value: " PTR_FORMAT, p2i(o));
      handles.push(Handle(thread, o));
    }
    // T_METADATA entries are delimiters, not fields: no register consumed.
    if (bt == T_METADATA) {
      continue;
    }
    // A T_VOID entry consumes a register slot only when it is the upper
    // half of a preceding T_LONG/T_DOUBLE; delimiter T_VOIDs do not.
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}
486 
487 // Update oop fields in registers from handles after a safepoint
488 void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
489   assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
490   const Array<SigEntry>* sig_vk = extended_sig();
491   const Array<VMRegPair>* regs = return_regs();
492   assert(regs != nullptr, "inconsistent");
493 
494   int j = 1;
495   int k = 0;
496   for (int i = 0; i < sig_vk->length(); i++) {
497     BasicType bt = sig_vk->at(i)._bt;
498     if (bt == T_OBJECT || bt == T_ARRAY) {
499       VMRegPair pair = regs->at(j);
500       address loc = reg_map.location(pair.first(), nullptr);
501       *(oop*)loc = handles.at(k++)();
502     }
503     if (bt == T_METADATA) {
504       continue;
505     }
506     if (bt == T_VOID &&
507         sig_vk->at(i-1)._bt != T_LONG &&
508         sig_vk->at(i-1)._bt != T_DOUBLE) {
509       continue;
510     }
511     j++;
512   }
513   assert(k == handles.length(), "missed a handle?");
514   assert(j == regs->length(), "missed a field?");
515 }
516 
517 // Fields are in registers. Create an instance of the inline type and
518 // initialize it with the values of the fields.
// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  // j indexes register slots (slot 0 is the leading T_METADATA tag, skipped);
  // k indexes the saved oop handles.
  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    // Delimiter entries: no register slot, nothing to store.
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      // Upper half of a long/double consumes a register slot.
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      // Raw 64-bit copy via double_field_put — presumably a deliberate
      // bit-copy of the long value; NOTE(review): confirm no conversion
      // semantics are intended here.
      new_vt->double_field_put(off,  *(jdouble*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      // Oops were saved as handles across the safepoint; consume in order.
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off,  *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    // Poison the consumed register location to catch accidental reuse.
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}
595 
596 // Check if we return an inline type in scalarized form, i.e. check if either
597 // - The return value is a tagged InlineKlass pointer, or
598 // - The return value is an inline type oop that is also returned in scalarized form
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map, bool* return_oop, Method* method) {
  // Locate the register that holds the (single-slot) return value for a
  // T_METADATA return.
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  // Bit 0 distinguishes a tagged InlineKlass* from an oop (oops are at
  // least word-aligned, so their low bit is always clear).
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    if (return_oop != nullptr) {
      // Not returning an oop
      *return_oop = false;
    }
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  oop o = cast_to_oop(ptr);
  assert(oopDesc::is_oop_or_null(o, true), "Bad oop return: " PTR_FORMAT, ptr);
  if (return_oop != nullptr && o != nullptr && o->is_inline_type()) {
    // Check if inline type is also returned in scalarized form
    InlineKlass* vk_val = InlineKlass::cast(o->klass());
    InlineKlass* vk_sig = method->returns_inline_type();
    if (vk_val->can_be_returned_as_fields() && vk_sig != nullptr) {
      assert(vk_val == vk_sig, "Unexpected return value");
      return vk_val;
    }
  }
  // Plain oop (or null) return: not scalarized.
  return nullptr;
}
633 
634 // CDS support
635 #if INCLUDE_CDS
// Visits metaspace pointers for CDS relocation. InlineKlass adds no extra
// metaspace pointers beyond InstanceKlass's (the fixed block is embedded,
// see remove_unshareable_info).
void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
  InstanceKlass::metaspace_pointers_do(it);
}
639 
// Strips runtime-only state before writing this klass into a CDS archive:
// re-anchors the fixed block pointer to the buffered copy and clears all
// calling-convention state (handlers/blobs are process-specific and must
// be regenerated at runtime).
void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // update it to point to the "buffered" copy of this class.
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  ArchivePtrMarker::mark_pointer((address*)&_adr_inlineklass_fixed_block);

  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
}
654 
// CDS: drop the java mirror before archiving; no InlineKlass-specific
// mirror state to clear beyond InstanceKlass's handling.
void InlineKlass::remove_java_mirror() {
  InstanceKlass::remove_java_mirror();
}
658 
// CDS: restore runtime state after loading from an archive. Delegates to
// InstanceKlass; calling-convention state is rebuilt separately (see
// initialize_calling_convention).
void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) {
  InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
}
#endif // INCLUDE_CDS
663 // oop verify
664 
// Klass verification: in addition to InstanceKlass checks, the prototype
// mark word must carry the inline-type bit.
void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}
669 
// Instance verification: every instance of this klass must have the
// inline-type bit set in its mark word.
void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}