/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

// Constructor
InlineKlass::InlineKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InlineKlass::Kind, markWord::inline_type_prototype()) {
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");
}

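// Default constructor, only used for CDS (when dumping an archive or loading a shared klass)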
InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}

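// Initializes the InlineKlassFixedBlock, which holds the calling convention
// metadata and the lazily created array klasses at fixed offsets from the
// klass (see initialize_calling_convention() for why fixed offsets are needed)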
void InlineKlass::init_fixed_block() {
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  // Addresses used for inline type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  *((address*)adr_non_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_nullable_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_null_free_reference_array_klass()) = nullptr;
  set_null_reset_value_offset(0);
  set_payload_offset(-1);
  set_payload_size_in_bytes(-1);
  set_payload_alignment(-1);
  set_non_atomic_size_in_bytes(-1);
  set_non_atomic_alignment(-1);
  set_atomic_size_in_bytes(-1);
  set_nullable_size_in_bytes(-1);
  set_null_marker_offset(-1);
}

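// The null_reset_value is a pre-allocated instance of this class whose payload
// is all zeros (null marker unset, oops cleared). It is stored in the java
// mirror and is used to reset nullable flat fields/elements when null is
// written to them (see write_value_to_addr())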
void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "Sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}

instanceOop InlineKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(oop->mark().is_inline_type(), "Expected inline type");
  return oop;
}

instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
  assert(oop->mark().is_inline_type(), "Expected inline type");
  return oop;
}

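// Returns the number of nonstatic oop fields, summed over all oop map blocks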
int InlineKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

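// A value class can support several layouts: the BUFFERED layout used by
// heap-allocated instances, and up to three flat layouts (NON_ATOMIC_FLAT,
// ATOMIC_FLAT, NULLABLE_ATOMIC_FLAT) used by flat fields and flat arrays.
// The helpers below report the size, alignment, and availability of each layout.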
int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_size_in_bytes();
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::BUFFERED:
      return payload_size_in_bytes();
    default:
      ShouldNotReachHere();
  }
}

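// Note: atomic layouts are padded to a power-of-two size and must be aligned
// to that same size, so that the whole payload can be read or written with a
// single atomic memory access; this is why the alignment of ATOMIC_FLAT and
// NULLABLE_ATOMIC_FLAT below equals their size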
int InlineKlass::layout_alignment(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_alignment();
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::BUFFERED:
      return payload_alignment();
    default:
      ShouldNotReachHere();
  }
}

bool InlineKlass::is_layout_supported(LayoutKind lk) {
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      return has_non_atomic_layout();
    case LayoutKind::ATOMIC_FLAT:
      return has_atomic_layout();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      return has_nullable_atomic_layout();
    case LayoutKind::BUFFERED:
      return true;
    default:
      ShouldNotReachHere();
  }
}

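// Copies a value payload from src to dst according to layout lk, using the
// GC-aware value_copy access API. For the nullable layout, the null marker is
// handled here: copying a null source means writing the null_reset_value so
// that embedded oops are cleared as well.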
void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)src)) {
        if (!contains_oops()) {
          mark_payload_as_null((address)dst);
          return;
        }
        // Copy the null_reset_value to dest
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        }
      } else {
        // Copy has to be performed, even if this is an empty value, because of the null marker
        mark_payload_as_non_null((address)src);
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
    }
    break;
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
    }
    break;
    default:
      ShouldNotReachHere();
  }
}

oop InlineKlass::read_payload_from_addr(oop src, int offset, LayoutKind lk, TRAPS) {
  assert(src != nullptr, "Must be");
  assert(is_layout_supported(lk), "Unsupported layout");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)((char*)(oopDesc*)src + offset))) {
        return nullptr;
      }
    } // Fallthrough
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      Handle obj_h(THREAD, src);
      oop res = allocate_instance_buffer(CHECK_NULL);
      copy_payload_to_addr((void*)((char*)(oopDesc*)obj_h() + offset), payload_addr(res), lk, false);
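      // Re-check the null marker on the copied payload: a concurrent writer
      // could have stored null into the source between the check above and the copy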
      if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
        if (is_payload_marked_as_null(payload_addr(res))) {
          return nullptr;
        }
      }
      return res;
    }
    default:
      ShouldNotReachHere();
  }
}

void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (lk != LayoutKind::NULLABLE_ATOMIC_FLAT) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    // Writing null to a nullable flat field/element is usually done by writing
    // the whole pre-allocated null_reset_value at the payload address, to ensure
    // that the null marker and all potential oops are reset to "zeros".
    // However, the null_reset_value is allocated during class initialization.
    // If the current value of the field is null, it is possible that the class
    // of the field has not been initialized yet, and thus the null_reset_value
    // might not be available yet.
    // Writing null over an already null value should not trigger class initialization,
    // so such cases are detected here and the method returns immediately:
    // writing null over null is a no-op from a field modification point of view.
    if (is_payload_marked_as_null((address)dst)) return;
    src_addr = payload_addr(null_reset_value());
  } else {
    src_addr = payload_addr(src);
    if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized);
}

// Arrays of inline type values

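// Returns true if arrays of this value class can use a flat layout, based on
// the command-line policy flags and the available layouts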
bool InlineKlass::flat_array() {
  if (!UseArrayFlattening) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }
  // Declared atomic but not naturally atomic
  if (must_be_atomic() && !is_naturally_atomic()) {
    return false;
  }
  // The VM is enforcing AlwaysAtomicAccesses and the layout is not naturally atomic
  if (AlwaysAtomicAccesses && (!is_naturally_atomic())) {
    return false;
  }
  // No flat layout is available
  if (!has_nullable_atomic_layout() && !has_atomic_layout() && !has_non_atomic_layout()) {
    return false;
  }
  return true;
}

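// Lazily creates and caches the ObjArrayKlass for null-free reference arrays of
// this class, using double-checked locking: lock-free readers use load_acquire,
// paired with the release_store performed under MultiArray_lock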
ObjArrayKlass* InlineKlass::null_free_reference_array(TRAPS) {
  if (Atomic::load_acquire(adr_null_free_reference_array_klass()) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    // Check if update has already taken place
    if (null_free_reference_array_klass() == nullptr) {
      ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, true, CHECK_NULL);

      // use 'release' to pair with lock-free load
      Atomic::release_store(adr_null_free_reference_array_klass(), k);
    }
  }
  return null_free_reference_array_klass();
}

// There's no reason for this method to have a TRAPS argument
FlatArrayKlass* InlineKlass::flat_array_klass(LayoutKind lk, TRAPS) {
  FlatArrayKlass* volatile* adr_flat_array_klass = nullptr;
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_non_atomic_flat_array_klass();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_atomic_flat_array_klass();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_nullable_atomic_flat_array_klass();
      break;
    default:
      ShouldNotReachHere();
  }

  if (Atomic::load_acquire(adr_flat_array_klass) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    if (*adr_flat_array_klass == nullptr) {
      FlatArrayKlass* k = FlatArrayKlass::allocate_klass(this, lk, CHECK_NULL);
      Atomic::release_store(adr_flat_array_klass, k);
    }
  }
  return *adr_flat_array_klass;
}

FlatArrayKlass* InlineKlass::flat_array_klass_or_null(LayoutKind lk) {
  FlatArrayKlass* volatile* adr_flat_array_klass = nullptr;
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_non_atomic_flat_array_klass();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_atomic_flat_array_klass();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_nullable_atomic_flat_array_klass();
      break;
    default:
      ShouldNotReachHere();
  }

  // Need load-acquire for lock-free read
  FlatArrayKlass* k = Atomic::load_acquire(adr_flat_array_klass);
  return k;
}

// Inline type arguments are not passed by reference. Instead, each field of
// the inline type is passed as an argument. This helper function collects
// these fields (recursively flattening nested inline type fields) in a list.
// Included with each field's type is its offset in the inline type: the i2c
// and c2i adapters need that to load or store fields. Finally, the list of
// fields is sorted in order of increasing offsets: the adapters and the
// compiled code need to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_METADATA
// and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the inline
// type. If there's an embedded inline type in the list, it also starts
// with a T_METADATA and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters, and we can
// generate the list of basic types from the interpreter point of view
// (inline types passed as reference: iterate on the list until a
// T_METADATA, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the inline
// types is an argument: drop all T_METADATA/T_VOID from the list).
//
// Value classes could also have fields in abstract super value classes.
// Use a HierarchicalFieldStream to get them as well.
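// For example (illustrative only): a value class P with fields {int x; short y}
// yields the list T_METADATA(P), T_INT, T_SHORT, T_VOID(P), with offsets
// increasing; a nested flat field contributes its own T_METADATA ... T_VOID
// sub-sequence in place of a single entry.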
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, float& max_offset, int base_off, int null_marker_offset) {
  int count = 0;
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  max_offset = base_off;
  for (HierarchicalFieldStream<JavaFieldStream> fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    int offset = base_off + fs.offset() - (base_off > 0 ? payload_offset() : 0);
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve klass of flat field and recursively collect fields
      int field_null_marker_offset = -1;
      if (!fs.is_null_free_inline_type()) {
        field_null_marker_offset = base_off + fs.null_marker_offset() - (base_off > 0 ? payload_offset() : 0);
      }
      Klass* vk = get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, max_offset, offset, field_null_marker_offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.signature(), offset);
      count += type2size[bt];
    }
    if (fs.field_descriptor().field_holder() != this) {
      // Inherited field: add an empty wrapper around it to distinguish it from a "local" field
      // with a different offset, and to avoid false adapter sharing. TODO 8348547 Is this sufficient?
      SigEntry::add_entry(sig, T_METADATA, name(), base_off);
      SigEntry::add_entry(sig, T_VOID, name(), offset);
    }
    max_offset = MAX2(max_offset, (float)offset);
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? payload_offset() : 0);
  // Null markers are not real fields; add them manually, at the end of the flat fields (C2 relies on this)
  if (null_marker_offset != -1) {
    max_offset += 0.1f; // Add the markers "in-between" because they are not real fields
    SigEntry::add_entry(sig, T_BOOLEAN, name(), null_marker_offset, max_offset);
    count++;
  }
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  if (base_off == 0) {
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    float max_offset = 0;
    int nb_fields = collect_fields(&sig_vk, max_offset);
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
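      // Slot 0 of the return convention is reserved for the (tagged) InlineKlass
      // pointer, which returned_inline_klass() uses to recognize a value returned
      // in multiple registers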
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_pack_handler_jobject()) = buffered_blob->pack_fields_jobject();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}

void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
    *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
    *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}

void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = nullptr;
    *((address*)adr_pack_handler_jobject()) = nullptr;
    *((address*)adr_unpack_handler()) = nullptr;
  }
}

// Can this inline type be passed as multiple values?
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}

// Can this inline type be returned as multiple values?
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;

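  // j indexes the return registers; register 0 holds the InlineKlass tag, so
  // field registers start at index 1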
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop v = *(oop*)loc;
      assert(v == nullptr || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_METADATA) {
      continue;
    }
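    // A T_VOID that does not follow a T_LONG or T_DOUBLE is an inline type
    // delimiter and does not consume a register; the second half of a
    // long/double does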
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(InlineTypeReturnedAsFields, "Inline types should be returned as fields");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != nullptr, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
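      // Moves all 64 bits of the long at once; the double accessor is used
      // here purely as a raw 64-bit copy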
      new_vt->double_field_put(off, *(jdouble*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off, *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
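    // Clobber the consumed location with a poison value to catch stale reads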
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for a tagged InlineKlass pointer (set when a value
// is being returned in multiple registers) as opposed to a regular oop
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  assert(oopDesc::is_oop_or_null(cast_to_oop(ptr), true),
         "Bad oop return: " PTR_FORMAT, ptr);
  return nullptr;
}

// CDS support

void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
  InstanceKlass::metaspace_pointers_do(it);

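  // Push the cached array klass pointers so they are relocated along with this
  // klass when dumping the archive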
  it->push((Klass**)adr_non_atomic_flat_array_klass());
  it->push((Klass**)adr_atomic_flat_array_klass());
  it->push((Klass**)adr_nullable_atomic_flat_array_klass());
  it->push((Klass**)adr_null_free_reference_array_klass());
}

void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // Update _adr_inlineklass_fixed_block to point to the "buffered" copy of this class
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  ArchivePtrMarker::mark_pointer((address*)&_adr_inlineklass_fixed_block);

  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
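  // The cached array klasses are reachable only through this InlineKlass, so
  // they have to be processed explicitly here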
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->remove_unshareable_info();
  }
}

void InlineKlass::remove_java_mirror() {
  InstanceKlass::remove_java_mirror();
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->remove_java_mirror();
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->remove_java_mirror();
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->remove_java_mirror();
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->remove_java_mirror();
  }
}

void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) {
  InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
}

// oop verify

void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}

void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}