/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

// Constructor
InlineKlass::InlineKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InlineKlass::Kind, markWord::inline_type_prototype()) {
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");
}

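// Default constructor, only used when dumping or restoring an InlineKlass through
// a CDS archive; the fields are filled in by the archive code rather than by
// class file parsing.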
InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}

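// Initialize the inline-klass fixed block: the fixed-offset area that holds the
// calling convention data (extended signature, return registers, pack/unpack
// handlers), the cached array klasses, and the payload layout information. All
// entries start out null/invalid and are filled in later during class loading
// and linking.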
void InlineKlass::init_fixed_block() {
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  // Addresses used for inline type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  *((address*)adr_non_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_nullable_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_null_free_reference_array_klass()) = nullptr;
  set_null_reset_value_offset(0);
  set_payload_offset(-1);
  set_payload_size_in_bytes(-1);
  set_payload_alignment(-1);
  set_non_atomic_size_in_bytes(-1);
  set_non_atomic_alignment(-1);
  set_atomic_size_in_bytes(-1);
  set_nullable_size_in_bytes(-1);
  set_null_marker_offset(-1);
}

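// The null reset value is a pre-allocated instance of this class whose payload is
// all zeros. It is stored in the java mirror and is written over nullable flat
// fields/elements when null is stored to them, resetting the null marker and any
// embedded oops (see copy_payload_to_addr() and write_value_to_addr() below).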
void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "Sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}

instanceOop InlineKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(oop->mark().is_inline_type(), "Expected inline type");
  return oop;
}

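// Allocate a buffer instance, i.e. a heap instance used to hold the payload of a
// value object while it is handled by reference (obj_buffer_allocate() is the
// buffered counterpart of the obj_allocate() call used above).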
instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop oop = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
  assert(oop->mark().is_inline_type(), "Expected inline type");
  return oop;
}

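// Number of oop fields in an instance of this class, computed by summing the
// counts of all nonstatic oop map blocks.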
int InlineKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

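// Size in bytes of the payload for the given layout kind; asserts that this class
// actually provides the requested layout.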
int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_size_in_bytes();
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::BUFFERED:
      return payload_size_in_bytes();
    default:
      ShouldNotReachHere();
  }
}

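// Required payload alignment for the given layout kind. Note that for the atomic
// layouts the alignment is the full layout size, so that the whole payload can be
// read or written with a single atomic memory access.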
int InlineKlass::layout_alignment(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_alignment();
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::BUFFERED:
      return payload_alignment();
    default:
      ShouldNotReachHere();
  }
}

bool InlineKlass::is_layout_supported(LayoutKind lk) {
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      return has_non_atomic_layout();
    case LayoutKind::ATOMIC_FLAT:
      return has_atomic_layout();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      return has_nullable_atomic_layout();
    case LayoutKind::BUFFERED:
      return true;
    default:
      ShouldNotReachHere();
  }
}

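// Copy a payload of the given layout kind from src to dst. The copy goes through
// HeapAccess::value_copy so that embedded oops get the required GC barriers. For
// the nullable layout, the null marker is taken into account: copying a null
// payload either just marks dst as null (no oops) or writes the null reset value
// over dst to also clear the embedded oops.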
void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)src)) {
        if (!contains_oops()) {
          mark_payload_as_null((address)dst);
          return;
        }
        // Copy the null_reset value to dest to reset the null marker and any embedded oops
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(payload_addr(null_reset_value()), dst, this, lk);
        }
      } else {
        // The copy has to be performed, even if this is an empty value, because of the null marker
        mark_payload_as_non_null((address)src);
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
    }
    break;
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
    }
    break;
    default:
      ShouldNotReachHere();
  }
}

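// Read the flat payload at src + offset and return it as a new buffered instance,
// or nullptr if a nullable payload is marked as null. The null marker is re-checked
// on the copied payload because a concurrent writer may have stored null between
// the initial check and the copy.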
oop InlineKlass::read_payload_from_addr(oop src, int offset, LayoutKind lk, TRAPS) {
  assert(src != nullptr, "Must be");
  assert(is_layout_supported(lk), "Unsupported layout");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)((char*)(oopDesc*)src + offset))) {
        return nullptr;
      }
    } // Fallthrough
    case LayoutKind::BUFFERED:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      Handle obj_h(THREAD, src);
      oop res = allocate_instance_buffer(CHECK_NULL);
      copy_payload_to_addr((void*)((char*)(oopDesc*)obj_h() + offset), payload_addr(res), lk, false);
      if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
        if (is_payload_marked_as_null(payload_addr(res))) {
          return nullptr;
        }
      }
      return res;
    }
    default:
      ShouldNotReachHere();
  }
}

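// Write the value src (which may be null) to the flat storage at dst, throwing a
// NullPointerException if null is written to a layout that has no null marker.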
void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (lk != LayoutKind::NULLABLE_ATOMIC_FLAT) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    // Writing null to a nullable flat field/element is usually done by writing
    // the whole pre-allocated null_reset_value at the payload address to ensure
    // that the null marker and all potential oops are reset to "zeros".
    // However, the null_reset_value is allocated during class initialization.
    // If the current value of the field is null, it is possible that the class
    // of the field has not been initialized yet, and thus the null_reset_value
    // might not be available yet.
    // Writing null over an already null value should not trigger class initialization,
    // so such cases are detected and the method returns immediately
    // (writing null over null is a no-op from a field modification point of view).
    if (is_payload_marked_as_null((address)dst)) return;
    src_addr = payload_addr(null_reset_value());
  } else {
    src_addr = payload_addr(src);
    if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized);
}

// Arrays of...

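// Can instances of this class be stored flat in an array? True only if array
// flattening is enabled, the element type does not embed too many oops, and at
// least one flat layout exists for this class.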
bool InlineKlass::maybe_flat_in_array() {
  if (!UseArrayFlattening) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }
  // No flat layout?
  if (!has_nullable_atomic_layout() && !has_atomic_layout() && !has_non_atomic_layout()) {
    return false;
  }
  return true;
}

ObjArrayKlass* InlineKlass::null_free_reference_array(TRAPS) {
  if (Atomic::load_acquire(adr_null_free_reference_array_klass()) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    // Check if update has already taken place
    if (null_free_reference_array_klass() == nullptr) {
      ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, true, CHECK_NULL);

      // Use 'release' to pair with the lock-free load
      Atomic::release_store(adr_null_free_reference_array_klass(), k);
    }
  }
  return null_free_reference_array_klass();
}

// There's no reason for this method to have a TRAPS argument
FlatArrayKlass* InlineKlass::flat_array_klass(LayoutKind lk, TRAPS) {
  FlatArrayKlass* volatile* adr_flat_array_klass = nullptr;
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_non_atomic_flat_array_klass();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_atomic_flat_array_klass();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_nullable_atomic_flat_array_klass();
      break;
    default:
      ShouldNotReachHere();
  }

  if (Atomic::load_acquire(adr_flat_array_klass) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    // Check if update has already taken place
    if (*adr_flat_array_klass == nullptr) {
      FlatArrayKlass* k = FlatArrayKlass::allocate_klass(this, lk, CHECK_NULL);
      // Use 'release' to pair with the lock-free load
      Atomic::release_store(adr_flat_array_klass, k);
    }
  }
  return *adr_flat_array_klass;
}

FlatArrayKlass* InlineKlass::flat_array_klass_or_null(LayoutKind lk) {
  FlatArrayKlass* volatile* adr_flat_array_klass = nullptr;
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_non_atomic_flat_array_klass();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_atomic_flat_array_klass();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_nullable_atomic_flat_array_klass();
      break;
    default:
      ShouldNotReachHere();
  }

  // Need load-acquire for lock-free read
  FlatArrayKlass* k = Atomic::load_acquire(adr_flat_array_klass);
  return k;
}

// Inline type arguments are not passed by reference; instead, each field
// of the inline type is passed as an argument. This helper function
// collects the flat fields (recursively) in a list. Included with each
// field's type is the offset of the field in the inline type: i2c and c2i
// adapters need that to load or store fields. Finally, the list of fields
// is sorted in order of increasing offsets: the adapters and the compiled
// code need to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_METADATA
// and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the inline
// type. If there's an embedded inline type in the list, it also starts
// with a T_METADATA and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (inline types passed as reference: iterate on the list until a
// T_METADATA, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the inline
// types is an argument: drop all T_METADATA/T_VOID from the list).
//
// Value classes could also have fields in abstract super value classes.
// Use a HierarchicalFieldStream to get them as well.
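//
// Illustrative example (a sketch, not generated from this exact code): for a
// value class with fields 'int x' and 'long y', the collected list would be
// roughly
//   T_METADATA, T_INT (x), T_LONG (y), T_VOID (second half of y), T_VOID (delimiter)
// with entries sorted by increasing field offset. T_LONG and T_DOUBLE entries
// are followed by a T_VOID entry for their second half, which is why the
// iteration code in save_oop_fields()/restore_oop_results() below treats those
// T_VOID entries as register slots.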
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, float& max_offset, int base_off, int null_marker_offset) {
  int count = 0;
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  max_offset = base_off;
  for (HierarchicalFieldStream<JavaFieldStream> fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    int offset = base_off + fs.offset() - (base_off > 0 ? payload_offset() : 0);
    InstanceKlass* field_holder = fs.field_descriptor().field_holder();
    // TODO 8284443 Use a different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve the klass of the flat field and recursively collect its fields
      int field_null_marker_offset = -1;
      if (!fs.is_null_free_inline_type()) {
        field_null_marker_offset = base_off + fs.null_marker_offset() - (base_off > 0 ? payload_offset() : 0);
      }
      Klass* vk = field_holder->get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, max_offset, offset, field_null_marker_offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.signature(), offset);
      count += type2size[bt];
    }
    if (field_holder != this) {
      // Inherited field: add an empty T_METADATA/T_VOID wrapper to distinguish it from a "local"
      // field with a different offset and avoid false adapter sharing. TODO 8348547 Is this sufficient?
      SigEntry::add_entry(sig, T_METADATA, name(), base_off);
      SigEntry::add_entry(sig, T_VOID, name(), offset);
    }
    max_offset = MAX2(max_offset, (float)offset);
  }
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? payload_offset() : 0);
  // Null markers are not real fields, so add them manually at the end of the
  // flat fields (C2 relies on this ordering)
  if (null_marker_offset != -1) {
    max_offset += 0.1f; // Add the markers "in-between" because they are not real fields
    SigEntry::add_entry(sig, T_BOOLEAN, name(), null_marker_offset, max_offset);
    count++;
  }
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  if (base_off == 0) {
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    float max_offset = 0;
    int nb_fields = collect_fields(&sig_vk, max_offset);
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_pack_handler_jobject()) = buffered_blob->pack_fields_jobject();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}

void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
    *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
    *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}

void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = nullptr;
    *((address*)adr_pack_handler_jobject()) = nullptr;
    *((address*)adr_unpack_handler()) = nullptr;
  }
}

// Can this inline type be passed as multiple values?
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}

// Can this inline type be returned as multiple values?
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1; // regs[0] holds the tagged InlineKlass*, the field registers start at index 1

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop v = *(oop*)loc;
      assert(v == nullptr || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(InlineTypeReturnedAsFields, "Inline types should be returned as fields");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != nullptr, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      new_vt->long_field_put(off, *(jlong*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off, *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD; // Clobber the consumed location to catch stale reads
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check the return register for an InlineKlass oop
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  assert(oopDesc::is_oop_or_null(cast_to_oop(ptr), true),
         "Bad oop return: " PTR_FORMAT, ptr);
  return nullptr;
}

// CDS support

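// Visit the klass pointers stored in the fixed block (the cached flat array
// klasses and the null-free reference array klass) so that the CDS archiving
// code can relocate them.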
void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
  InstanceKlass::metaspace_pointers_do(it);

  it->push((Klass**)adr_non_atomic_flat_array_klass());
  it->push((Klass**)adr_atomic_flat_array_klass());
  it->push((Klass**)adr_nullable_atomic_flat_array_klass());
  it->push((Klass**)adr_null_free_reference_array_klass());
}

void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // Update the fixed block pointer so that it points into the "buffered" copy of this class
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  ArchivePtrMarker::mark_pointer((address*)&_adr_inlineklass_fixed_block);

  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->remove_unshareable_info();
  }
}

void InlineKlass::remove_java_mirror() {
  InstanceKlass::remove_java_mirror();
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->remove_java_mirror();
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->remove_java_mirror();
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->remove_java_mirror();
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->remove_java_mirror();
  }
}

void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) {
  InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
}

// oop verify

void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}

void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}