/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/flatArrayKlass.hpp"
#include "oops/inlineKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/copy.hpp"

// Constructor
InlineKlass::InlineKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InlineKlass::Kind) {
  set_prototype_header(markWord::inline_type_prototype());
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");
}

InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}

void InlineKlass::init_fixed_block() {
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  // Addresses used for inline type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  *((address*)adr_non_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_nullable_atomic_flat_array_klass()) = nullptr;
  *((address*)adr_null_free_reference_array_klass()) = nullptr;
  set_default_value_offset(0);
  set_null_reset_value_offset(0);
  set_first_field_offset(-1);
  set_payload_size_in_bytes(-1);
  set_payload_alignment(-1);
  set_non_atomic_size_in_bytes(-1);
  set_non_atomic_alignment(-1);
  set_atomic_size_in_bytes(-1);
  set_nullable_size_in_bytes(-1);
  set_null_marker_offset(-1);
}

void InlineKlass::set_default_value(oop val) {
  assert(val != nullptr, "sanity check");
  assert(oopDesc::is_oop(val), "sanity check");
  assert(val->is_inline_type(), "sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(default_value_offset(), val);
}

void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "sanity check");
  assert(oopDesc::is_oop(val), "sanity check");
  assert(val->is_inline_type(), "sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}

instanceOop InlineKlass::allocate_instance(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop obj = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
  assert(obj->mark().is_inline_type(), "Expected inline type");
  return obj;
}

instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
  int size = size_helper();  // Query before forming handle.

  instanceOop obj = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
  assert(obj->mark().is_inline_type(), "Expected inline type");
  return obj;
}

int InlineKlass::nonstatic_oop_count() {
  int oops = 0;
  int map_count = nonstatic_oop_map_count();
  OopMapBlock* block = start_of_nonstatic_oop_maps();
  OopMapBlock* end = block + map_count;
  while (block != end) {
    oops += block->count();
    block++;
  }
  return oops;
}

int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_size_in_bytes();
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::PAYLOAD:
      return payload_size_in_bytes();
    default:
      ShouldNotReachHere();
  }
}

int InlineKlass::layout_alignment(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_alignment();
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
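      // Note: atomic layouts are assumed to be padded to a power-of-two size
      // and aligned on that size, so the layout size doubles as its alignment.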
      return atomic_size_in_bytes();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Layout not available");
      return nullable_atomic_size_in_bytes();
    case LayoutKind::PAYLOAD:
      return payload_alignment();
    default:
      ShouldNotReachHere();
  }
}

bool InlineKlass::is_layout_supported(LayoutKind lk) {
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      return has_non_atomic_layout();
    case LayoutKind::ATOMIC_FLAT:
      return has_atomic_layout();
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      return has_nullable_atomic_layout();
    case LayoutKind::PAYLOAD:
      return true;
    default:
      ShouldNotReachHere();
  }
}

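// A NULLABLE_ATOMIC_FLAT payload carries a null marker byte in addition to
// the field bytes. As an illustrative sketch (the actual offsets are computed
// by the layout builder, not fixed as drawn here):
//
//   [ field bytes ................ ][ null marker byte ]
//   ^ start of payload               ^ null_marker_offset()
//
// mark_payload_as_null()/mark_payload_as_non_null() write that byte and
// is_payload_marked_as_null() reads it, which is why the nullable case below
// needs special handling before the actual copy.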
void InlineKlass::copy_payload_to_addr(void* src, void* dst, LayoutKind lk, bool dest_is_initialized) {
  assert(is_layout_supported(lk), "Unsupported layout");
  assert(lk != LayoutKind::REFERENCE && lk != LayoutKind::UNKNOWN, "Sanity check");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)src)) {
        if (!contains_oops()) {
          mark_payload_as_null((address)dst);
          return;
        }
        // Copy the null_reset value to dest
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(data_for_oop(null_reset_value()), dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(data_for_oop(null_reset_value()), dst, this, lk);
        }
      } else {
        // Copy has to be performed, even if this is an empty value, because of the null marker
        mark_payload_as_non_null((address)src);
        if (dest_is_initialized) {
          HeapAccess<>::value_copy(src, dst, this, lk);
        } else {
          HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
        }
      }
      break;
    }
    case LayoutKind::PAYLOAD:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      if (is_empty_inline_type()) return; // nothing to do
      if (dest_is_initialized) {
        HeapAccess<>::value_copy(src, dst, this, lk);
      } else {
        HeapAccess<IS_DEST_UNINITIALIZED>::value_copy(src, dst, this, lk);
      }
      break;
    }
    default:
      ShouldNotReachHere();
  }
}

oop InlineKlass::read_payload_from_addr(oop src, int offset, LayoutKind lk, TRAPS) {
  assert(src != nullptr, "Must be");
  assert(is_layout_supported(lk), "Unsupported layout");
  switch(lk) {
    case LayoutKind::NULLABLE_ATOMIC_FLAT: {
      if (is_payload_marked_as_null((address)((char*)(oopDesc*)src + offset))) {
        return nullptr;
      }
    } // Fallthrough
    case LayoutKind::PAYLOAD:
    case LayoutKind::ATOMIC_FLAT:
    case LayoutKind::NON_ATOMIC_FLAT: {
      if (is_empty_inline_type()) {
        return default_value();
      }
      Handle obj_h(THREAD, src);
      oop res = allocate_instance_buffer(CHECK_NULL);
      copy_payload_to_addr((void*)((char*)(oopDesc*)obj_h() + offset), data_for_oop(res), lk, false);
      if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
        if (is_payload_marked_as_null(data_for_oop(res))) {
          return nullptr;
        }
      }
      return res;
    }
    default:
      ShouldNotReachHere();
  }
}

void InlineKlass::write_value_to_addr(oop src, void* dst, LayoutKind lk, bool dest_is_initialized, TRAPS) {
  void* src_addr = nullptr;
  if (src == nullptr) {
    if (lk != LayoutKind::NULLABLE_ATOMIC_FLAT) {
      THROW_MSG(vmSymbols::java_lang_NullPointerException(), "Value is null");
    }
    src_addr = data_for_oop(null_reset_value());
  } else {
    src_addr = data_for_oop(src);
    if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
      mark_payload_as_non_null((address)src_addr);
    }
  }
  copy_payload_to_addr(src_addr, dst, lk, dest_is_initialized);
}

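// Illustrative (hypothetical) call sequence for a flat field of this klass
// stored at offset 'off' inside object 'holder'; real callers live in the
// interpreter and runtime support code:
//
//   vk->write_value_to_addr(v, ((char*)(oopDesc*)holder) + off,
//                           LayoutKind::NULLABLE_ATOMIC_FLAT,
//                           true /* dest_is_initialized */, CHECK);
//   oop v2 = vk->read_payload_from_addr(holder, off,
//                                       LayoutKind::NULLABLE_ATOMIC_FLAT, CHECK_NULL);
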
// Arrays of inline types

bool InlineKlass::flat_array() {
  if (!UseFlatArray) {
    return false;
  }
  // Too big
  int elem_bytes = payload_size_in_bytes();
  if ((FlatArrayElementMaxSize >= 0) && (elem_bytes > FlatArrayElementMaxSize)) {
    return false;
  }
  // Too many embedded oops
  if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
    return false;
  }
  // Declared atomic but not naturally atomic
  if (must_be_atomic() && !is_naturally_atomic()) {
    return false;
  }
  // The VM enforces atomic access for inline type arrays (InlineArrayAtomicAccess),
  // but this layout is not naturally atomic
  if (InlineArrayAtomicAccess && !is_naturally_atomic()) {
    return false;
  }
  return true;
}

ObjArrayKlass* InlineKlass::null_free_reference_array(TRAPS) {
  if (Atomic::load_acquire(adr_null_free_reference_array_klass()) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    // Check if update has already taken place
    if (null_free_reference_array_klass() == nullptr) {
      ObjArrayKlass* k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, true, CHECK_NULL);

      // Use 'release' to pair with the lock-free load above
      Atomic::release_store(adr_null_free_reference_array_klass(), k);
    }
  }
  return null_free_reference_array_klass();
}


// There's no reason for this method to have a TRAPS argument
FlatArrayKlass* InlineKlass::flat_array_klass(LayoutKind lk, TRAPS) {
  FlatArrayKlass* volatile* adr_flat_array_klass = nullptr;
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_non_atomic_flat_array_klass();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_atomic_flat_array_klass();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_nullable_atomic_flat_array_klass();
      break;
    default:
      ShouldNotReachHere();
  }

  if (Atomic::load_acquire(adr_flat_array_klass) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    if (*adr_flat_array_klass == nullptr) {
      FlatArrayKlass* k = FlatArrayKlass::allocate_klass(this, lk, CHECK_NULL);
      Atomic::release_store(adr_flat_array_klass, k);
    }
  }
  return *adr_flat_array_klass;
}

FlatArrayKlass* InlineKlass::flat_array_klass_or_null(LayoutKind lk) {
  FlatArrayKlass* volatile* adr_flat_array_klass = nullptr;
  switch(lk) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_non_atomic_flat_array_klass();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_atomic_flat_array_klass();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_atomic_layout(), "Must be");
      adr_flat_array_klass = adr_nullable_atomic_flat_array_klass();
      break;
    default:
      ShouldNotReachHere();
  }

  // Need load-acquire for lock-free read
  FlatArrayKlass* k = Atomic::load_acquire(adr_flat_array_klass);
  return k;
}

// Inline type arguments are not passed by reference, instead each
// field of the inline type is passed as an argument. This helper
// function collects the flattened fields (recursively) in a list.
// Included with each field's type is its offset in the inline type:
// the i2c and c2i adapters need it to load or store fields. Finally,
// the list of fields is sorted in order of increasing offsets: the
// adapters and the compiled code need to agree upon the order of fields.
//
// The list of basic types that is returned starts with a T_METADATA
// and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
// delimiters. Every entry between the two is a field of the inline
// type. If there's an embedded inline type in the list, it also starts
// with a T_METADATA and ends with a T_VOID. This is so we can
// generate a unique fingerprint for the method's adapters and we can
// generate the list of basic types from the interpreter point of view
// (inline types passed as reference: iterate on the list until a
// T_METADATA, drop everything until and including the closing
// T_VOID) or the compiler point of view (each field of the inline
// types is an argument: drop all T_METADATA/T_VOID from the list).
//
// Value classes could also have fields in abstract super value classes.
// Use a HierarchicalFieldStream to get them as well.
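//
// As an illustration (hypothetical value class; the offsets shown are made
// up, real ones come from the field layout):
//
//   value class Point { int x; int y; }
//
// produces the list:
//
//   T_METADATA(Point)  T_INT(x, off=4)  T_INT(y, off=8)  T_VOID(Point)
//
// The interpreter view keeps only the reference to Point (drop everything
// from T_METADATA up to and including the closing T_VOID); the compiler view
// keeps the two T_INT entries (drop the T_METADATA/T_VOID delimiters).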
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off) {
  int count = 0;
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  for (HierarchicalFieldStream<JavaFieldStream> fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    int offset = base_off + fs.offset() - (base_off > 0 ? first_field_offset() : 0);
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve klass of flat field and recursively collect fields
      Klass* vk = get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.signature(), offset);
      count += type2size[bt];
    }
  }
  int offset = base_off + size_helper() * HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  if (base_off == 0) {
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}

void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_pack_handler_jobject()) = buffered_blob->pack_fields_jobject();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}

void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
    *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
    *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}

void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}

void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = nullptr;
    *((address*)adr_pack_handler_jobject()) = nullptr;
    *((address*)adr_unpack_handler()) = nullptr;
  }
}

// Can this inline type be passed as multiple values?
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}

// Can this inline type be returned as multiple values?
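// (init == true is passed from initialize_calling_convention(), before
// return_regs() has been populated.)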
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}

// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;

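  // Walk the extended signature: j tracks the current VMRegPair. Entry 0 of
  // regs carries the (tagged) InlineKlass pointer (see returned_inline_klass()),
  // so field scanning starts at j = 1. T_METADATA delimiters consume no register
  // pair; a T_VOID consumes one only when it is the second half of a preceding
  // T_LONG or T_DOUBLE.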
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop v = *(oop*)loc;
      assert(v == nullptr || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Update oop fields in registers from handles after a safepoint
void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  assert(InlineTypeReturnedAsFields, "inline types should be returned as fields");
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  assert(regs != nullptr, "inconsistent");

  int j = 1;
  for (int i = 0, k = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      *(oop*)loc = handles.at(k++)();
    }
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}

// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;
  int k = 0;
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      continue;
    }
    if (bt == T_VOID) {
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      new_vt->long_field_put(off, *(jlong*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off, *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
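    // Scribble a marker over the consumed register location (presumably to
    // make any stale read of the now-invalid value fail fast).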
    *(intptr_t*)loc = 0xDEAD;
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}

// Check whether the return register holds a tagged InlineKlass pointer
// (fields returned in registers) rather than an oop; return the klass if so,
// nullptr otherwise.
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map) {
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  assert(oopDesc::is_oop_or_null(cast_to_oop(ptr), true),
         "Bad oop return: " PTR_FORMAT, ptr);
  return nullptr;
}

// CDS support

void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
  InstanceKlass::metaspace_pointers_do(it);

  it->push((Klass**)adr_non_atomic_flat_array_klass());
  it->push((Klass**)adr_atomic_flat_array_klass());
  it->push((Klass**)adr_nullable_atomic_flat_array_klass());
  it->push((Klass**)adr_null_free_reference_array_klass());
}

void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // Update the fixed block pointer to point to the "buffered" copy of this class
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  ArchivePtrMarker::mark_pointer((address*)&_adr_inlineklass_fixed_block);

  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->remove_unshareable_info();
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->remove_unshareable_info();
  }
}

void InlineKlass::remove_java_mirror() {
  InstanceKlass::remove_java_mirror();
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->remove_java_mirror();
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->remove_java_mirror();
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->remove_java_mirror();
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->remove_java_mirror();
  }
}

void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) {
  InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
  if (non_atomic_flat_array_klass() != nullptr) {
    non_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (atomic_flat_array_klass() != nullptr) {
    atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (nullable_atomic_flat_array_klass() != nullptr) {
    nullable_atomic_flat_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
  if (null_free_reference_array_klass() != nullptr) {
    null_free_reference_array_klass()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
}

// oop verify

void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}

void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}