1 /*
  2  * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "cds/archiveUtils.hpp"
 27 #include "cds/cdsConfig.hpp"
 28 #include "classfile/vmSymbols.hpp"
 29 #include "code/codeCache.hpp"
 30 #include "gc/shared/barrierSet.hpp"
 31 #include "gc/shared/collectedHeap.inline.hpp"
 32 #include "gc/shared/gcLocker.inline.hpp"
 33 #include "interpreter/interpreter.hpp"
 34 #include "logging/log.hpp"
 35 #include "memory/metaspaceClosure.hpp"
 36 #include "memory/metadataFactory.hpp"
 37 #include "oops/access.hpp"
 38 #include "oops/compressedOops.inline.hpp"
 39 #include "oops/fieldStreams.inline.hpp"
 40 #include "oops/flatArrayKlass.hpp"
 41 #include "oops/inlineKlass.inline.hpp"
 42 #include "oops/instanceKlass.inline.hpp"
 43 #include "oops/method.hpp"
 44 #include "oops/oop.inline.hpp"
 45 #include "oops/objArrayKlass.hpp"
 46 #include "runtime/fieldDescriptor.inline.hpp"
 47 #include "runtime/handles.inline.hpp"
 48 #include "runtime/safepointVerifiers.hpp"
 49 #include "runtime/sharedRuntime.hpp"
 50 #include "runtime/signature.hpp"
 51 #include "runtime/thread.inline.hpp"
 52 #include "utilities/copy.hpp"
 53 
// Constructor used during normal class loading, driven by the ClassFileParser.
InlineKlass::InlineKlass(const ClassFileParser& parser)
    : InstanceKlass(parser, InlineKlass::Kind) {
  // Inline type instances carry a dedicated mark word prototype so they can be
  // recognized from the object header alone.
  set_prototype_header(markWord::inline_type_prototype());
  assert(is_inline_klass(), "sanity");
  assert(prototype_header().is_inline_type(), "sanity");
}
 61 
// Default constructor, used only by CDS: archived klasses already carry the
// state normally produced by parsing, so no initialization happens here.
InlineKlass::InlineKlass() {
  assert(CDSConfig::is_dumping_archive() || UseSharedSpaces, "only for CDS");
}
 65 
// Initializes the InlineKlassFixedBlock, a fixed-offset area of the klass that
// generated code reads directly (calling convention handlers, flat layout
// bookkeeping). Everything starts out "absent": nullptr / 0 / -1.
void InlineKlass::init_fixed_block() {
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  // Addresses used for inline type calling convention
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  // Lazily-created array klass cache (see value_array_klass()).
  *((address*)adr_value_array_klasses()) = nullptr;
  // Offsets into the java mirror; 0 means "not yet assigned".
  set_default_value_offset(0);
  set_null_reset_value_offset(0);
  // Flat layout metrics; -1 means "not yet computed / layout not available".
  set_first_field_offset(-1);
  set_payload_size_in_bytes(-1);
  set_payload_alignment(-1);
  set_non_atomic_size_in_bytes(-1);
  set_non_atomic_alignment(-1);
  set_atomic_size_in_bytes(-1);
  set_nullable_size_in_bytes(-1);
  set_null_marker_offset(-1);
}
 87 
// Records 'val' as this klass's default instance. It is stored in the java
// mirror (at default_value_offset()) so it stays strongly reachable and is
// loadable from generated code.
void InlineKlass::set_default_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(default_value_offset(), val);
}
 95 
// Records the "null reset" instance for this klass, stored in the java mirror
// like the default value (at null_reset_value_offset()).
void InlineKlass::set_null_reset_value(oop val) {
  assert(val != nullptr, "Sanity check");
  assert(oopDesc::is_oop(val), "Sanity check");
  assert(val->is_inline_type(), "Sanity check");
  assert(val->klass() == this, "sanity check");
  java_mirror()->obj_field_put(null_reset_value_offset(), val);
}
103 
104 instanceOop InlineKlass::allocate_instance(TRAPS) {
105   int size = size_helper();  // Query before forming handle.
106 
107   instanceOop oop = (instanceOop)Universe::heap()->obj_allocate(this, size, CHECK_NULL);
108   assert(oop->mark().is_inline_type(), "Expected inline type");
109   return oop;
110 }
111 
112 instanceOop InlineKlass::allocate_instance_buffer(TRAPS) {
113   int size = size_helper();  // Query before forming handle.
114 
115   instanceOop oop = (instanceOop)Universe::heap()->obj_buffer_allocate(this, size, CHECK_NULL);
116   assert(oop->mark().is_inline_type(), "Expected inline type");
117   return oop;
118 }
119 
120 int InlineKlass::nonstatic_oop_count() {
121   int oops = 0;
122   int map_count = nonstatic_oop_map_count();
123   OopMapBlock* block = start_of_nonstatic_oop_maps();
124   OopMapBlock* end = block + map_count;
125   while (block != end) {
126     oops += block->count();
127     block++;
128   }
129   return oops;
130 }
131 
132 int InlineKlass::layout_size_in_bytes(LayoutKind kind) const {
133   switch(kind) {
134     case LayoutKind::NON_ATOMIC_FLAT:
135       assert(has_non_atomic_layout(), "Layout not available");
136       return non_atomic_size_in_bytes();
137       break;
138     case LayoutKind::ATOMIC_FLAT:
139       assert(has_atomic_layout(), "Layout not available");
140       return atomic_size_in_bytes();
141       break;
142     case LayoutKind::NULLABLE_ATOMIC_FLAT:
143       assert(has_nullable_layout(), "Layout not available");
144       return nullable_size_in_bytes();
145       break;
146     case PAYLOAD:
147       return payload_size_in_bytes();
148       break;
149     default:
150       ShouldNotReachHere();
151   }
152 }
153 
// Returns the required alignment (in bytes) of the given flat layout.
// NOTE(review): for the ATOMIC_FLAT and NULLABLE_ATOMIC_FLAT cases the
// alignment returned is the layout *size* — presumably those layouts are
// padded so one aligned atomic access covers the whole payload; confirm
// against the layout builder before changing.
int InlineKlass::layout_alignment(LayoutKind kind) const {
  switch(kind) {
    case LayoutKind::NON_ATOMIC_FLAT:
      assert(has_non_atomic_layout(), "Layout not available");
      return non_atomic_alignment();
      break;
    case LayoutKind::ATOMIC_FLAT:
      assert(has_atomic_layout(), "Layout not available");
      return atomic_size_in_bytes();
      break;
    case LayoutKind::NULLABLE_ATOMIC_FLAT:
      assert(has_nullable_layout(), "Layout not available");
      return nullable_size_in_bytes();
      break;
    case LayoutKind::PAYLOAD:
      return payload_alignment();
      break;
    default:
      ShouldNotReachHere();
  }
}
175 
176 oop InlineKlass::read_flat_field(oop obj, int offset, LayoutKind lk, TRAPS) {
177 
178   if (lk == LayoutKind::NULLABLE_ATOMIC_FLAT) {
179     InstanceKlass* recv = InstanceKlass::cast(obj->klass());
180     int nm_offset = offset + (null_marker_offset() - first_field_offset());
181     jbyte nm = obj->byte_field(nm_offset);
182     if (nm == 0) {
183       return nullptr;
184     }
185   }
186   oop res = nullptr;
187   assert(is_initialized() || is_being_initialized()|| is_in_error_state(),
188         "Must be initialized, initializing or in a corner case of an escaped instance of a class that failed its initialization");
189   if (is_empty_inline_type()) {
190     res = (instanceOop)default_value();
191   } else {
192     Handle obj_h(THREAD, obj);
193     res = allocate_instance_buffer(CHECK_NULL);
194     inline_copy_payload_to_new_oop(((char*)(oopDesc*)obj_h()) + offset, res, lk);
195   }
196   assert(res != nullptr, "Must be set in one of two paths above");
197   return res;
198 }
199 
// Writes 'value' into the flat field of layout 'lk' at 'offset' inside 'obj'.
// Throws NullPointerException when null is written to a null-free field.
void InlineKlass::write_flat_field(oop obj, int offset, oop value, bool is_null_free, LayoutKind lk, TRAPS) {
  if (is_null_free && value == nullptr) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  // A null-free field must not use the nullable flat layout.
  assert(!is_null_free || (lk == LayoutKind::ATOMIC_FLAT || lk == LayoutKind::NON_ATOMIC_FLAT || lk == LayoutKind::REFERENCE || lk == LayoutKind::PAYLOAD), "Consistency check");
  inline_copy_oop_to_payload(value, ((char*)(oopDesc*)obj) + offset, lk);
}
207 
208 // Arrays of...
209 
210 bool InlineKlass::flat_array() {
211   if (!UseFlatArray) {
212     return false;
213   }
214   // Too big
215   int elem_bytes = payload_size_in_bytes();
216   if ((FlatArrayElementMaxSize >= 0) && (elem_bytes > FlatArrayElementMaxSize)) {
217     return false;
218   }
219   // Too many embedded oops
220   if ((FlatArrayElementMaxOops >= 0) && (nonstatic_oop_count() > FlatArrayElementMaxOops)) {
221     return false;
222   }
223   // Declared atomic but not naturally atomic.
224   if (must_be_atomic() && !is_naturally_atomic()) {
225     return false;
226   }
227   // VM enforcing InlineArrayAtomicAccess only...
228   if (InlineArrayAtomicAccess && (!is_naturally_atomic())) {
229     return false;
230   }
231   return true;
232 }
233 
// Returns (creating on first use) the n-dimensional array klass whose element
// type is this value class. The cached klass is flat or a regular object
// array klass depending on flat_array().
Klass* InlineKlass::value_array_klass(int n, TRAPS) {
  if (Atomic::load_acquire(adr_value_array_klasses()) == nullptr) {
    // Atomic creation of array_klasses
    RecursiveLocker rl(MultiArray_lock, THREAD);

    // Check if update has already taken place
    if (value_array_klasses() == nullptr) {
      ArrayKlass* k;
      if (flat_array()) {
        k = FlatArrayKlass::allocate_klass(this, CHECK_NULL);
      } else {
        k = ObjArrayKlass::allocate_objArray_klass(class_loader_data(), 1, this, true, CHECK_NULL);

      }
      // use 'release' to pair with lock-free load
      Atomic::release_store(adr_value_array_klasses(), k);
    }
  }
  // Cache holds the 1-dimensional klass; derive the n-dimensional one from it.
  ArrayKlass* ak = value_array_klasses();
  return ak->array_klass(n, THREAD);
}
255 
256 Klass* InlineKlass::value_array_klass_or_null(int n) {
257   // Need load-acquire for lock-free read
258   ArrayKlass* ak = Atomic::load_acquire(adr_value_array_klasses());
259   if (ak == nullptr) {
260     return nullptr;
261   } else {
262     return ak->array_klass_or_null(n);
263   }
264 }
265 
// Convenience overload: the one-dimensional array klass of this value class.
Klass* InlineKlass::value_array_klass(TRAPS) {
  return value_array_klass(1, THREAD);
}
269 
// Convenience overload: the one-dimensional array klass, or nullptr if it has
// not been created yet.
Klass* InlineKlass::value_array_klass_or_null() {
  return value_array_klass_or_null(1);
}
273 
274 // Inline type arguments are not passed by reference, instead each
275 // field of the inline type is passed as an argument. This helper
276 // function collects the flat field (recursively)
277 // in a list. Included with the field's type is
278 // the offset of each field in the inline type: i2c and c2i adapters
279 // need that to load or store fields. Finally, the list of fields is
280 // sorted in order of increasing offsets: the adapters and the
281 // compiled code need to agree upon the order of fields.
282 //
283 // The list of basic types that is returned starts with a T_METADATA
284 // and ends with an extra T_VOID. T_METADATA/T_VOID pairs are used as
285 // delimiters. Every entry between the two is a field of the inline
286 // type. If there's an embedded inline type in the list, it also starts
287 // with a T_METADATA and ends with a T_VOID. This is so we can
288 // generate a unique fingerprint for the method's adapters and we can
289 // generate the list of basic types from the interpreter point of view
290 // (inline types passed as reference: iterate on the list until a
291 // T_METADATA, drop everything until and including the closing
292 // T_VOID) or the compiler point of view (each field of the inline
293 // types is an argument: drop all T_METADATA/T_VOID from the list).
294 //
295 // Value classes could also have fields in abstract super value classes.
296 // Use a HierarchicalFieldStream to get them as well.
// Appends this value class's scalarized field list to 'sig' (see the block
// comment above for the T_METADATA/T_VOID delimiter protocol). 'base_off' is
// the offset of this payload inside the outermost container; 0 marks the
// outermost (non-nested) call. Returns the number of argument slots added.
int InlineKlass::collect_fields(GrowableArray<SigEntry>* sig, int base_off) {
  int count = 0;
  // Opening delimiter for this (possibly nested) inline type.
  SigEntry::add_entry(sig, T_METADATA, name(), base_off);
  for (HierarchicalFieldStream<JavaFieldStream> fs(this); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) continue;
    // For nested payloads (base_off > 0), rebase the field offset so it is
    // relative to the enclosing container rather than this klass's payload.
    int offset = base_off + fs.offset() - (base_off > 0 ? first_field_offset() : 0);
    // TODO 8284443 Use different heuristic to decide what should be scalarized in the calling convention
    if (fs.is_flat()) {
      // Resolve klass of flat field and recursively collect fields
      Klass* vk = get_inline_type_field_klass(fs.index());
      count += InlineKlass::cast(vk)->collect_fields(sig, offset);
    } else {
      BasicType bt = Signature::basic_type(fs.signature());
      SigEntry::add_entry(sig, bt, fs.signature(), offset);
      count += type2size[bt];  // longs/doubles contribute two slots
    }
  }
  // Closing T_VOID delimiter, given an offset past the payload end so that
  // sorting by offset keeps it last.
  int offset = base_off + size_helper()*HeapWordSize - (base_off > 0 ? first_field_offset() : 0);
  SigEntry::add_entry(sig, T_VOID, name(), offset);
  if (base_off == 0) {
    // Only the outermost call sorts: adapters and compiled code must agree on
    // the canonical, offset-ordered field order.
    sig->sort(SigEntry::compare);
  }
  assert(sig->at(0)._bt == T_METADATA && sig->at(sig->length()-1)._bt == T_VOID, "broken structure");
  return count;
}
322 
// Computes and records the scalarized calling convention for this value
// class: the extended signature, the return-register mapping, and the
// pack/unpack adapter entry points.
void InlineKlass::initialize_calling_convention(TRAPS) {
  // Because the pack and unpack handler addresses need to be loadable from generated code,
  // they are stored at a fixed offset in the klass metadata. Since inline type klasses do
  // not have a vtable, the vtable offset is used to store these addresses.
  if (InlineTypeReturnedAsFields || InlineTypePassFieldsAsArgs) {
    ResourceMark rm;
    GrowableArray<SigEntry> sig_vk;
    int nb_fields = collect_fields(&sig_vk);
    // Persist the scalarized signature in metadata for the adapters.
    Array<SigEntry>* extended_sig = MetadataFactory::new_array<SigEntry>(class_loader_data(), sig_vk.length(), CHECK);
    *((Array<SigEntry>**)adr_extended_sig()) = extended_sig;
    for (int i = 0; i < sig_vk.length(); i++) {
      extended_sig->at_put(i, sig_vk.at(i));
    }
    if (can_be_returned_as_fields(/* init= */ true)) {
      // Extra leading slot: the klass pointer returned alongside the fields.
      nb_fields++;
      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, nb_fields);
      sig_bt[0] = T_METADATA;
      SigEntry::fill_sig_bt(&sig_vk, sig_bt+1);
      VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, nb_fields);
      int total = SharedRuntime::java_return_convention(sig_bt, regs, nb_fields);

      if (total > 0) {
        // The platform can return all fields in registers: record the mapping
        // and generate the buffered pack/unpack adapters.
        Array<VMRegPair>* return_regs = MetadataFactory::new_array<VMRegPair>(class_loader_data(), nb_fields, CHECK);
        *((Array<VMRegPair>**)adr_return_regs()) = return_regs;
        for (int i = 0; i < nb_fields; i++) {
          return_regs->at_put(i, regs[i]);
        }

        BufferedInlineTypeBlob* buffered_blob = SharedRuntime::generate_buffered_inline_type_adapter(this);
        *((address*)adr_pack_handler()) = buffered_blob->pack_fields();
        *((address*)adr_pack_handler_jobject()) = buffered_blob->pack_fields_jobject();
        *((address*)adr_unpack_handler()) = buffered_blob->unpack_fields();
        assert(CodeCache::find_blob(pack_handler()) == buffered_blob, "lost track of blob");
        assert(can_be_returned_as_fields(), "sanity");
      }
    }
    if (!can_be_returned_as_fields() && !can_be_passed_as_fields()) {
      // The scalarized convention ended up unused: release the signature array.
      MetadataFactory::free_array<SigEntry>(class_loader_data(), extended_sig);
      assert(return_regs() == nullptr, "sanity");
    }
  }
}
365 
// Releases metadata owned by this klass at unload time: the calling
// convention arrays, the adapter blobs, then the InstanceKlass contents.
void InlineKlass::deallocate_contents(ClassLoaderData* loader_data) {
  if (extended_sig() != nullptr) {
    MetadataFactory::free_array<SigEntry>(loader_data, extended_sig());
    *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  }
  if (return_regs() != nullptr) {
    MetadataFactory::free_array<VMRegPair>(loader_data, return_regs());
    *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  }
  cleanup_blobs();
  InstanceKlass::deallocate_contents(loader_data);
}
378 
// Static cleanup entry point: frees the adapter blobs of 'ik'.
void InlineKlass::cleanup(InlineKlass* ik) {
  ik->cleanup_blobs();
}
382 
// Frees the buffered inline type adapter blob, if one was generated, and
// clears all three handler entry points (they all point into the same blob).
void InlineKlass::cleanup_blobs() {
  if (pack_handler() != nullptr) {
    CodeBlob* buffered_blob = CodeCache::find_blob(pack_handler());
    assert(buffered_blob->is_buffered_inline_type_blob(), "bad blob type");
    BufferBlob::free((BufferBlob*)buffered_blob);
    *((address*)adr_pack_handler()) = nullptr;
    *((address*)adr_pack_handler_jobject()) = nullptr;
    *((address*)adr_unpack_handler()) = nullptr;
  }
}
393 
// Can this inline type be passed as multiple values?
// Currently gated only by the global InlineTypePassFieldsAsArgs flag.
bool InlineKlass::can_be_passed_as_fields() const {
  return InlineTypePassFieldsAsArgs;
}
398 
// Can this inline type be returned as multiple values?
// 'init' is passed as true from initialize_calling_convention() while
// return_regs() is still being computed, so the query succeeds before the
// register mapping has been recorded.
bool InlineKlass::can_be_returned_as_fields(bool init) const {
  return InlineTypeReturnedAsFields && (init || return_regs() != nullptr);
}
403 
// Create handles for all oop fields returned in registers that are going to be live across a safepoint
void InlineKlass::save_oop_fields(const RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
  Thread* thread = Thread::current();
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();
  int j = 1;  // register slot index; slot 0 holds the T_METADATA (klass) entry

  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_OBJECT || bt == T_ARRAY) {
      VMRegPair pair = regs->at(j);
      address loc = reg_map.location(pair.first(), nullptr);
      oop v = *(oop*)loc;
      assert(v == nullptr || oopDesc::is_oop(v), "not an oop?");
      assert(Universe::heap()->is_in_or_null(v), "must be heap pointer");
      handles.push(Handle(thread, v));
    }
    if (bt == T_METADATA) {
      // Delimiter entries do not consume a register slot.
      continue;
    }
    if (bt == T_VOID &&
        sig_vk->at(i-1)._bt != T_LONG &&
        sig_vk->at(i-1)._bt != T_DOUBLE) {
      // Closing delimiter: no slot. A T_VOID following a long/double is that
      // value's second half and does consume a slot (falls through to j++).
      continue;
    }
    j++;
  }
  assert(j == regs->length(), "missed a field?");
}
433 
434 // Update oop fields in registers from handles after a safepoint
435 void InlineKlass::restore_oop_results(RegisterMap& reg_map, GrowableArray<Handle>& handles) const {
436   assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields");
437   const Array<SigEntry>* sig_vk = extended_sig();
438   const Array<VMRegPair>* regs = return_regs();
439   assert(regs != nullptr, "inconsistent");
440 
441   int j = 1;
442   for (int i = 0, k = 0; i < sig_vk->length(); i++) {
443     BasicType bt = sig_vk->at(i)._bt;
444     if (bt == T_OBJECT || bt == T_ARRAY) {
445       VMRegPair pair = regs->at(j);
446       address loc = reg_map.location(pair.first(), nullptr);
447       *(oop*)loc = handles.at(k++)();
448     }
449     if (bt == T_METADATA) {
450       continue;
451     }
452     if (bt == T_VOID &&
453         sig_vk->at(i-1)._bt != T_LONG &&
454         sig_vk->at(i-1)._bt != T_DOUBLE) {
455       continue;
456     }
457     j++;
458   }
459   assert(j == regs->length(), "missed a field?");
460 }
461 
// Fields are in registers. Create an instance of the inline type and
// initialize it with the values of the fields.
oop InlineKlass::realloc_result(const RegisterMap& reg_map, const GrowableArray<Handle>& handles, TRAPS) {
  oop new_vt = allocate_instance(CHECK_NULL);
  const Array<SigEntry>* sig_vk = extended_sig();
  const Array<VMRegPair>* regs = return_regs();

  int j = 1;  // register slot index; slot 0 holds the T_METADATA (klass) entry
  int k = 0;  // index into the oop handles saved by save_oop_fields()
  for (int i = 0; i < sig_vk->length(); i++) {
    BasicType bt = sig_vk->at(i)._bt;
    if (bt == T_METADATA) {
      // Opening delimiter: no field, no register slot.
      continue;
    }
    if (bt == T_VOID) {
      // A T_VOID after a long/double is the value's second half and consumes
      // a register slot; a closing delimiter does not.
      if (sig_vk->at(i-1)._bt == T_LONG ||
          sig_vk->at(i-1)._bt == T_DOUBLE) {
        j++;
      }
      continue;
    }
    int off = sig_vk->at(i)._offset;
    assert(off > 0, "offset in object should be positive");
    VMRegPair pair = regs->at(j);
    address loc = reg_map.location(pair.first(), nullptr);
    switch(bt) {
    case T_BOOLEAN: {
      new_vt->bool_field_put(off, *(jboolean*)loc);
      break;
    }
    case T_CHAR: {
      new_vt->char_field_put(off, *(jchar*)loc);
      break;
    }
    case T_BYTE: {
      new_vt->byte_field_put(off, *(jbyte*)loc);
      break;
    }
    case T_SHORT: {
      new_vt->short_field_put(off, *(jshort*)loc);
      break;
    }
    case T_INT: {
      new_vt->int_field_put(off, *(jint*)loc);
      break;
    }
    case T_LONG: {
#ifdef _LP64
      // Copies the raw 64-bit value via the double accessor on 64-bit platforms.
      new_vt->double_field_put(off,  *(jdouble*)loc);
#else
      Unimplemented();
#endif
      break;
    }
    case T_OBJECT:
    case T_ARRAY: {
      // Oops were handle-ized across the safepoint; read them back from the handles.
      Handle handle = handles.at(k++);
      new_vt->obj_field_put(off, handle());
      break;
    }
    case T_FLOAT: {
      new_vt->float_field_put(off,  *(jfloat*)loc);
      break;
    }
    case T_DOUBLE: {
      new_vt->double_field_put(off, *(jdouble*)loc);
      break;
    }
    default:
      ShouldNotReachHere();
    }
    *(intptr_t*)loc = 0xDEAD;  // poison the consumed slot to catch stale reads
    j++;
  }
  assert(j == regs->length(), "missed a field?");
  assert(k == handles.length(), "missed an oop?");
  return new_vt;
}
540 
// Check the return register for an InlineKlass oop
// Returns the InlineKlass* when the first return register holds a tagged
// (low-bit-set) klass pointer — meaning the value's fields are being returned
// in registers — or nullptr when it holds an ordinary oop.
InlineKlass* InlineKlass::returned_inline_klass(const RegisterMap& map) {
  // Ask the platform which register carries the first (T_METADATA) return slot.
  BasicType bt = T_METADATA;
  VMRegPair pair;
  int nb = SharedRuntime::java_return_convention(&bt, &pair, 1);
  assert(nb == 1, "broken");

  address loc = map.location(pair.first(), nullptr);
  intptr_t ptr = *(intptr_t*)loc;
  if (is_set_nth_bit(ptr, 0)) {
    // Return value is tagged, must be an InlineKlass pointer
    clear_nth_bit(ptr, 0);
    assert(Metaspace::contains((void*)ptr), "should be klass");
    InlineKlass* vk = (InlineKlass*)ptr;
    assert(vk->can_be_returned_as_fields(), "must be able to return as fields");
    return vk;
  }
  // Return value is not tagged, must be a valid oop
  assert(oopDesc::is_oop_or_null(cast_to_oop(ptr), true),
         "Bad oop return: " PTR_FORMAT, ptr);
  return nullptr;
}
563 
564 // CDS support
565 
566 void InlineKlass::metaspace_pointers_do(MetaspaceClosure* it) {
567   InstanceKlass::metaspace_pointers_do(it);
568 
569   InlineKlass* this_ptr = this;
570   it->push((Klass**)adr_value_array_klasses());
571 }
572 
// CDS dump-time: strip state that cannot be archived — the calling convention
// arrays and the JIT-generated handler addresses — and recurse into the
// cached array klasses.
void InlineKlass::remove_unshareable_info() {
  InstanceKlass::remove_unshareable_info();

  // update it to point to the "buffered" copy of this class.
  _adr_inlineklass_fixed_block = inlineklass_static_block();
  ArchivePtrMarker::mark_pointer((address*)&_adr_inlineklass_fixed_block);

  // Cleared here, to be recomputed after the archive is mapped at runtime.
  *((Array<SigEntry>**)adr_extended_sig()) = nullptr;
  *((Array<VMRegPair>**)adr_return_regs()) = nullptr;
  *((address*)adr_pack_handler()) = nullptr;
  *((address*)adr_pack_handler_jobject()) = nullptr;
  *((address*)adr_unpack_handler()) = nullptr;
  assert(pack_handler() == nullptr, "pack handler not null");
  if (value_array_klasses() != nullptr) {
    value_array_klasses()->remove_unshareable_info();
  }
}
590 
// CDS dump-time: drop the java mirror for this klass and for its cached
// array klasses.
void InlineKlass::remove_java_mirror() {
  InstanceKlass::remove_java_mirror();
  if (value_array_klasses() != nullptr) {
    value_array_klasses()->remove_java_mirror();
  }
}
597 
// CDS run-time: restore state stripped at dump time, including the cached
// array klasses (which are restored with the null class loader data).
void InlineKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, PackageEntry* pkg_entry, TRAPS) {
  InstanceKlass::restore_unshareable_info(loader_data, protection_domain, pkg_entry, CHECK);
  if (value_array_klasses() != nullptr) {
    value_array_klasses()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
  }
}
604 
605 // oop verify
606 
// Klass-level verification: in addition to the InstanceKlass checks, the mark
// word prototype must carry the inline type bit.
void InlineKlass::verify_on(outputStream* st) {
  InstanceKlass::verify_on(st);
  guarantee(prototype_header().is_inline_type(), "Prototype header is not inline type");
}
611 
// Instance-level verification: every instance's mark word must carry the
// inline type bit.
void InlineKlass::oop_verify_on(oop obj, outputStream* st) {
  InstanceKlass::oop_verify_on(obj, st);
  guarantee(obj->mark().is_inline_type(), "Header is not inline type");
}