1 /*
   2  * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciInlineKlass.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "oops/accessDecorators.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/graphKit.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/movenode.hpp"
  36 #include "opto/narrowptrnode.hpp"
  37 #include "opto/opcodes.hpp"
  38 #include "opto/phaseX.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/type.hpp"
  41 #include "utilities/globalDefinitions.hpp"
  42 
  43 // Clones the inline type to handle control flow merges involving multiple inline types.
  44 // The inputs are replaced by PhiNodes to represent the merged values for the given region.
  45 // init_with_top: inputs of phis above the returned InlineTypeNode are initialized to top.
  46 InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_non_null, bool init_with_top) {
  47   InlineTypeNode* vt = clone_if_required(gvn, map);
  48   const Type* t = Type::get_const_type(inline_klass());
  49   gvn->set_type(vt, t);
  50   vt->as_InlineType()->set_type(t);
  51 
  52   Node* const top = gvn->C->top();
  53 
  54   // Create a PhiNode for merging the oop values
  55   PhiNode* oop = PhiNode::make(region, init_with_top ? top : vt->get_oop(), t);
  56   gvn->set_type(oop, t);
  57   gvn->record_for_igvn(oop);
  58   vt->set_oop(*gvn, oop);
  59 
  60   // Create a PhiNode for merging the is_buffered values
  61   t = Type::get_const_basic_type(T_BOOLEAN);
  62   Node* is_buffered_node = PhiNode::make(region, init_with_top ? top : vt->get_is_buffered(), t);
  63   gvn->set_type(is_buffered_node, t);
  64   gvn->record_for_igvn(is_buffered_node);
  65   vt->set_req(IsBuffered, is_buffered_node);
  66 
  67   // Create a PhiNode for merging the null_marker values
  68   Node* null_marker_node;
  69   if (is_non_null) {
  70     null_marker_node = gvn->intcon(1);
  71   } else {
  72     t = Type::get_const_basic_type(T_BOOLEAN);
  73     null_marker_node = PhiNode::make(region, init_with_top ? top : vt->get_null_marker(), t);
  74     gvn->set_type(null_marker_node, t);
  75     gvn->record_for_igvn(null_marker_node);
  76   }
  77   vt->set_req(NullMarker, null_marker_node);
  78 
  79   // Create a PhiNode each for merging the field values
  80   for (uint i = 0; i < vt->field_count(); ++i) {
  81     ciType* type = vt->field_type(i);
  82     Node*  value = vt->field_value(i);
  83     // We limit scalarization for inline types with circular fields and can therefore observe nodes
  84     // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
  85     // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
  86     bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
  87     if (type->is_inlinetype() && no_circularity) {
  88       // Handle inline type fields recursively
  89       value = value->as_InlineType()->clone_with_phis(gvn, region, map);
  90     } else {
  91       t = Type::get_const_type(type);
  92       value = PhiNode::make(region, init_with_top ? top : value, t);
  93       gvn->set_type(value, t);
  94       gvn->record_for_igvn(value);
  95     }
  96     vt->set_field_value(i, value);
  97   }
  98   gvn->record_for_igvn(vt);
  99   return vt;
 100 }
 101 
 102 // Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
 103 // for the given region (see InlineTypeNode::clone_with_phis).
 104 bool InlineTypeNode::has_phi_inputs(Node* region) {
 105   // Check oop input
 106   bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
 107 #ifdef ASSERT
 108   if (result) {
 109     // Check all field value inputs for consistency
 110     for (uint i = Values; i < field_count(); ++i) {
 111       Node* n = in(i);
 112       if (n->is_InlineType()) {
 113         assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
 114       } else {
 115         assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
 116       }
 117     }
 118   }
 119 #endif
 120   return result;
 121 }
 122 
 123 // Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
 124 InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
 125   assert(inline_klass() == other->inline_klass(), "Merging incompatible types");
 126 
 127   // Merge oop inputs
 128   PhiNode* phi = get_oop()->as_Phi();
 129   phi->set_req(pnum, other->get_oop());
 130   if (transform) {
 131     set_oop(*gvn, gvn->transform(phi));
 132   }
 133 
 134   // Merge is_buffered inputs
 135   phi = get_is_buffered()->as_Phi();
 136   phi->set_req(pnum, other->get_is_buffered());
 137   if (transform) {
 138     set_req(IsBuffered, gvn->transform(phi));
 139   }
 140 
 141   // Merge null_marker inputs
 142   Node* null_marker = get_null_marker();
 143   if (null_marker->is_Phi()) {
 144     phi = null_marker->as_Phi();
 145     phi->set_req(pnum, other->get_null_marker());
 146     if (transform) {
 147       set_req(NullMarker, gvn->transform(phi));
 148     }
 149   } else {
 150     assert(null_marker->find_int_con(0) == 1, "only with a non-null inline type");
 151   }
 152 
 153   // Merge field values
 154   for (uint i = 0; i < field_count(); ++i) {
 155     Node* val1 =        field_value(i);
 156     Node* val2 = other->field_value(i);
 157     if (val1->is_InlineType()) {
 158       if (val2->is_Phi()) {
 159         val2 = gvn->transform(val2);
 160       }
 161       val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
 162     } else {
 163       assert(val1->is_Phi(), "must be a phi node");
 164       val1->set_req(pnum, val2);
 165     }
 166     if (transform) {
 167       set_field_value(i, gvn->transform(val1));
 168     }
 169   }
 170   return this;
 171 }
 172 
 173 // Adds a new merge path to an inline type node with phi inputs
 174 void InlineTypeNode::add_new_path(Node* region) {
 175   assert(has_phi_inputs(region), "must have phi inputs");
 176 
 177   PhiNode* phi = get_oop()->as_Phi();
 178   phi->add_req(nullptr);
 179   assert(phi->req() == region->req(), "must be same size as region");
 180 
 181   phi = get_is_buffered()->as_Phi();
 182   phi->add_req(nullptr);
 183   assert(phi->req() == region->req(), "must be same size as region");
 184 
 185   phi = get_null_marker()->as_Phi();
 186   phi->add_req(nullptr);
 187   assert(phi->req() == region->req(), "must be same size as region");
 188 
 189   for (uint i = 0; i < field_count(); ++i) {
 190     Node* val = field_value(i);
 191     if (val->is_InlineType()) {
 192       val->as_InlineType()->add_new_path(region);
 193     } else {
 194       val->as_Phi()->add_req(nullptr);
 195       assert(val->req() == region->req(), "must be same size as region");
 196     }
 197   }
 198 }
 199 
 200 Node* InlineTypeNode::field_value(uint index) const {
 201   assert(index < field_count(), "index out of bounds");
 202   return in(Values + index);
 203 }
 204 
 205 // Get the value of the field at the given offset.
 206 // If 'recursive' is true, flat inline type fields will be resolved recursively.
 207 Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
 208   // Find the declared field which contains the field we are looking for
 209   int index = inline_klass()->field_index_by_offset(offset);
 210   Node* value = field_value(index);
 211   assert(value != nullptr, "field value not found");
 212 
 213   if (!recursive || !field_is_flat(index) || value->is_top()) {
 214     assert(offset == field_offset(index), "offset mismatch");
 215     return value;
 216   }
 217 
 218   // Flat inline type field
 219   InlineTypeNode* vt = value->as_InlineType();
 220   if (offset == field_null_marker_offset(index)) {
 221     return vt->get_null_marker();
 222   } else {
 223     int sub_offset = offset - field_offset(index); // Offset of the flattened field inside the declared field
 224     sub_offset += vt->inline_klass()->payload_offset(); // Add header size
 225     return vt->field_value_by_offset(sub_offset, recursive);
 226   }
 227 }
 228 
 229 void InlineTypeNode::set_field_value(uint index, Node* value) {
 230   assert(index < field_count(), "index out of bounds");
 231   set_req(Values + index, value);
 232 }
 233 
 234 void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
 235   set_field_value(field_index(offset), value);
 236 }
 237 
 238 int InlineTypeNode::field_offset(uint index) const {
 239   assert(index < field_count(), "index out of bounds");
 240   return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
 241 }
 242 
 243 uint InlineTypeNode::field_index(int offset) const {
 244   uint i = 0;
 245   for (; i < field_count() && field_offset(i) != offset; i++) { }
 246   assert(i < field_count(), "field not found");
 247   return i;
 248 }
 249 
 250 ciType* InlineTypeNode::field_type(uint index) const {
 251   assert(index < field_count(), "index out of bounds");
 252   return inline_klass()->declared_nonstatic_field_at(index)->type();
 253 }
 254 
 255 bool InlineTypeNode::field_is_flat(uint index) const {
 256   assert(index < field_count(), "index out of bounds");
 257   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 258   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 259   return field->is_flat();
 260 }
 261 
 262 bool InlineTypeNode::field_is_null_free(uint index) const {
 263   assert(index < field_count(), "index out of bounds");
 264   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 265   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 266   return field->is_null_free();
 267 }
 268 
 269 bool InlineTypeNode::field_is_volatile(uint index) const {
 270   assert(index < field_count(), "index out of bounds");
 271   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 272   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 273   return field->is_volatile();
 274 }
 275 
 276 int InlineTypeNode::field_null_marker_offset(uint index) const {
 277   assert(index < field_count(), "index out of bounds");
 278   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 279   assert(field->is_flat(), "must be an inline type");
 280   return field->null_marker_offset();
 281 }
 282 
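// Appends the scalarized field values of this inline type to the safepoint. Flat fields are
// added recursively, followed by their null marker if the field is nullable. Non-flat inline
// type field values are pushed to the worklist for later scalarization. Returns the number of
// edges added to the safepoint.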
 283 uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) {
 284   uint cnt = 0;
 285   for (uint i = 0; i < field_count(); ++i) {
 286     Node* value = field_value(i);
 287     if (field_is_flat(i)) {
 288       InlineTypeNode* vt = value->as_InlineType();
 289       cnt += vt->add_fields_to_safepoint(worklist, sfpt);
 290       if (!field_is_null_free(i)) {
 291         // The null marker of a flat field is added right after we scalarize that field
 292         sfpt->add_req(vt->get_null_marker());
 293         cnt++;
 294       }
 295       continue;
 296     }
 297     if (value->is_InlineType()) {
 298       // Add inline type to the worklist to process later
 299       worklist.push(value);
 300     }
 301     sfpt->add_req(value);
 302     cnt++;
 303   }
 304   return cnt;
 305 }
 306 
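// Replaces the debug uses of this inline type in the given safepoint by a
// SafePointScalarObjectNode describing the field values that were appended to the safepoint.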
 307 void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
 308   JVMState* jvms = sfpt->jvms();
 309   assert(jvms != nullptr, "missing JVMS");
 310   uint first_ind = (sfpt->req() - jvms->scloff());
 311 
 312   // Iterate over the inline type fields in order of increasing offset and add the
 313   // field values to the safepoint. Nullable inline types have a null marker field that
 314   // needs to be checked before using the field values.
 315   sfpt->add_req(get_null_marker());
 316   uint nfields = add_fields_to_safepoint(worklist, sfpt);
 317   jvms->set_endoff(sfpt->req());
 318   // Replace safepoint edge by SafePointScalarObjectNode
 319   SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
 320                                                                   nullptr,
 321                                                                   first_ind,
 322                                                                   sfpt->jvms()->depth(),
 323                                                                   nfields);
 324   sobj->init_req(0, igvn->C->root());
 325   sobj = igvn->transform(sobj)->as_SafePointScalarObject();
 326   igvn->rehash_node_delayed(sfpt);
 327   for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
 328     Node* debug = sfpt->in(i);
 329     if (debug != nullptr && debug->uncast() == this) {
 330       sfpt->set_req(i, sobj);
 331     }
 332   }
 333 }
 334 
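// Scalarizes this inline type in all safepoints that have a debug use of it. If 'allow_oop' is
// set and the oop is allocated and simple enough (a constant, parameter, load, or a phi merging
// only constants and parameters), the oop is used directly instead to avoid keeping field loads
// alive just for the debug info.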
 335 void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
 336   // If the inline type has a constant or loaded oop, use the oop instead of scalarization
 337   // in the safepoint to avoid keeping field loads live just for the debug info.
 338   Node* oop = get_oop();
 339   bool use_oop = false;
 340   if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
 341     Unique_Node_List worklist;
 342     VectorSet visited;
 343     visited.set(oop->_idx);
 344     worklist.push(oop);
 345     use_oop = true;
 346     while (worklist.size() > 0 && use_oop) {
 347       Node* n = worklist.pop();
 348       for (uint i = 1; i < n->req(); i++) {
 349         Node* in = n->in(i);
 350         if (in->is_Phi() && !visited.test_set(in->_idx)) {
 351           worklist.push(in);
 352         } else if (!(in->is_Con() || in->is_Parm())) {
 353           use_oop = false;
 354           break;
 355         }
 356       }
 357     }
 358   } else {
 359     use_oop = allow_oop && is_allocated(igvn) &&
 360               (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
 361   }
 362 
 363   ResourceMark rm;
 364   Unique_Node_List safepoints;
 365   Unique_Node_List vt_worklist;
 366   Unique_Node_List worklist;
 367   worklist.push(this);
 368   while (worklist.size() > 0) {
 369     Node* n = worklist.pop();
 370     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 371       Node* use = n->fast_out(i);
 372       if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
 373         safepoints.push(use);
 374       } else if (use->is_ConstraintCast()) {
 375         worklist.push(use);
 376       }
 377     }
 378   }
 379 
 380   // Process all safepoint uses and scalarize inline type
 381   while (safepoints.size() > 0) {
 382     SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
 383     if (use_oop) {
 384       for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
 385         Node* debug = sfpt->in(i);
 386         if (debug != nullptr && debug->uncast() == this) {
 387           sfpt->set_req(i, get_oop());
 388         }
 389       }
 390       igvn->rehash_node_delayed(sfpt);
 391     } else {
 392       make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
 393     }
 394   }
 395   // Now scalarize non-flat fields
 396   for (uint i = 0; i < vt_worklist.size(); ++i) {
 397     InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
 398     vt->make_scalar_in_safepoints(igvn);
 399   }
 400   if (outcnt() == 0) {
 401     igvn->record_for_igvn(this);
 402   }
 403 }
 404 
 405 // We limit scalarization for inline types with circular fields and can therefore observe nodes
 406 // of the same type but with different scalarization depth during GVN. This method adjusts the
 407 // scalarization depth to avoid inconsistencies during merging.
 408 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
 409   if (!kit->C->has_circular_inline_type()) {
 410     return this;
 411   }
 412   GrowableArray<ciType*> visited;
 413   visited.push(inline_klass());
 414   return adjust_scalarization_depth_impl(kit, visited);
 415 }
 416 
 417 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
 418   InlineTypeNode* val = this;
 419   for (uint i = 0; i < field_count(); ++i) {
 420     Node* value = field_value(i);
 421     Node* new_value = value;
 422     ciType* ft = field_type(i);
 423     if (value->is_InlineType()) {
 424       if (!field_is_flat(i) && visited.contains(ft)) {
 425         new_value = value->as_InlineType()->buffer(kit)->get_oop();
 426       } else {
 427         int old_len = visited.length();
 428         visited.push(ft);
 429         new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
 430         visited.trunc_to(old_len);
 431       }
 432     } else if (ft->is_inlinetype() && !visited.contains(ft)) {
 433       int old_len = visited.length();
 434       visited.push(ft);
 435       new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 436       visited.trunc_to(old_len);
 437     }
 438     if (value != new_value) {
 439       if (val == this) {
 440         val = clone_if_required(&kit->gvn(), kit->map());
 441       }
 442       val->set_field_value(i, new_value);
 443     }
 444   }
 445   return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
 446 }
 447 
 448 void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
 449   // Initialize the inline type by loading its field values from
 450   // memory and adding the values as input edges to the node.
 451   ciInlineKlass* vk = inline_klass();
 452   for (uint i = 0; i < field_count(); ++i) {
 453     int field_off = field_offset(i) - vk->payload_offset();
 454     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 455     Node* value = nullptr;
 456     ciType* ft = field_type(i);
 457     bool field_null_free = field_is_null_free(i);
 458     if (field_is_flat(i)) {
 459       // Recursively load the flat inline type field
 460       ciInlineKlass* fvk = ft->as_inline_klass();
 461       // Atomic if nullable or not LooselyConsistentValue
 462       bool atomic = !field_null_free || fvk->must_be_atomic();
 463 
 464       int old_len = visited.length();
 465       visited.push(ft);
 466       value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
 467                                   field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
 468       visited.trunc_to(old_len);
 469     } else {
 470       // Load field value from memory
 471       BasicType bt = type2field[ft->basic_type()];
 472       assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
 473       const Type* val_type = Type::get_const_type(ft);
 474       if (trust_null_free_oop && field_null_free) {
 475         val_type = val_type->join_speculative(TypePtr::NOTNULL);
 476       }
 477       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 478       value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
 479       // Loading a non-flattened inline type from memory
 480       if (visited.contains(ft)) {
 481         kit->C->set_has_circular_inline_type(true);
 482       } else if (ft->is_inlinetype()) {
 483         int old_len = visited.length();
 484         visited.push(ft);
 485         value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 486         visited.trunc_to(old_len);
 487       }
 488     }
 489     set_field_value(i, value);
 490   }
 491 }
 492 
 493 // Get a field value from the payload by shifting it according to the offset
 494 static Node* get_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, BasicType val_bt, int offset) {
 495   // Shift to the right position in the long value
 496   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
 497   Node* value = nullptr;
 498   Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
 499   if (bt == T_LONG) {
 500     value = gvn->transform(new URShiftLNode(payload, shift_val));
 501     value = gvn->transform(new ConvL2INode(value));
 502   } else {
 503     value = gvn->transform(new URShiftINode(payload, shift_val));
 504   }
 505 
 506   if (val_bt == T_INT || val_bt == T_OBJECT || val_bt == T_ARRAY) {
 507     return value;
 508   } else {
 509     // Make sure to zero unused bits in the 32-bit value
 510     return Compile::narrow_value(val_bt, value, nullptr, gvn, true);
 511   }
 512 }
 513 
 514 // Convert a payload value to field values
 515 void InlineTypeNode::convert_from_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, bool trust_null_free_oop) {
 516   PhaseGVN* gvn = &kit->gvn();
 517   ciInlineKlass* vk = inline_klass();
 518   Node* value = nullptr;
 519   if (!null_free) {
 520     // Get the null marker
 521     value = get_payload_value(gvn, payload, bt, T_BOOLEAN, holder_offset + vk->null_marker_offset_in_payload());
 522     set_req(NullMarker, value);
 523   }
 524   // Iterate over the fields and get their values from the payload
 525   for (uint i = 0; i < field_count(); ++i) {
 526     ciType* ft = field_type(i);
 527     bool field_null_free = field_is_null_free(i);
 528     int offset = holder_offset + field_offset(i) - vk->payload_offset();
 529     if (field_is_flat(i)) {
 530       InlineTypeNode* vt = make_uninitialized(*gvn, ft->as_inline_klass(), field_null_free);
 531       vt->convert_from_payload(kit, bt, payload, offset, field_null_free, trust_null_free_oop && field_null_free);
 532       value = gvn->transform(vt);
 533     } else {
 534       value = get_payload_value(gvn, payload, bt, ft->basic_type(), offset);
 535       if (!ft->is_primitive_type()) {
 536         // Narrow oop field
 537         assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
 538         const Type* val_type = Type::get_const_type(ft);
 539         if (trust_null_free_oop && field_null_free) {
 540           val_type = val_type->join_speculative(TypePtr::NOTNULL);
 541         }
 542         value = gvn->transform(new CastI2NNode(kit->control(), value, val_type->make_narrowoop()));
 543         value = gvn->transform(new DecodeNNode(value, val_type->make_narrowoop()));
 544 
 545         // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
 546         // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
 547         // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
 548         // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.
 549 
 550         if (ft->is_inlinetype()) {
 551           GrowableArray<ciType*> visited;
 552           value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 553         }
 554       }
 555     }
 556     set_field_value(i, value);
 557   }
 558 }
 559 
 560 // Set a field value in the payload by shifting it according to the offset
 561 static Node* set_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, Node* value, BasicType val_bt, int offset) {
 562   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
 563 
 564   // Make sure to zero unused bits in the 32-bit value
 565   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
 566     value = gvn->transform(new AndINode(value, gvn->intcon(0xFF)));
 567   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
 568     value = gvn->transform(new AndINode(value, gvn->intcon(0xFFFF)));
 569   } else if (val_bt == T_FLOAT) {
 570     value = gvn->transform(new MoveF2INode(value));
 571   } else {
 572     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
 573   }
 574 
 575   Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
 576   if (bt == T_LONG) {
 577     // Convert to long and clear the sign-extended upper bits (the backend will fold this and emit a zero-extending i2l)
 578     value = gvn->transform(new ConvI2LNode(value));
 579     value = gvn->transform(new AndLNode(value, gvn->longcon(0xFFFFFFFF)));
 580 
 581     Node* shift_value = gvn->transform(new LShiftLNode(value, shift_val));
 582     payload = new OrLNode(shift_value, payload);
 583   } else {
 584     Node* shift_value = gvn->transform(new LShiftINode(value, shift_val));
 585     payload = new OrINode(shift_value, payload);
 586   }
 587   return gvn->transform(payload);
 588 }
 589 
 590 // Convert the field values to a payload value of type 'bt'
 591 Node* InlineTypeNode::convert_to_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset, int& oop_off_1, int& oop_off_2) const {
 592   PhaseGVN* gvn = &kit->gvn();
 593   Node* value = nullptr;
 594   if (!null_free) {
 595     // Set the null marker
 596     value = get_null_marker();
 597     payload = set_payload_value(gvn, payload, bt, value, T_BOOLEAN, null_marker_offset);
 598   }
 599   // Iterate over the fields and add their values to the payload
 600   for (uint i = 0; i < field_count(); ++i) {
 601     value = field_value(i);
 602     int inner_offset = field_offset(i) - inline_klass()->payload_offset();
 603     int offset = holder_offset + inner_offset;
 604     if (field_is_flat(i)) {
 605       null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
 606       payload = value->as_InlineType()->convert_to_payload(kit, bt, payload, offset, field_is_null_free(i), null_marker_offset, oop_off_1, oop_off_2);
 607     } else {
 608       ciType* ft = field_type(i);
 609       BasicType field_bt = ft->basic_type();
 610       if (!ft->is_primitive_type()) {
 611         // Narrow oop field
 612         assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
 613         assert(inner_offset != -1, "sanity");
 614         if (oop_off_1 == -1) {
 615           oop_off_1 = inner_offset;
 616         } else {
 617           assert(oop_off_2 == -1, "already set");
 618           oop_off_2 = inner_offset;
 619         }
 620         const Type* val_type = Type::get_const_type(ft)->make_narrowoop();
 621         if (value->is_InlineType()) {
 622           PreserveReexecuteState preexecs(kit);
 623           kit->jvms()->set_should_reexecute(true);
 624           value = value->as_InlineType()->buffer(kit, false);
 625         }
 626         value = gvn->transform(new EncodePNode(value, val_type));
 627         value = gvn->transform(new CastP2XNode(kit->control(), value));
 628         value = gvn->transform(new ConvL2INode(value));
 629         field_bt = T_INT;
 630       }
 631       payload = set_payload_value(gvn, payload, bt, value, field_bt, offset);
 632     }
 633   }
 634   return payload;
 635 }
 636 
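// Stores this inline type to memory in flat representation. If no atomic access is required,
// the null marker (if nullable) and the field values are written individually. Otherwise, the
// field values are converted into a payload of at most 64 bits that is written with a single
// atomic access.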
 637 void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) const {
 638   ciInlineKlass* vk = inline_klass();
 639   bool do_atomic = atomic;
 640   // With immutable memory, a non-atomic store and an atomic store are the same
 641   if (immutable_memory) {
 642     do_atomic = false;
 643   }
 644   // If there is only one flattened field, a non-atomic store and an atomic store are the same
 645   if (vk->is_naturally_atomic(null_free)) {
 646     do_atomic = false;
 647   }
 648 
 649   if (!do_atomic) {
 650     if (!null_free) {
 651       int nm_offset = vk->null_marker_offset_in_payload();
 652       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
 653       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 654       kit->access_store_at(base, nm_ptr, nm_ptr_type, get_null_marker(), TypeInt::BOOL, T_BOOLEAN, decorators);
 655     }
 656     store(kit, base, ptr, immutable_memory, decorators);
 657     return;
 658   }
 659 
 660   // Convert to a payload value <= 64-bit and write atomically.
 661   // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
 662   // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
 663   // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
 664   // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
 665   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
 666   BasicType store_bt = vk->atomic_size_to_basic_type(null_free);
 667   Node* payload = (store_bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
 668   int oop_off_1 = -1;
 669   int oop_off_2 = -1;
 670   payload = convert_to_payload(kit, store_bt, payload, 0, null_free, vk->null_marker_offset_in_payload(), oop_off_1, oop_off_2);
 671   if (!UseG1GC || oop_off_1 == -1) {
 672     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
 673     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
 674     // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
 675     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
 676     const Type* val_type = Type::get_const_basic_type(store_bt);
 677     kit->insert_mem_bar(Op_MemBarCPUOrder);
 678     kit->access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, val_type, store_bt, decorators | C2_MISMATCHED, true, this);
 679     kit->insert_mem_bar(Op_MemBarCPUOrder);
 680   } else {
 681     // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
 682     assert(UseG1GC, "Unexpected GC");
 683     assert(store_bt == T_LONG, "Unexpected payload type");
 684     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
 685     Node* oop_offset = (oop_off_2 == -1) ? kit->intcon(oop_off_1) : nullptr;
 686     kit->insert_mem_bar(Op_MemBarCPUOrder);
 687     Node* mem = kit->reset_memory();
 688     kit->set_all_memory(mem);
 689     Node* st = kit->gvn().transform(new StoreLSpecialNode(kit->control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
 690     kit->set_memory(st, TypeRawPtr::BOTTOM);
 691     kit->insert_mem_bar(Op_MemBarCPUOrder);
 692   }
 693 }
 694 
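// Stores this inline type to a flat array element. The array layout (nullable, null-free atomic,
// or null-free non-atomic) is only known at runtime, so control flow is emitted to dispatch to
// the store matching the layout.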
 695 void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) const {
 696   PhaseGVN& gvn = kit->gvn();
 697   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
 698   kit->C->set_flat_accesses();
 699   ciInlineKlass* vk = inline_klass();
 700   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
 701 
 702   RegionNode* region = new RegionNode(4);
 703   gvn.set_type(region, Type::CONTROL);
 704   kit->record_for_igvn(region);
 705 
 706   Node* input_memory_state = kit->reset_memory();
 707   kit->set_all_memory(input_memory_state);
 708 
 709   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
 710   gvn.set_type(mem, Type::MEMORY);
 711   kit->record_for_igvn(mem);
 712 
 713   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
 714   gvn.set_type(io, Type::ABIO);
 715   kit->record_for_igvn(io);
 716 
 717   Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
 718   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
 719 
 720   // Nullable
 721   kit->set_control(kit->IfFalse(iff_null_free));
 722   if (!kit->stopped()) {
 723     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
 724     kit->set_all_memory(input_memory_state);
 725     Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
 726     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 727     store_flat(kit, cast, ptr, true, false, false, decorators);
 728 
 729     region->init_req(1, kit->control());
 730     mem->set_req(1, kit->reset_memory());
 731     io->set_req(1, kit->i_o());
 732   }
 733 
 734   // Null-free
 735   kit->set_control(kit->IfTrue(iff_null_free));
 736   if (!kit->stopped()) {
 737     kit->set_all_memory(input_memory_state);
 738 
 739     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
 740     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
 741 
 742     // Atomic
 743     kit->set_control(kit->IfTrue(iff_atomic));
 744     if (!kit->stopped()) {
 745       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
 746       kit->set_all_memory(input_memory_state);
 747       Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
 748       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 749       store_flat(kit, cast, ptr, true, false, true, decorators);
 750 
 751       region->init_req(2, kit->control());
 752       mem->set_req(2, kit->reset_memory());
 753       io->set_req(2, kit->i_o());
 754     }
 755 
 756     // Non-atomic
 757     kit->set_control(kit->IfFalse(iff_atomic));
 758     if (!kit->stopped()) {
 759       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
 760       kit->set_all_memory(input_memory_state);
 761       Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
 762       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 763       store_flat(kit, cast, ptr, false, false, true, decorators);
 764 
 765       region->init_req(3, kit->control());
 766       mem->set_req(3, kit->reset_memory());
 767       io->set_req(3, kit->i_o());
 768     }
 769   }
 770 
 771   kit->set_control(gvn.transform(region));
 772   kit->set_all_memory(gvn.transform(mem));
 773   kit->set_i_o(gvn.transform(io));
 774 }
 775 
 776 void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
 777   // Write field values to memory
 778   ciInlineKlass* vk = inline_klass();
 779   for (uint i = 0; i < field_count(); ++i) {
 780     int field_off = field_offset(i) - vk->payload_offset();
 781     Node* field_val = field_value(i);
 782     bool field_null_free = field_is_null_free(i);
 783     ciType* ft = field_type(i);
 784     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 785     if (field_is_flat(i)) {
 786       // Recursively store the flat inline type field
 787       ciInlineKlass* fvk = ft->as_inline_klass();
 788       // Atomic if nullable or not LooselyConsistentValue
 789       bool atomic = !field_null_free || fvk->must_be_atomic();
 790 
 791       field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
 792     } else {
 793       // Store field value to memory
 794       BasicType bt = type2field[ft->basic_type()];
 795       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 796       const Type* val_type = Type::get_const_type(ft);
 797       kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
 798     }
 799   }
 800 }
 801 
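// Returns a buffered (heap-allocated) version of this inline type. If the inline type is not
// known to be buffered, the oop is checked at runtime and, on the not-buffered path, a buffer
// is allocated and initialized (or the null oop is used if the inline type is null).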
 802 InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
 803   if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
 804     // Already buffered
 805     return this;
 806   }
 807 
 808   // Check if inline type is already buffered
 809   Node* not_buffered_ctl = kit->top();
 810   Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
 811   if (not_buffered_ctl->is_top()) {
 812     // Already buffered
 813     InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 814     vt->set_is_buffered(kit->gvn());
 815     vt = kit->gvn().transform(vt)->as_InlineType();
 816     if (safe_for_replace) {
 817       kit->replace_in_map(this, vt);
 818     }
 819     return vt;
 820   }
 821   Node* buffered_ctl = kit->control();
 822   kit->set_control(not_buffered_ctl);
 823 
 824   // Inline type is not buffered, check if it is null.
 825   Node* null_ctl = kit->top();
 826   kit->null_check_common(get_null_marker(), T_INT, false, &null_ctl);
 827   bool null_free = null_ctl->is_top();
 828 
 829   RegionNode* region = new RegionNode(4);
 830   PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));
 831 
 832   // InlineType is already buffered
 833   region->init_req(1, buffered_ctl);
 834   oop->init_req(1, not_null_oop);
 835 
 836   // InlineType is null
 837   region->init_req(2, null_ctl);
 838   oop->init_req(2, kit->gvn().zerocon(T_OBJECT));
 839 
 840   PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
 841   PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);
 842 
 843   if (!kit->stopped()) {
 844     assert(!is_allocated(&kit->gvn()), "already buffered");
 845     PreserveJVMState pjvms(kit);
 846     ciInlineKlass* vk = inline_klass();
 847     // Allocate and initialize buffer, re-execute on deoptimization.
 848     kit->jvms()->set_bci(kit->bci());
 849     kit->jvms()->set_should_reexecute(true);
 850     kit->kill_dead_locals();
 851     Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
 852     Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
 853     Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
 854     store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);
 855 
 856     // Do not let stores that initialize this buffer be reordered with a subsequent
 857     // store that would make this buffer accessible by other threads.
 858     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
 859     assert(alloc != nullptr, "must have an allocation node");
 860     kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 861     oop->init_req(3, alloc_oop);
 862     region->init_req(3, kit->control());
 863     io    ->init_req(3, kit->i_o());
 864     mem   ->init_req(3, kit->merged_memory());
 865   }
 866 
 867   // Update GraphKit
 868   kit->set_control(kit->gvn().transform(region));
 869   kit->set_i_o(kit->gvn().transform(io));
 870   kit->set_all_memory(kit->gvn().transform(mem));
 871   kit->record_for_igvn(region);
 872   kit->record_for_igvn(oop);
 873   kit->record_for_igvn(io);
 874   kit->record_for_igvn(mem);
 875 
 876   // Use cloned InlineTypeNode to propagate oop from now on
 877   Node* res_oop = kit->gvn().transform(oop);
 878   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 879   vt->set_oop(kit->gvn(), res_oop);
 880   vt->set_is_buffered(kit->gvn());
 881   vt = kit->gvn().transform(vt)->as_InlineType();
 882   if (safe_for_replace) {
 883     kit->replace_in_map(this, vt);
 884   }
 885   // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
 886   // Make sure it gets a chance to remove this allocation.
 887   kit->C->set_has_split_ifs(true);
 888   return vt;
 889 }
 890 
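// Returns true if this inline type is known to be buffered, i.e. its is_buffered input is the
// constant 1 or its oop is known to be non-null.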
 891 bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
 892   if (phase->find_int_con(get_is_buffered(), 0) == 1) {
 893     return true;
 894   }
 895   Node* oop = get_oop();
 896   const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
 897   return !oop_type->maybe_null();
 898 }
 899 
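// Replaces the result projection of 'call' at 'proj_idx' (if it exists) by 'value' and advances
// 'proj_idx' by the size of 'bt'.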
 900 static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
 901   ProjNode* pn = call->proj_out_or_null(proj_idx);
 902   if (pn != nullptr) {
 903     C->gvn_replace_by(pn, value);
 904     C->initial_gvn()->hash_delete(pn);
 905     pn->set_req(0, C->top());
 906   }
 907   proj_idx += type2size[bt];
 908 }
 909 
 910 // When a call returns multiple values, it has several result
 911 // projections, one per field. Replacing the result of the call by an
 912 // inline type node (after late inlining) requires that for each result
 913 // projection, we find the corresponding inline type field.
 914 void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
 915   uint proj_idx = TypeFunc::Parms;
 916   // Replace oop projection
 917   replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
 918   // Replace field projections
 919   replace_field_projs(C, call, proj_idx);
 920   // Replace null_marker projection
 921   replace_proj(C, call, proj_idx, get_null_marker(), T_BOOLEAN);
 922   assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
 923 }
 924 
 925 void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
 926   for (uint i = 0; i < field_count(); ++i) {
 927     Node* value = field_value(i);
 928     if (field_is_flat(i)) {
 929       InlineTypeNode* vt = value->as_InlineType();
 930       // Replace field projections for flat field
 931       vt->replace_field_projs(C, call, proj_idx);
 932       if (!field_is_null_free(i)) {
 933         // Replace null_marker projection for nullable field
 934         replace_proj(C, call, proj_idx, vt->get_null_marker(), T_BOOLEAN);
 935       }
 936       continue;
 937     }
 938     // Replace projection for field value
 939     replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
 940   }
 941 }
 942 
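// Ensures that all field values of this inline type are buffered: flat fields are handled
// recursively, non-flat inline type fields are buffered. Returns the updated inline type and
// installs it in the map.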
 943 Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
 944   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
 945   for (uint i = 0; i < field_count(); i++) {
 946      Node* value = field_value(i);
 947      if (field_is_flat(i)) {
 948        // Flat inline type field
 949        vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
 950      } else if (value->is_InlineType()) {
 951        // Non-flat inline type field
 952        vt->set_field_value(i, value->as_InlineType()->buffer(kit));
 953      }
 954   }
 955   vt = kit->gvn().transform(vt)->as_InlineType();
 956   kit->replace_in_map(this, vt);
 957   return vt;
 958 }
 959 
 960 // Replace a buffer allocation by a dominating allocation
 961 static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
 962   // Remove initializing stores and GC barriers
 963   for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
 964     Node* use = res->fast_out(i);
 965     if (use->is_AddP()) {
 966       for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
 967         Node* store = use->fast_out(j)->isa_Store();
 968         if (store != nullptr) {
 969           igvn->rehash_node_delayed(store);
 970           igvn->replace_in_uses(store, store->in(MemNode::Memory));
 971         }
 972       }
 973     } else if (use->Opcode() == Op_CastP2X) {
 974       if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
 975         // The G1 pre-barrier uses a CastP2X both for the pointer of the object
 976         // we store into, as well as the value we are storing. Skip if this is a
 977         // barrier for storing 'res' into another object.
 978         continue;
 979       }
 980       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 981       bs->eliminate_gc_barrier(igvn, use);
 982       --i; --imax;
 983     }
 984   }
 985   igvn->replace_node(res, dom);
 986 }
 987 
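// Idealize this inline type: take over the inputs of a non-null InlineTypeNode oop input, use a
// loaded non-null base oop as oop input where beneficial, and (during IGVN) remove redundant
// re-allocations of an already buffered inline type.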
 988 Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 989   Node* oop = get_oop();
 990   Node* is_buffered = get_is_buffered();
 991 
 992   if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
 993     InlineTypeNode* vtptr = oop->as_InlineType();
 994     set_oop(*phase, vtptr->get_oop());
 995     set_is_buffered(*phase);
 996     set_null_marker(*phase);
 997     for (uint i = Values; i < vtptr->req(); ++i) {
 998       set_req(i, vtptr->in(i));
 999     }
1000     return this;
1001   }
1002 
1003   // Use base oop if fields are loaded from memory, don't do so if base is the CheckCastPP of an
1004   // allocation because the only case we load from a naked CheckCastPP is when we exit a
1005   // constructor of an inline type and we want to relinquish the larval oop there. This has a
1006   // couple of benefits:
1007   // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
1008   // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge
1009   //   when we try to clone a value object.
1010   // - The buffering, if needed, is delayed until it is required. This new allocation, since it is
1011   //   created from an InlineTypeNode, is recognized as not having a unique identity and in the
1012   //   future, we can move them around more freely such as hoisting out of loops. This is not true
1013   //   for the old allocation since larval value objects do have unique identities.
1014   Node* base = is_loaded(phase);
1015   if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) {
1016     if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
1017       set_oop(*phase, base);
1018       set_is_buffered(*phase);
1019       return this;
1020     }
1021   }
1022 
1023   if (can_reshape) {
1024     PhaseIterGVN* igvn = phase->is_IterGVN();
1025     if (is_allocated(phase)) {
1026       // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
1027       // they will be removed anyway and changing the memory chain will confuse other optimizations.
1028       // This can happen with late inlining when we first allocate an inline type argument
1029       // but later decide to inline the call after the callee code also triggered allocation.
1030       for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1031         AllocateNode* alloc = fast_out(i)->isa_Allocate();
1032         if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1033           // Found a re-allocation
1034           Node* res = alloc->result_cast();
1035           if (res != nullptr && res->is_CheckCastPP()) {
1036             // Replace allocation by oop and unlink AllocateNode
1037             replace_allocation(igvn, res, oop);
1038             igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
1039             --i; --imax;
1040           }
1041         }
1042       }
1043     }
1044   }
1045 
1046   return nullptr;
1047 }
1048 
1049 InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
1050   // Create a new InlineTypeNode with uninitialized values and nullptr oop
1051   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
1052   vt->set_is_buffered(gvn, false);
1053   vt->set_null_marker(gvn);
1054   return vt;
1055 }
1056 
1057 InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
1058   GrowableArray<ciType*> visited;
1059   visited.push(vk);
1060   return make_all_zero_impl(gvn, vk, visited);
1061 }
1062 
1063 InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1064   // Create a new InlineTypeNode initialized with all zero
1065   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
1066   vt->set_is_buffered(gvn, false);
1067   vt->set_null_marker(gvn);
1068   for (uint i = 0; i < vt->field_count(); ++i) {
1069     ciType* ft = vt->field_type(i);
1070     Node* value = gvn.zerocon(ft->basic_type());
1071     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1072       gvn.C->set_has_circular_inline_type(true);
1073     } else if (ft->is_inlinetype()) {
1074       int old_len = visited.length();
1075       visited.push(ft);
1076       ciInlineKlass* vk = ft->as_inline_klass();
1077       if (vt->field_is_null_free(i)) {
1078         value = make_all_zero_impl(gvn, vk, visited);
1079       } else {
1080         value = make_null_impl(gvn, vk, visited);
1081       }
1082       visited.trunc_to(old_len);
1083     }
1084     vt->set_field_value(i, value);
1085   }
1086   vt = gvn.transform(vt)->as_InlineType();
1087   assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
1088   return vt;
1089 }
1090 
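// Returns true if this inline type is known to be non-null and to only hold all-zero values:
// null-free fields must be all-zero, nullable inline type fields must be null, and all other
// fields must be zero. If 'flat' is set, null-free fields must additionally be flat.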
1091 bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
1092   const TypeInt* tinit = gvn->type(get_null_marker())->isa_int();
1093   if (tinit == nullptr || !tinit->is_con(1)) {
1094     return false; // May be null
1095   }
1096   for (uint i = 0; i < field_count(); ++i) {
1097     Node* value = field_value(i);
1098     if (field_is_null_free(i)) {
1099       // Null-free value class field must have the all-zero value. If 'flat' is set,
1100       // reject non-flat fields because they need to be initialized with an oop to a buffer.
1101       if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
1102         return false;
1103       }
1104       continue;
1105     } else if (value->is_InlineType()) {
1106       // Nullable value class field must be null
1107       tinit = gvn->type(value->as_InlineType()->get_null_marker())->isa_int();
1108       if (tinit != nullptr && tinit->is_con(0)) {
1109         continue;
1110       }
1111       return false;
1112     } else if (!gvn->type(value)->is_zero_type()) {
1113       return false;
1114     }
1115   }
1116   return true;
1117 }
1118 
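// Creates an InlineTypeNode from the given oop by loading all field values from memory. A null
// check is emitted if the oop may be null (see make_from_oop_impl).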
1119 InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
1120   GrowableArray<ciType*> visited;
1121   visited.push(vk);
1122   return make_from_oop_impl(kit, oop, vk, visited);
1123 }
1124 
1125 InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1126   PhaseGVN& gvn = kit->gvn();
1127 
1128   // Create and initialize an InlineTypeNode by loading all field
1129   // values from a heap-allocated version and also save the oop.
1130   InlineTypeNode* vt = nullptr;
1131 
1132   if (oop->isa_InlineType()) {
1133     return oop->as_InlineType();
1134   }
1135 
1136   if (gvn.type(oop)->maybe_null()) {
1137     // Add a null check because the oop may be null
1138     Node* null_ctl = kit->top();
1139     Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
1140     if (kit->stopped()) {
1141       // Constant null
1142       kit->set_control(null_ctl);
1143       vt = make_null_impl(gvn, vk, visited);
1144       kit->record_for_igvn(vt);
1145       return vt;
1146     }
1147     vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
1148     vt->set_is_buffered(gvn);
1149     vt->set_null_marker(gvn);
1150     Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
1151     vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1152 
1153     if (null_ctl != kit->top()) {
1154       InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
1155       Node* region = new RegionNode(3);
1156       region->init_req(1, kit->control());
1157       region->init_req(2, null_ctl);
1158       vt = vt->clone_with_phis(&gvn, region, kit->map());
1159       vt->merge_with(&gvn, null_vt, 2, true);
1160       vt->set_oop(gvn, oop);
1161       kit->set_control(gvn.transform(region));
1162     }
1163   } else {
1164     // Oop can never be null
1165     vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
1166     Node* init_ctl = kit->control();
1167     vt->set_is_buffered(gvn);
1168     vt->set_null_marker(gvn);
1169     Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
1170     vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1171 // TODO 8284443
1172 //    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
1173 //           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
1174   }
1175   assert(vt->is_allocated(&gvn), "inline type should be allocated");
1176   kit->record_for_igvn(vt);
1177   return gvn.transform(vt)->as_InlineType();
1178 }
1179 
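// Loads an inline type from memory in flat representation, for example from a flat field or a
// flat array element.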
1180 InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
1181                                                bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
1182   GrowableArray<ciType*> visited;
1183   visited.push(vk);
1184   return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
1185 }
1186 
1187 // Implementation of 'make_from_flat' that also tracks the visited field types to handle circular inline types
1188 InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
1189                                                     bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
1190   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1191   PhaseGVN& gvn = kit->gvn();
1192   bool do_atomic = atomic;
1193   // With immutable memory, a non-atomic load and an atomic load are the same
1194   if (immutable_memory) {
1195     do_atomic = false;
1196   }
1197   // If there is only one flattened field, a non-atomic load and an atomic load are the same
1198   if (vk->is_naturally_atomic(null_free)) {
1199     do_atomic = false;
1200   }
1201 
1202   if (!do_atomic) {
1203     InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1204     if (!null_free) {
1205       int nm_offset = vk->null_marker_offset_in_payload();
1206       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
1207       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
1208       Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
1209       vt->set_req(NullMarker, nm_value);
1210     }
1211 
1212     vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
1213     return gvn.transform(vt)->as_InlineType();
1214   }
1215 
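  // Atomic access: load the whole flat layout with a single access of the appropriate size and
  // extract the individual field values from the loaded payload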
1216   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
1217   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1218   BasicType load_bt = vk->atomic_size_to_basic_type(null_free);
1219   decorators |= C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD;
1220   const Type* val_type = Type::get_const_basic_type(load_bt);
1221   kit->insert_mem_bar(Op_MemBarCPUOrder);
1222   Node* payload = kit->access_load_at(base, ptr, TypeRawPtr::BOTTOM, val_type, load_bt, decorators, kit->control());
1223   kit->insert_mem_bar(Op_MemBarCPUOrder);
1224   vt->convert_from_payload(kit, load_bt, kit->gvn().transform(payload), 0, null_free, trust_null_free_oop);
1225   return gvn.transform(vt)->as_InlineType();
1226 }
1227 
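// Loads an inline type from a flat array element. Emits runtime checks for the array layout
// (nullable flat, null-free atomic flat, or null-free non-atomic flat) and merges the values
// loaded on the reachable paths.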
1228 InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
1229   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
1230   PhaseGVN& gvn = kit->gvn();
1231   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
1232   kit->C->set_flat_accesses();
1233   InlineTypeNode* vt_nullable = nullptr;
1234   InlineTypeNode* vt_null_free = nullptr;
1235   InlineTypeNode* vt_non_atomic = nullptr;
1236 
1237   RegionNode* region = new RegionNode(4);
1238   gvn.set_type(region, Type::CONTROL);
1239   kit->record_for_igvn(region);
1240 
1241   Node* input_memory_state = kit->reset_memory();
1242   kit->set_all_memory(input_memory_state);
1243 
1244   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
1245   gvn.set_type(mem, Type::MEMORY);
1246   kit->record_for_igvn(mem);
1247 
1248   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
1249   gvn.set_type(io, Type::ABIO);
1250   kit->record_for_igvn(io);
1251 
1252   Node* bol_null_free = kit->null_free_array_test(base); // Evaluation order of function arguments is unspecified in C++. Since this sets control, it needs to come first
1253   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
1254 
1255   // Nullable
1256   kit->set_control(kit->IfFalse(iff_null_free));
1257   if (!kit->stopped()) {
1258     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
1259     kit->set_all_memory(input_memory_state);
1260     Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
1261     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1262     vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);
1263 
1264     region->init_req(1, kit->control());
1265     mem->set_req(1, kit->reset_memory());
1266     io->set_req(1, kit->i_o());
1267   }
1268 
1269   // Null-free
1270   kit->set_control(kit->IfTrue(iff_null_free));
1271   if (!kit->stopped()) {
1272     kit->set_all_memory(input_memory_state);
1273 
1274     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
1275     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
1276 
1277     // Atomic
1278     kit->set_control(kit->IfTrue(iff_atomic));
1279     if (!kit->stopped()) {
1280       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
1281       kit->set_all_memory(input_memory_state);
1282       Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
1283       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1284       vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);
1285 
1286       region->init_req(2, kit->control());
1287       mem->set_req(2, kit->reset_memory());
1288       io->set_req(2, kit->i_o());
1289     }
1290 
1291     // Non-Atomic
1292     kit->set_control(kit->IfFalse(iff_atomic));
1293     if (!kit->stopped()) {
1294       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
1295       kit->set_all_memory(input_memory_state);
1296       Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
1297       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1298       vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);
1299 
1300       region->init_req(3, kit->control());
1301       mem->set_req(3, kit->reset_memory());
1302       io->set_req(3, kit->i_o());
1303     }
1304   }
1305 
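  // If at most one path is reachable, no merging is needed and the single value
  // (or a null inline type if all paths are dead) is used directly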
1306   InlineTypeNode* vt = nullptr;
1307   if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
1308     // All paths are dead
1309     vt = make_null(gvn, vk);
1310   } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
1311     vt = vt_non_atomic;
1312   } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
1313     vt = vt_null_free;
1314   } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
1315     vt = vt_nullable;
1316   }
1317   if (vt != nullptr) {
1318     kit->set_control(kit->gvn().transform(region));
1319     kit->set_all_memory(kit->gvn().transform(mem));
1320     kit->set_i_o(kit->gvn().transform(io));
1321     return vt;
1322   }
1323 
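  // Multiple paths are reachable: merge the inline types loaded on each path with phis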
1324   InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
1325   vt = zero->clone_with_phis(&gvn, region);
1326   if (vt_nullable != nullptr) {
1327     vt = vt->merge_with(&gvn, vt_nullable, 1, false);
1328   }
1329   if (vt_null_free != nullptr) {
1330     vt = vt->merge_with(&gvn, vt_null_free, 2, false);
1331   }
1332   if (vt_non_atomic != nullptr) {
1333     vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
1334   }
1335 
1336   kit->set_control(kit->gvn().transform(region));
1337   kit->set_all_memory(kit->gvn().transform(mem));
1338   kit->set_i_o(kit->gvn().transform(io));
1339   return gvn.transform(vt)->as_InlineType();
1340 }
1341 
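// Reconstructs an inline type from the scalarized inputs (method entry or call arguments) or
// result projections (call return values) of 'multi', starting at 'base_input'.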
1342 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1343   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1344   if (!in) {
1345     // Keep track of the oop. The returned inline type might already be buffered.
1346     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1347     vt->set_oop(kit->gvn(), oop);
1348   }
1349   GrowableArray<ciType*> visited;
1350   visited.push(vk);
1351   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1352   return kit->gvn().transform(vt)->as_InlineType();
1353 }
1354 
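// Checks if all field values of this inline type were loaded from memory of the same base oop at the
// expected offsets. Returns that base oop if so, nullptr otherwise.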
1355 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
1356   if (vk == nullptr) {
1357     vk = inline_klass();
1358   }
1359   for (uint i = 0; i < field_count(); ++i) {
1360     int offset = holder_offset + field_offset(i);
1361     Node* value = field_value(i);
1362     if (value->is_InlineType()) {
1363       InlineTypeNode* vt = value->as_InlineType();
1364       if (vt->type()->inline_klass()->is_empty()) {
1365         continue;
1366       } else if (field_is_flat(i)) {
1367         // Check inline type field load recursively
1368         base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1369         if (base == nullptr) {
1370           return nullptr;
1371         }
1372         continue;
1373       } else {
1374         value = vt->get_oop();
1375         if (value->Opcode() == Op_CastPP) {
1376           // Skip CastPP
1377           value = value->in(1);
1378         }
1379       }
1380     }
1381     if (value->isa_DecodeN()) {
1382       // Skip DecodeN
1383       value = value->in(1);
1384     }
1385     if (value->isa_Load()) {
1386       // Check if base and offset of the field load match the inline type layout
1387       intptr_t loffset = 0;
1388       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1389       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1390         return nullptr;
1391       } else if (base == nullptr) {
1392         // Set base and check if pointer type matches
1393         base = lbase;
1394         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1395         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1396           return nullptr;
1397         }
1398       }
1399     } else {
1400       return nullptr;
1401     }
1402   }
1403   return base;
1404 }
1405 
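// Returns the klass pointer of 'vk' as a long constant with the least significant bit set (tagged klass pointer)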
1406 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1407   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1408   intptr_t bits = tk->get_con();
1409   set_nth_bit(bits, 0);
1410   return gvn.longcon((jlong)bits);
1411 }
1412 
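// Passes the field values of this inline type as individual arguments to 'n' (a call or return),
// starting at 'base_input'. Flat fields are passed recursively, other inline type fields are buffered.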
1413 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1414   if (!null_free && in) {
1415     n->init_req(base_input++, get_null_marker());
1416   }
1417   for (uint i = 0; i < field_count(); i++) {
1418     Node* arg = field_value(i);
1419     if (field_is_flat(i)) {
1420       // Flat inline type field
1421       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1422       if (!field_is_null_free(i)) {
1423         assert(field_null_marker_offset(i) != -1, "inconsistency");
1424         n->init_req(base_input++, arg->as_InlineType()->get_null_marker());
1425       }
1426     } else {
1427       if (arg->is_InlineType()) {
1428         // Non-flat inline type field
1429         InlineTypeNode* vt = arg->as_InlineType();
1430         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1431         arg = vt->buffer(kit);
1432       }
1433       // Initialize call/return arguments
1434       n->init_req(base_input++, arg);
1435       if (field_type(i)->size() == 2) {
1436         n->init_req(base_input++, kit->top());
1437       }
1438     }
1439   }
1440   // The last argument is used to pass the null marker to compiled code and is not required here.
1441   if (!null_free && !in) {
1442     n->init_req(base_input++, kit->top());
1443   }
1444 }
1445 
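// Initializes the field values of this inline type from the parameters, call inputs, or call projections
// of 'multi', starting at 'base_input'. For a nullable inline type, a null check on the null marker is
// added and used to set inline type fields to nullptr when the holder is null.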
1446 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
1447   PhaseGVN& gvn = kit->gvn();
1448   Node* null_marker = nullptr;
1449   if (!null_free) {
1450     // Nullable inline type
1451     if (in) {
1452       // Set null marker
1453       if (multi->is_Start()) {
1454         null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1455       } else {
1456         null_marker = multi->as_Call()->in(base_input);
1457       }
1458       set_req(NullMarker, null_marker);
1459       base_input++;
1460     }
1461     // Add a null check to make subsequent loads dependent on it
1462     assert(null_check_region == nullptr, "already set");
1463     if (null_marker == nullptr) {
1464       // Will only be initialized below, use dummy node for now
1465       null_marker = new Node(1);
1466       null_marker->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1467       gvn.set_type_bottom(null_marker);
1468     }
1469     Node* null_ctrl = kit->top();
1470     kit->null_check_common(null_marker, T_INT, false, &null_ctrl);
1471     Node* non_null_ctrl = kit->control();
1472     null_check_region = new RegionNode(3);
1473     null_check_region->init_req(1, non_null_ctrl);
1474     null_check_region->init_req(2, null_ctrl);
1475     null_check_region = gvn.transform(null_check_region);
1476     kit->set_control(null_check_region);
1477   }
1478 
1479   for (uint i = 0; i < field_count(); ++i) {
1480     ciType* type = field_type(i);
1481     Node* parm = nullptr;
1482     if (field_is_flat(i)) {
1483       // Flat inline type field
1484       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
1485       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1486       if (!field_is_null_free(i)) {
1487         assert(field_null_marker_offset(i) != -1, "inconsistency");
1488         Node* null_marker = nullptr;
1489         if (multi->is_Start()) {
1490           null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1491         } else if (in) {
1492           null_marker = multi->as_Call()->in(base_input);
1493         } else {
1494           null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1495         }
1496         vt->set_req(NullMarker, null_marker);
1497         base_input++;
1498       }
1499       parm = gvn.transform(vt);
1500     } else {
1501       if (multi->is_Start()) {
1502         assert(in, "return from start?");
1503         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1504       } else if (in) {
1505         parm = multi->as_Call()->in(base_input);
1506       } else {
1507         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1508       }
1509       bool null_free = field_is_null_free(i);
1510       // Non-flat inline type field
1511       if (type->is_inlinetype()) {
1512         if (null_check_region != nullptr) {
1513           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1514           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1515           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1516           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1517             parm = parm->as_InlineType()->get_oop();
1518           }
1519           // Holder is nullable: set the field to nullptr if the holder is nullptr to avoid loading from uninitialized memory
1520           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1521           parm->set_req(2, kit->zerocon(T_OBJECT));
1522           parm = gvn.transform(parm);
1523           null_free = false;
1524         }
1525         if (visited.contains(type)) {
1526           kit->C->set_has_circular_inline_type(true);
1527         } else if (!parm->is_InlineType()) {
1528           int old_len = visited.length();
1529           visited.push(type);
1530           if (null_free) {
1531             parm = kit->cast_not_null(parm);
1532           }
1533           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1534           visited.trunc_to(old_len);
1535         }
1536       }
1537       base_input += type->size();
1538     }
1539     assert(parm != nullptr, "should never be null");
1540     assert(field_value(i) == nullptr, "already set");
1541     set_field_value(i, parm);
1542     gvn.record_for_igvn(parm);
1543   }
1544   // The last argument is used to pass the null marker to compiled code
1545   if (!null_free && !in) {
1546     Node* cmp = null_marker->raw_out(0);
1547     null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1548     set_req(NullMarker, null_marker);
1549     gvn.hash_delete(cmp);
1550     cmp->set_req(1, null_marker);
1551     gvn.hash_find_insert(cmp);
1552     gvn.record_for_igvn(cmp);
1553     base_input++;
1554   }
1555 }
1556 
1557 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1558 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1559 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1560   PhaseIterGVN* igvn = &phase->igvn();
1561   // Search for allocations of this inline type. Ignore scalar replaceable ones, they
1562   // will be removed anyway and changing the memory chain will confuse other optimizations.
1563   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1564     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1565     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1566       Node* res = alloc->result_cast();
1567       if (res == nullptr || !res->is_CheckCastPP()) {
1568         break; // No unique CheckCastPP
1569       }
1570       // Search for a dominating allocation of the same inline type
1571       Node* res_dom = res;
1572       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1573         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1574         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1575           Node* res_other = alloc_other->result_cast();
1576           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1577               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1578             res_dom = res_other;
1579           }
1580         }
1581       }
1582       if (res_dom != res) {
1583         // Replace allocation by dominating one.
1584         replace_allocation(igvn, res, res_dom);
1585         // The result of the dominated allocation is now unused and will be removed
1586         // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
1587         igvn->_worklist.push(alloc);
1588       }
1589     }
1590   }
1591 }
1592 
1593 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1594   GrowableArray<ciType*> visited;
1595   visited.push(vk);
1596   return make_null_impl(gvn, vk, visited, transform);
1597 }
1598 
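// Creates an inline type node representing null: the null marker is 0 and all field values are zero/null
// (recursively so for inline type fields).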
1599 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1600   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1601   vt->set_is_buffered(gvn);
1602   vt->set_null_marker(gvn, gvn.intcon(0));
1603   for (uint i = 0; i < vt->field_count(); i++) {
1604     ciType* ft = vt->field_type(i);
1605     Node* value = gvn.zerocon(ft->basic_type());
1606     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1607       gvn.C->set_has_circular_inline_type(true);
1608     } else if (ft->is_inlinetype()) {
1609       int old_len = visited.length();
1610       visited.push(ft);
1611       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1612       visited.trunc_to(old_len);
1613     }
1614     vt->set_field_value(i, value);
1615   }
1616   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1617 }
1618 
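// Returns this node if it is only used by 'map' (and replacing it is safe), otherwise returns a clone
// so that other users are not affected by subsequent modifications.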
1619 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1620   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1621     return clone()->as_InlineType();
1622   }
1623   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1624     if (fast_out(i) != map) {
1625       return clone()->as_InlineType();
1626     }
1627   }
1628   gvn->hash_delete(this);
1629   return this;
1630 }
1631 
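// Computes the type of this node from the oop input, refined by the null marker (a constant 1 implies non-null)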
1632 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1633   Node* oop = get_oop();
1634   const Type* toop = phase->type(oop);
1635 #ifdef ASSERT
1636   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1637     // We are not allocated (anymore) and should therefore not have an instance id
1638     dump(1);
1639     assert(false, "Unbuffered inline type should not have known instance id");
1640   }
1641 #endif
1642   if (toop == Type::TOP) {
1643     return Type::TOP;
1644   }
1645   const Type* t = toop->filter_speculative(_type);
1646   if (t->singleton()) {
1647     // Don't replace InlineType by a constant
1648     t = _type;
1649   }
1650   const Type* tinit = phase->type(in(NullMarker));
1651   if (tinit == Type::TOP) {
1652     return Type::TOP;
1653   }
1654   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1655     t = t->join_speculative(TypePtr::NOTNULL);
1656   }
1657   return t;
1658 }