1 /*
   2  * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciInlineKlass.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "oops/accessDecorators.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/convertnode.hpp"
  33 #include "opto/graphKit.hpp"
  34 #include "opto/inlinetypenode.hpp"
  35 #include "opto/movenode.hpp"
  36 #include "opto/narrowptrnode.hpp"
  37 #include "opto/opcodes.hpp"
  38 #include "opto/rootnode.hpp"
  39 #include "opto/phaseX.hpp"
  40 #include "opto/type.hpp"
  41 #include "utilities/globalDefinitions.hpp"
  42 
  43 // Clones the inline type to handle control flow merges involving multiple inline types.
  44 // The inputs are replaced by PhiNodes to represent the merged values for the given region.
  45 InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_non_null) {
  46   InlineTypeNode* vt = clone_if_required(gvn, map);
  47   const Type* t = Type::get_const_type(inline_klass());
  48   gvn->set_type(vt, t);
  49   vt->as_InlineType()->set_type(t);
  50 
  51   // Create a PhiNode for merging the oop values
  52   PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  53   gvn->set_type(oop, t);
  54   gvn->record_for_igvn(oop);
  55   vt->set_oop(*gvn, oop);
  56 
  57   // Create a PhiNode for merging the is_buffered values
  58   t = Type::get_const_basic_type(T_BOOLEAN);
  59   Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  60   gvn->set_type(is_buffered_node, t);
  61   gvn->record_for_igvn(is_buffered_node);
  62   vt->set_req(IsBuffered, is_buffered_node);
  63 
  64   // Create a PhiNode for merging the null_marker values
  65   Node* null_marker_node;
  66   if (is_non_null) {
  67     null_marker_node = gvn->intcon(1);
  68   } else {
  69     t = Type::get_const_basic_type(T_BOOLEAN);
  70     null_marker_node = PhiNode::make(region, vt->get_null_marker(), t);
  71     gvn->set_type(null_marker_node, t);
  72     gvn->record_for_igvn(null_marker_node);
  73   }
  74   vt->set_req(NullMarker, null_marker_node);
  75 
   76   // Create PhiNodes for merging the field values
  77   for (uint i = 0; i < vt->field_count(); ++i) {
  78     ciType* type = vt->field_type(i);
  79     Node*  value = vt->field_value(i);
  80     // We limit scalarization for inline types with circular fields and can therefore observe nodes
  81     // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
  82     // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
  83     bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
  84     if (type->is_inlinetype() && no_circularity) {
  85       // Handle inline type fields recursively
  86       value = value->as_InlineType()->clone_with_phis(gvn, region, map);
  87     } else {
  88       t = Type::get_const_type(type);
  89       value = PhiNode::make(region, value, t);
  90       gvn->set_type(value, t);
  91       gvn->record_for_igvn(value);
  92     }
  93     vt->set_field_value(i, value);
  94   }
  95   gvn->record_for_igvn(vt);
  96   return vt;
  97 }
  98 
  99 // Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
 100 // for the given region (see InlineTypeNode::clone_with_phis).
 101 bool InlineTypeNode::has_phi_inputs(Node* region) {
 102   // Check oop input
 103   bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
 104 #ifdef ASSERT
 105   if (result) {
 106     // Check all field value inputs for consistency
  107     for (uint i = 0; i < field_count(); ++i) {
  108       Node* n = field_value(i);
 109       if (n->is_InlineType()) {
 110         assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
 111       } else {
 112         assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
 113       }
 114     }
 115   }
 116 #endif
 117   return result;
 118 }
 119 
 120 // Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
 121 InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
 122   assert(inline_klass() == other->inline_klass(), "Merging incompatible types");
 123 
 124   // Merge oop inputs
 125   PhiNode* phi = get_oop()->as_Phi();
 126   phi->set_req(pnum, other->get_oop());
 127   if (transform) {
 128     set_oop(*gvn, gvn->transform(phi));
 129   }
 130 
 131   // Merge is_buffered inputs
 132   phi = get_is_buffered()->as_Phi();
 133   phi->set_req(pnum, other->get_is_buffered());
 134   if (transform) {
 135     set_req(IsBuffered, gvn->transform(phi));
 136   }
 137 
 138   // Merge null_marker inputs
 139   Node* null_marker = get_null_marker();
 140   if (null_marker->is_Phi()) {
 141     phi = null_marker->as_Phi();
 142     phi->set_req(pnum, other->get_null_marker());
 143     if (transform) {
 144       set_req(NullMarker, gvn->transform(phi));
 145     }
 146   } else {
  147     assert(null_marker->find_int_con(0) == 1, "only with a non-null inline type");
 148   }
 149 
 150   // Merge field values
 151   for (uint i = 0; i < field_count(); ++i) {
 152     Node* val1 =        field_value(i);
 153     Node* val2 = other->field_value(i);
 154     if (val1->is_InlineType()) {
 155       if (val2->is_Phi()) {
 156         val2 = gvn->transform(val2);
 157       }
 158       val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
 159     } else {
 160       assert(val1->is_Phi(), "must be a phi node");
 161       val1->set_req(pnum, val2);
 162     }
 163     if (transform) {
 164       set_field_value(i, gvn->transform(val1));
 165     }
 166   }
 167   return this;
 168 }
 169 
 170 // Adds a new merge path to an inline type node with phi inputs
 171 void InlineTypeNode::add_new_path(Node* region) {
 172   assert(has_phi_inputs(region), "must have phi inputs");
 173 
 174   PhiNode* phi = get_oop()->as_Phi();
 175   phi->add_req(nullptr);
 176   assert(phi->req() == region->req(), "must be same size as region");
 177 
 178   phi = get_is_buffered()->as_Phi();
 179   phi->add_req(nullptr);
 180   assert(phi->req() == region->req(), "must be same size as region");
 181 
 182   phi = get_null_marker()->as_Phi();
 183   phi->add_req(nullptr);
 184   assert(phi->req() == region->req(), "must be same size as region");
 185 
 186   for (uint i = 0; i < field_count(); ++i) {
 187     Node* val = field_value(i);
 188     if (val->is_InlineType()) {
 189       val->as_InlineType()->add_new_path(region);
 190     } else {
 191       val->as_Phi()->add_req(nullptr);
 192       assert(val->req() == region->req(), "must be same size as region");
 193     }
 194   }
 195 }
 196 
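      // Returns the value of the field at the given index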
 197 Node* InlineTypeNode::field_value(uint index) const {
 198   assert(index < field_count(), "index out of bounds");
 199   return in(Values + index);
 200 }
 201 
 202 // Get the value of the field at the given offset.
 203 // If 'recursive' is true, flat inline type fields will be resolved recursively.
 204 Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
 205   // Find the declared field which contains the field we are looking for
 206   int index = inline_klass()->field_index_by_offset(offset);
 207   Node* value = field_value(index);
 208   assert(value != nullptr, "field value not found");
 209 
 210   if (!recursive || !field_is_flat(index)) {
 211     assert(offset == field_offset(index), "offset mismatch");
 212     return value;
 213   }
 214 
 215   // Flat inline type field
 216   InlineTypeNode* vt = value->as_InlineType();
 217   if (offset == field_null_marker_offset(index)) {
 218     return vt->get_null_marker();
 219   } else {
 220     int sub_offset = offset - field_offset(index); // Offset of the flattened field inside the declared field
 221     sub_offset += vt->inline_klass()->payload_offset(); // Add header size
 222     return vt->field_value_by_offset(sub_offset, recursive);
 223   }
 224 }
 225 
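      // Sets the value of the field at the given index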
 226 void InlineTypeNode::set_field_value(uint index, Node* value) {
 227   assert(index < field_count(), "index out of bounds");
 228   set_req(Values + index, value);
 229 }
 230 
 231 void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
 232   set_field_value(field_index(offset), value);
 233 }
 234 
 235 int InlineTypeNode::field_offset(uint index) const {
 236   assert(index < field_count(), "index out of bounds");
 237   return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
 238 }
 239 
 240 uint InlineTypeNode::field_index(int offset) const {
 241   uint i = 0;
 242   for (; i < field_count() && field_offset(i) != offset; i++) { }
 243   assert(i < field_count(), "field not found");
 244   return i;
 245 }
 246 
 247 ciType* InlineTypeNode::field_type(uint index) const {
 248   assert(index < field_count(), "index out of bounds");
 249   return inline_klass()->declared_nonstatic_field_at(index)->type();
 250 }
 251 
 252 bool InlineTypeNode::field_is_flat(uint index) const {
 253   assert(index < field_count(), "index out of bounds");
 254   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 255   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 256   return field->is_flat();
 257 }
 258 
 259 bool InlineTypeNode::field_is_null_free(uint index) const {
 260   assert(index < field_count(), "index out of bounds");
 261   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 262   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 263   return field->is_null_free();
 264 }
 265 
 266 bool InlineTypeNode::field_is_volatile(uint index) const {
 267   assert(index < field_count(), "index out of bounds");
 268   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
 269   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 270   return field->is_volatile();
 271 }
 272 
 273 int InlineTypeNode::field_null_marker_offset(uint index) const {
 274   assert(index < field_count(), "index out of bounds");
 275   ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  276   assert(field->is_flat(), "must be a flat field");
 277   return field->null_marker_offset();
 278 }
 279 
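      // Adds the field values of this inline type as debug info edges to the safepoint and returns
      // the number of added edges. Flat fields are added recursively, followed by their null marker
      // if they are nullable. Non-flat inline type fields are pushed to the worklist so that they
      // are scalarized later.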
 280 uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) {
 281   uint cnt = 0;
 282   for (uint i = 0; i < field_count(); ++i) {
 283     Node* value = field_value(i);
 284     if (field_is_flat(i)) {
 285       InlineTypeNode* vt = value->as_InlineType();
 286       cnt += vt->add_fields_to_safepoint(worklist, sfpt);
 287       if (!field_is_null_free(i)) {
 288         // The null marker of a flat field is added right after we scalarize that field
 289         sfpt->add_req(vt->get_null_marker());
 290         cnt++;
 291       }
 292       continue;
 293     }
 294     if (value->is_InlineType()) {
 295       // Add inline type to the worklist to process later
 296       worklist.push(value);
 297     }
 298     sfpt->add_req(value);
 299     cnt++;
 300   }
 301   return cnt;
 302 }
 303 
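      // Scalarizes this inline type in the debug info of the given safepoint: the null marker and
      // field values are added as inputs and debug uses of this node are replaced by a
      // SafePointScalarObjectNode describing them.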
 304 void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
 305   JVMState* jvms = sfpt->jvms();
 306   assert(jvms != nullptr, "missing JVMS");
 307   uint first_ind = (sfpt->req() - jvms->scloff());
 308 
 309   // Iterate over the inline type fields in order of increasing offset and add the
  310   // field values to the safepoint. Nullable inline types have a null marker field that
 311   // needs to be checked before using the field values.
 312   sfpt->add_req(get_null_marker());
 313   uint nfields = add_fields_to_safepoint(worklist, sfpt);
 314   jvms->set_endoff(sfpt->req());
 315   // Replace safepoint edge by SafePointScalarObjectNode
 316   SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
 317                                                                   nullptr,
 318                                                                   first_ind,
 319                                                                   sfpt->jvms()->depth(),
 320                                                                   nfields);
 321   sobj->init_req(0, igvn->C->root());
 322   sobj = igvn->transform(sobj)->as_SafePointScalarObject();
 323   igvn->rehash_node_delayed(sfpt);
 324   for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
 325     Node* debug = sfpt->in(i);
 326     if (debug != nullptr && debug->uncast() == this) {
 327       sfpt->set_req(i, sobj);
 328     }
 329   }
 330 }
 331 
 332 void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
 333   // If the inline type has a constant or loaded oop, use the oop instead of scalarization
 334   // in the safepoint to avoid keeping field loads live just for the debug info.
 335   Node* oop = get_oop();
 336   bool use_oop = false;
 337   if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
 338     Unique_Node_List worklist;
 339     VectorSet visited;
 340     visited.set(oop->_idx);
 341     worklist.push(oop);
 342     use_oop = true;
 343     while (worklist.size() > 0 && use_oop) {
 344       Node* n = worklist.pop();
 345       for (uint i = 1; i < n->req(); i++) {
 346         Node* in = n->in(i);
 347         if (in->is_Phi() && !visited.test_set(in->_idx)) {
 348           worklist.push(in);
 349         } else if (!(in->is_Con() || in->is_Parm())) {
 350           use_oop = false;
 351           break;
 352         }
 353       }
 354     }
 355   } else {
 356     use_oop = allow_oop && is_allocated(igvn) &&
 357               (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
 358   }
 359 
 360   ResourceMark rm;
 361   Unique_Node_List safepoints;
 362   Unique_Node_List vt_worklist;
 363   Unique_Node_List worklist;
 364   worklist.push(this);
 365   while (worklist.size() > 0) {
 366     Node* n = worklist.pop();
 367     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 368       Node* use = n->fast_out(i);
 369       if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
 370         safepoints.push(use);
 371       } else if (use->is_ConstraintCast()) {
 372         worklist.push(use);
 373       }
 374     }
 375   }
 376 
 377   // Process all safepoint uses and scalarize inline type
 378   while (safepoints.size() > 0) {
 379     SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
 380     if (use_oop) {
 381       for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
 382         Node* debug = sfpt->in(i);
 383         if (debug != nullptr && debug->uncast() == this) {
 384           sfpt->set_req(i, get_oop());
 385         }
 386       }
 387       igvn->rehash_node_delayed(sfpt);
 388     } else {
 389       make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
 390     }
 391   }
 392   // Now scalarize non-flat fields
 393   for (uint i = 0; i < vt_worklist.size(); ++i) {
  394     InlineTypeNode* vt = vt_worklist.at(i)->as_InlineType();
 395     vt->make_scalar_in_safepoints(igvn);
 396   }
 397   if (outcnt() == 0) {
 398     igvn->record_for_igvn(this);
 399   }
 400 }
 401 
 402 // We limit scalarization for inline types with circular fields and can therefore observe nodes
 403 // of the same type but with different scalarization depth during GVN. This method adjusts the
 404 // scalarization depth to avoid inconsistencies during merging.
 405 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
 406   if (!kit->C->has_circular_inline_type()) {
 407     return this;
 408   }
 409   GrowableArray<ciType*> visited;
 410   visited.push(inline_klass());
 411   return adjust_scalarization_depth_impl(kit, visited);
 412 }
 413 
 414 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
 415   InlineTypeNode* val = this;
 416   for (uint i = 0; i < field_count(); ++i) {
 417     Node* value = field_value(i);
 418     Node* new_value = value;
 419     ciType* ft = field_type(i);
 420     if (value->is_InlineType()) {
 421       if (!field_is_flat(i) && visited.contains(ft)) {
 422         new_value = value->as_InlineType()->buffer(kit)->get_oop();
 423       } else {
 424         int old_len = visited.length();
 425         visited.push(ft);
 426         new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
 427         visited.trunc_to(old_len);
 428       }
 429     } else if (ft->is_inlinetype() && !visited.contains(ft)) {
 430       int old_len = visited.length();
 431       visited.push(ft);
 432       new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 433       visited.trunc_to(old_len);
 434     }
 435     if (value != new_value) {
 436       if (val == this) {
 437         val = clone_if_required(&kit->gvn(), kit->map());
 438       }
 439       val->set_field_value(i, new_value);
 440     }
 441   }
 442   return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
 443 }
 444 
 445 void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
 446   // Initialize the inline type by loading its field values from
 447   // memory and adding the values as input edges to the node.
 448   ciInlineKlass* vk = inline_klass();
 449   for (uint i = 0; i < field_count(); ++i) {
 450     int field_off = field_offset(i) - vk->payload_offset();
 451     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 452     Node* value = nullptr;
 453     ciType* ft = field_type(i);
 454     bool field_null_free = field_is_null_free(i);
 455     if (field_is_flat(i)) {
 456       // Recursively load the flat inline type field
 457       ciInlineKlass* fvk = ft->as_inline_klass();
 458       // Atomic if nullable or not LooselyConsistentValue
 459       bool atomic = !field_null_free || fvk->must_be_atomic();
 460 
 461       int old_len = visited.length();
 462       visited.push(ft);
 463       value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
 464                                   field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
 465       visited.trunc_to(old_len);
 466     } else {
 467       // Load field value from memory
 468       BasicType bt = type2field[ft->basic_type()];
 469       assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
 470       const Type* val_type = Type::get_const_type(ft);
 471       if (trust_null_free_oop && field_null_free) {
 472         val_type = val_type->join_speculative(TypePtr::NOTNULL);
 473       }
 474       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 475       value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
 476       // Loading a non-flattened inline type from memory
 477       if (visited.contains(ft)) {
 478         kit->C->set_has_circular_inline_type(true);
 479       } else if (ft->is_inlinetype()) {
 480         int old_len = visited.length();
 481         visited.push(ft);
 482         value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 483         visited.trunc_to(old_len);
 484       }
 485     }
 486     set_field_value(i, value);
 487   }
 488 }
 489 
 490 // Get a field value from the payload by shifting it according to the offset
 491 static Node* get_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, BasicType val_bt, int offset) {
  492   // Shift to the right position in the payload value
 493   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
 494   Node* value = nullptr;
 495   Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
 496   if (bt == T_LONG) {
 497     value = gvn->transform(new URShiftLNode(payload, shift_val));
 498     value = gvn->transform(new ConvL2INode(value));
 499   } else {
 500     value = gvn->transform(new URShiftINode(payload, shift_val));
 501   }
 502 
 503   if (val_bt == T_INT || val_bt == T_OBJECT || val_bt == T_ARRAY) {
 504     return value;
 505   } else {
 506     // Make sure to zero unused bits in the 32-bit value
 507     return Compile::narrow_value(val_bt, value, nullptr, gvn, true);
 508   }
 509 }
 510 
 511 // Convert a payload value to field values
 512 void InlineTypeNode::convert_from_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, bool trust_null_free_oop) {
 513   PhaseGVN* gvn = &kit->gvn();
 514   ciInlineKlass* vk = inline_klass();
 515   Node* value = nullptr;
 516   if (!null_free) {
 517     // Get the null marker
 518     value = get_payload_value(gvn, payload, bt, T_BOOLEAN, holder_offset + vk->null_marker_offset_in_payload());
 519     set_req(NullMarker, value);
 520   }
 521   // Iterate over the fields and get their values from the payload
 522   for (uint i = 0; i < field_count(); ++i) {
 523     ciType* ft = field_type(i);
 524     bool field_null_free = field_is_null_free(i);
 525     int offset = holder_offset + field_offset(i) - vk->payload_offset();
 526     if (field_is_flat(i)) {
 527       InlineTypeNode* vt = make_uninitialized(*gvn, ft->as_inline_klass(), field_null_free);
 528       vt->convert_from_payload(kit, bt, payload, offset, field_null_free, trust_null_free_oop && field_null_free);
 529       value = gvn->transform(vt);
 530     } else {
 531       value = get_payload_value(gvn, payload, bt, ft->basic_type(), offset);
 532       if (!ft->is_primitive_type()) {
 533         // Narrow oop field
 534         assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
 535         const Type* val_type = Type::get_const_type(ft);
 536         if (trust_null_free_oop && field_null_free) {
 537           val_type = val_type->join_speculative(TypePtr::NOTNULL);
 538         }
 539         value = gvn->transform(new CastI2NNode(kit->control(), value, val_type->make_narrowoop()));
 540         value = gvn->transform(new DecodeNNode(value, val_type->make_narrowoop()));
 541 
 542         // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
 543         // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
 544         // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
 545         // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.
 546 
 547         if (ft->is_inlinetype()) {
 548           GrowableArray<ciType*> visited;
 549           value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 550         }
 551       }
 552     }
 553     set_field_value(i, value);
 554   }
 555 }
 556 
 557 // Set a field value in the payload by shifting it according to the offset
 558 static Node* set_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, Node* value, BasicType val_bt, int offset) {
 559   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
 560 
 561   // Make sure to zero unused bits in the 32-bit value
 562   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
 563     value = gvn->transform(new AndINode(value, gvn->intcon(0xFF)));
 564   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
 565     value = gvn->transform(new AndINode(value, gvn->intcon(0xFFFF)));
 566   } else if (val_bt == T_FLOAT) {
 567     value = gvn->transform(new MoveF2INode(value));
 568   } else {
 569     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
 570   }
 571 
 572   Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
 573   if (bt == T_LONG) {
  574     // Convert to long and clear the sign-extended upper bits (the backend will fold this and emit a zero extend i2l)
 575     value = gvn->transform(new ConvI2LNode(value));
 576     value = gvn->transform(new AndLNode(value, gvn->longcon(0xFFFFFFFF)));
 577 
 578     Node* shift_value = gvn->transform(new LShiftLNode(value, shift_val));
 579     payload = new OrLNode(shift_value, payload);
 580   } else {
 581     Node* shift_value = gvn->transform(new LShiftINode(value, shift_val));
 582     payload = new OrINode(shift_value, payload);
 583   }
 584   return gvn->transform(payload);
 585 }
 586 
 587 // Convert the field values to a payload value of type 'bt'
 588 Node* InlineTypeNode::convert_to_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset, int& oop_off_1, int& oop_off_2) const {
 589   PhaseGVN* gvn = &kit->gvn();
 590   Node* value = nullptr;
 591   if (!null_free) {
 592     // Set the null marker
 593     value = get_null_marker();
 594     payload = set_payload_value(gvn, payload, bt, value, T_BOOLEAN, null_marker_offset);
 595   }
 596   // Iterate over the fields and add their values to the payload
 597   for (uint i = 0; i < field_count(); ++i) {
 598     value = field_value(i);
 599     int inner_offset = field_offset(i) - inline_klass()->payload_offset();
 600     int offset = holder_offset + inner_offset;
 601     if (field_is_flat(i)) {
 602       null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
 603       payload = value->as_InlineType()->convert_to_payload(kit, bt, payload, offset, field_is_null_free(i), null_marker_offset, oop_off_1, oop_off_2);
 604     } else {
 605       ciType* ft = field_type(i);
 606       BasicType field_bt = ft->basic_type();
 607       if (!ft->is_primitive_type()) {
 608         // Narrow oop field
 609         assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
 610         assert(inner_offset != -1, "sanity");
 611         if (oop_off_1 == -1) {
 612           oop_off_1 = inner_offset;
 613         } else {
 614           assert(oop_off_2 == -1, "already set");
 615           oop_off_2 = inner_offset;
 616         }
 617         const Type* val_type = Type::get_const_type(ft)->make_narrowoop();
 618         if (value->is_InlineType()) {
 619           PreserveReexecuteState preexecs(kit);
 620           kit->jvms()->set_should_reexecute(true);
 621           value = value->as_InlineType()->buffer(kit, false);
 622         }
 623         value = gvn->transform(new EncodePNode(value, val_type));
 624         value = gvn->transform(new CastP2XNode(kit->control(), value));
 625         value = gvn->transform(new ConvL2INode(value));
 626         field_bt = T_INT;
 627       }
 628       payload = set_payload_value(gvn, payload, bt, value, field_bt, offset);
 629     }
 630   }
 631   return payload;
 632 }
 633 
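      // Stores this inline type to memory in a flat (headerless) layout at 'ptr'. If an atomic store
      // is required, the field values are converted into a payload value that is written with a single
      // access. Otherwise, the fields (and the null marker, if nullable) are stored individually.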
 634 void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) const {
 635   ciInlineKlass* vk = inline_klass();
 636   bool do_atomic = atomic;
  637   // With immutable memory, a non-atomic store and an atomic store are the same
 638   if (immutable_memory) {
 639     do_atomic = false;
 640   }
  641   // If there is only one flattened field, a non-atomic store and an atomic store are the same
 642   if (vk->is_naturally_atomic(null_free)) {
 643     do_atomic = false;
 644   }
 645 
 646   if (!do_atomic) {
 647     if (!null_free) {
 648       int nm_offset = vk->null_marker_offset_in_payload();
 649       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
 650       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 651       kit->access_store_at(base, nm_ptr, nm_ptr_type, get_null_marker(), TypeInt::BOOL, T_BOOLEAN, decorators);
 652     }
 653     store(kit, base, ptr, immutable_memory, decorators);
 654     return;
 655   }
 656 
 657   // Convert to a payload value <= 64-bit and write atomically.
 658   // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
 659   // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
 660   // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
 661   // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
 662   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
 663   BasicType store_bt = vk->atomic_size_to_basic_type(null_free);
 664   Node* payload = (store_bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
 665   int oop_off_1 = -1;
 666   int oop_off_2 = -1;
 667   payload = convert_to_payload(kit, store_bt, payload, 0, null_free, vk->null_marker_offset_in_payload(), oop_off_1, oop_off_2);
 668   if (!UseG1GC || oop_off_1 == -1) {
 669     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
 670     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
 671     // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
 672     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
 673     const Type* val_type = Type::get_const_basic_type(store_bt);
 674     kit->insert_mem_bar(Op_MemBarCPUOrder);
 675     kit->access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, val_type, store_bt, decorators | C2_MISMATCHED, true, this);
 676     kit->insert_mem_bar(Op_MemBarCPUOrder);
 677   } else {
 678     // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
 679     assert(UseG1GC, "Unexpected GC");
 680     assert(store_bt == T_LONG, "Unexpected payload type");
 681     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
 682     Node* oop_offset = (oop_off_2 == -1) ? kit->intcon(oop_off_1) : nullptr;
 683     kit->insert_mem_bar(Op_MemBarCPUOrder);
 684     Node* mem = kit->reset_memory();
 685     kit->set_all_memory(mem);
 686     Node* st = kit->gvn().transform(new StoreLSpecialNode(kit->control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
 687     kit->set_memory(st, TypeRawPtr::BOTTOM);
 688     kit->insert_mem_bar(Op_MemBarCPUOrder);
 689   }
 690 }
 691 
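      // Stores this inline type into a flat array element. Since the flat layout of the element is
      // only known at runtime, the store is dispatched on the array layout (nullable, null-free
      // atomic, or null-free non-atomic).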
 692 void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) const {
 693   PhaseGVN& gvn = kit->gvn();
 694   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
 695   kit->C->set_flat_accesses();
 696   ciInlineKlass* vk = inline_klass();
 697   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
 698 
 699   RegionNode* region = new RegionNode(4);
 700   gvn.set_type(region, Type::CONTROL);
 701   kit->record_for_igvn(region);
 702 
 703   Node* input_memory_state = kit->reset_memory();
 704   kit->set_all_memory(input_memory_state);
 705 
 706   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
 707   gvn.set_type(mem, Type::MEMORY);
 708   kit->record_for_igvn(mem);
 709 
 710   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
 711   gvn.set_type(io, Type::ABIO);
 712   kit->record_for_igvn(io);
 713 
 714   Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
 715   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
 716 
 717   // Nullable
 718   kit->set_control(kit->IfFalse(iff_null_free));
 719   if (!kit->stopped()) {
 720     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
 721     kit->set_all_memory(input_memory_state);
 722     Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
 723     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 724     store_flat(kit, cast, ptr, true, false, false, decorators);
 725 
 726     region->init_req(1, kit->control());
 727     mem->set_req(1, kit->reset_memory());
 728     io->set_req(1, kit->i_o());
 729   }
 730 
 731   // Null-free
 732   kit->set_control(kit->IfTrue(iff_null_free));
 733   if (!kit->stopped()) {
 734     kit->set_all_memory(input_memory_state);
 735 
 736     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
 737     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
 738 
 739     // Atomic
 740     kit->set_control(kit->IfTrue(iff_atomic));
 741     if (!kit->stopped()) {
 742       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
 743       kit->set_all_memory(input_memory_state);
 744       Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
 745       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 746       store_flat(kit, cast, ptr, true, false, true, decorators);
 747 
 748       region->init_req(2, kit->control());
 749       mem->set_req(2, kit->reset_memory());
 750       io->set_req(2, kit->i_o());
 751     }
 752 
 753     // Non-atomic
 754     kit->set_control(kit->IfFalse(iff_atomic));
 755     if (!kit->stopped()) {
 756       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
 757       kit->set_all_memory(input_memory_state);
 758       Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
 759       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 760       store_flat(kit, cast, ptr, false, false, true, decorators);
 761 
 762       region->init_req(3, kit->control());
 763       mem->set_req(3, kit->reset_memory());
 764       io->set_req(3, kit->i_o());
 765     }
 766   }
 767 
 768   kit->set_control(gvn.transform(region));
 769   kit->set_all_memory(gvn.transform(mem));
 770   kit->set_i_o(gvn.transform(io));
 771 }
 772 
 773 void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
 774   // Write field values to memory
 775   ciInlineKlass* vk = inline_klass();
 776   for (uint i = 0; i < field_count(); ++i) {
 777     int field_off = field_offset(i) - vk->payload_offset();
 778     Node* field_val = field_value(i);
 779     bool field_null_free = field_is_null_free(i);
 780     ciType* ft = field_type(i);
 781     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 782     if (field_is_flat(i)) {
 783       // Recursively store the flat inline type field
 784       ciInlineKlass* fvk = ft->as_inline_klass();
 785       // Atomic if nullable or not LooselyConsistentValue
 786       bool atomic = !field_null_free || fvk->must_be_atomic();
 787 
 788       field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
 789     } else {
 790       // Store field value to memory
 791       BasicType bt = type2field[ft->basic_type()];
 792       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 793       const Type* val_type = Type::get_const_type(ft);
 794       kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
 795     }
 796   }
 797 }
 798 
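      // Returns an inline type node whose oop input points to a buffer (heap allocation) holding the
      // value. If the inline type is not buffered yet, a buffer is allocated and initialized.
      // A null inline type is represented by a null oop.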
 799 InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
 800   if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
 801     // Already buffered
 802     return this;
 803   }
 804 
 805   // Check if inline type is already buffered
 806   Node* not_buffered_ctl = kit->top();
 807   Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
 808   if (not_buffered_ctl->is_top()) {
 809     // Already buffered
 810     InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 811     vt->set_is_buffered(kit->gvn());
 812     vt = kit->gvn().transform(vt)->as_InlineType();
 813     if (safe_for_replace) {
 814       kit->replace_in_map(this, vt);
 815     }
 816     return vt;
 817   }
 818   Node* buffered_ctl = kit->control();
 819   kit->set_control(not_buffered_ctl);
 820 
 821   // Inline type is not buffered, check if it is null.
 822   Node* null_ctl = kit->top();
 823   kit->null_check_common(get_null_marker(), T_INT, false, &null_ctl);
 824   bool null_free = null_ctl->is_top();
 825 
 826   RegionNode* region = new RegionNode(4);
 827   PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));
 828 
 829   // InlineType is already buffered
 830   region->init_req(1, buffered_ctl);
 831   oop->init_req(1, not_null_oop);
 832 
 833   // InlineType is null
 834   region->init_req(2, null_ctl);
 835   oop->init_req(2, kit->gvn().zerocon(T_OBJECT));
 836 
 837   PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
 838   PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);
 839 
 840   if (!kit->stopped()) {
 841     assert(!is_allocated(&kit->gvn()), "already buffered");
 842     PreserveJVMState pjvms(kit);
 843     ciInlineKlass* vk = inline_klass();
 844     // Allocate and initialize buffer, re-execute on deoptimization.
 845     kit->jvms()->set_bci(kit->bci());
 846     kit->jvms()->set_should_reexecute(true);
 847     kit->kill_dead_locals();
 848     Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
 849     Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
 850     Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
 851     store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);
 852 
 853     // Do not let stores that initialize this buffer be reordered with a subsequent
 854     // store that would make this buffer accessible by other threads.
 855     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
 856     assert(alloc != nullptr, "must have an allocation node");
 857     kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 858     oop->init_req(3, alloc_oop);
 859     region->init_req(3, kit->control());
 860     io    ->init_req(3, kit->i_o());
 861     mem   ->init_req(3, kit->merged_memory());
 862   }
 863 
 864   // Update GraphKit
 865   kit->set_control(kit->gvn().transform(region));
 866   kit->set_i_o(kit->gvn().transform(io));
 867   kit->set_all_memory(kit->gvn().transform(mem));
 868   kit->record_for_igvn(region);
 869   kit->record_for_igvn(oop);
 870   kit->record_for_igvn(io);
 871   kit->record_for_igvn(mem);
 872 
 873   // Use cloned InlineTypeNode to propagate oop from now on
 874   Node* res_oop = kit->gvn().transform(oop);
 875   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 876   vt->set_oop(kit->gvn(), res_oop);
 877   vt->set_is_buffered(kit->gvn());
 878   vt = kit->gvn().transform(vt)->as_InlineType();
 879   if (safe_for_replace) {
 880     kit->replace_in_map(this, vt);
 881   }
 882   // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
 883   // Make sure it gets a chance to remove this allocation.
 884   kit->C->set_has_split_ifs(true);
 885   return vt;
 886 }
 887 
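      // Returns true if this inline type is known to be buffered, i.e. its oop input is known to be non-null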
 888 bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
 889   if (phase->find_int_con(get_is_buffered(), 0) == 1) {
 890     return true;
 891   }
 892   Node* oop = get_oop();
 893   const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
 894   return !oop_type->maybe_null();
 895 }
 896 
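      // Replaces the result projection of 'call' at 'proj_idx' (if it exists) by 'value' and advances
      // the projection index by the size of 'bt'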
 897 static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
 898   ProjNode* pn = call->proj_out_or_null(proj_idx);
 899   if (pn != nullptr) {
 900     C->gvn_replace_by(pn, value);
 901     C->initial_gvn()->hash_delete(pn);
 902     pn->set_req(0, C->top());
 903   }
 904   proj_idx += type2size[bt];
 905 }
 906 
 907 // When a call returns multiple values, it has several result
 908 // projections, one per field. Replacing the result of the call by an
 909 // inline type node (after late inlining) requires that for each result
 910 // projection, we find the corresponding inline type field.
 911 void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
 912   uint proj_idx = TypeFunc::Parms;
 913   // Replace oop projection
 914   replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
 915   // Replace field projections
 916   replace_field_projs(C, call, proj_idx);
 917   // Replace null_marker projection
 918   replace_proj(C, call, proj_idx, get_null_marker(), T_BOOLEAN);
 919   assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
 920 }
 921 
 922 void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
 923   for (uint i = 0; i < field_count(); ++i) {
 924     Node* value = field_value(i);
 925     if (field_is_flat(i)) {
 926       InlineTypeNode* vt = value->as_InlineType();
 927       // Replace field projections for flat field
 928       vt->replace_field_projs(C, call, proj_idx);
 929       if (!field_is_null_free(i)) {
 930         // Replace null_marker projection for nullable field
 931         replace_proj(C, call, proj_idx, vt->get_null_marker(), T_BOOLEAN);
 932       }
 933       continue;
 934     }
 935     // Replace projection for field value
 936     replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
 937   }
 938 }
 939 
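      // Buffers all inline type field values, recursing into flat fields, and returns the updated
      // inline type node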
 940 Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
 941   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
 942   for (uint i = 0; i < field_count(); i++) {
 943      Node* value = field_value(i);
 944      if (field_is_flat(i)) {
 945        // Flat inline type field
 946        vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
 947      } else if (value->is_InlineType()) {
 948        // Non-flat inline type field
 949        vt->set_field_value(i, value->as_InlineType()->buffer(kit));
 950      }
 951   }
 952   vt = kit->gvn().transform(vt)->as_InlineType();
 953   kit->replace_in_map(this, vt);
 954   return vt;
 955 }
 956 
 957 // Replace a buffer allocation by a dominating allocation
 958 static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
 959   // Remove initializing stores and GC barriers
 960   for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
 961     Node* use = res->fast_out(i);
 962     if (use->is_AddP()) {
 963       for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
 964         Node* store = use->fast_out(j)->isa_Store();
 965         if (store != nullptr) {
 966           igvn->rehash_node_delayed(store);
 967           igvn->replace_in_uses(store, store->in(MemNode::Memory));
 968         }
 969       }
 970     } else if (use->Opcode() == Op_CastP2X) {
 971       if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
 972         // The G1 pre-barrier uses a CastP2X both for the pointer of the object
 973         // we store into, as well as the value we are storing. Skip if this is a
 974         // barrier for storing 'res' into another object.
 975         continue;
 976       }
 977       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 978       bs->eliminate_gc_barrier(igvn, use);
 979       --i; --imax;
 980     }
 981   }
 982   igvn->replace_node(res, dom);
 983 }
 984 
 985 Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 986   Node* oop = get_oop();
 987   Node* is_buffered = get_is_buffered();
 988 
 989   if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
 990     InlineTypeNode* vtptr = oop->as_InlineType();
 991     set_oop(*phase, vtptr->get_oop());
 992     set_is_buffered(*phase);
 993     set_null_marker(*phase);
 994     for (uint i = Values; i < vtptr->req(); ++i) {
 995       set_req(i, vtptr->in(i));
 996     }
 997     return this;
 998   }
 999 
 1000   // Use the base oop if fields are loaded from memory, but don't do so if the base is the CheckCastPP of an
1001   // allocation because the only case we load from a naked CheckCastPP is when we exit a
1002   // constructor of an inline type and we want to relinquish the larval oop there. This has a
 1003   // few benefits:
1004   // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
1005   // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge
1006   //   when we try to clone a value object.
1007   // - The buffering, if needed, is delayed until it is required. This new allocation, since it is
 1008   //   created from an InlineTypeNode, is recognized as not having a unique identity and, in the
 1009   //   future, can be moved around more freely, for example by hoisting it out of loops. This is not true
1010   //   for the old allocation since larval value objects do have unique identities.
1011   Node* base = is_loaded(phase);
1012   if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) {
1013     if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
1014       set_oop(*phase, base);
1015       set_is_buffered(*phase);
1016       return this;
1017     }
1018   }
1019 
1020   if (can_reshape) {
1021     PhaseIterGVN* igvn = phase->is_IterGVN();
1022     if (is_allocated(phase)) {
1023       // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
1024       // they will be removed anyway and changing the memory chain will confuse other optimizations.
1025       // This can happen with late inlining when we first allocate an inline type argument
1026       // but later decide to inline the call after the callee code also triggered allocation.
1027       for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1028         AllocateNode* alloc = fast_out(i)->isa_Allocate();
1029         if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1030           // Found a re-allocation
1031           Node* res = alloc->result_cast();
1032           if (res != nullptr && res->is_CheckCastPP()) {
1033             // Replace allocation by oop and unlink AllocateNode
1034             replace_allocation(igvn, res, oop);
1035             igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
1036             --i; --imax;
1037           }
1038         }
1039       }
1040     }
1041   }
1042 
1043   return nullptr;
1044 }
1045 
1046 InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
1047   // Create a new InlineTypeNode with uninitialized values and nullptr oop
1048   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
1049   vt->set_is_buffered(gvn, false);
1050   vt->set_null_marker(gvn);
1051   return vt;
1052 }
1053 
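      // Creates an InlineTypeNode representing the all-zero (default) value of the given inline klass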
1054 InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
1055   GrowableArray<ciType*> visited;
1056   visited.push(vk);
1057   return make_all_zero_impl(gvn, vk, visited);
1058 }
1059 
1060 InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1061   // Create a new InlineTypeNode initialized with all zero
1062   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
1063   vt->set_is_buffered(gvn, false);
1064   vt->set_null_marker(gvn);
1065   for (uint i = 0; i < vt->field_count(); ++i) {
1066     ciType* ft = vt->field_type(i);
1067     Node* value = gvn.zerocon(ft->basic_type());
1068     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1069       gvn.C->set_has_circular_inline_type(true);
1070     } else if (ft->is_inlinetype()) {
1071       int old_len = visited.length();
1072       visited.push(ft);
1073       ciInlineKlass* vk = ft->as_inline_klass();
1074       if (vt->field_is_null_free(i)) {
1075         value = make_all_zero_impl(gvn, vk, visited);
1076       } else {
1077         value = make_null_impl(gvn, vk, visited);
1078       }
1079       visited.trunc_to(old_len);
1080     }
1081     vt->set_field_value(i, value);
1082   }
1083   vt = gvn.transform(vt)->as_InlineType();
1084   assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
1085   return vt;
1086 }
1087 
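      // Returns true if this inline type is known to represent the all-zero (default) value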
1088 bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
1089   const TypeInt* tinit = gvn->type(get_null_marker())->isa_int();
1090   if (tinit == nullptr || !tinit->is_con(1)) {
1091     return false; // May be null
1092   }
1093   for (uint i = 0; i < field_count(); ++i) {
1094     Node* value = field_value(i);
1095     if (field_is_null_free(i)) {
1096       // Null-free value class field must have the all-zero value. If 'flat' is set,
1097       // reject non-flat fields because they need to be initialized with an oop to a buffer.
1098       if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
1099         return false;
1100       }
1101       continue;
1102     } else if (value->is_InlineType()) {
1103       // Nullable value class field must be null
1104       tinit = gvn->type(value->as_InlineType()->get_null_marker())->isa_int();
1105       if (tinit != nullptr && tinit->is_con(0)) {
1106         continue;
1107       }
1108       return false;
1109     } else if (!gvn->type(value)->is_zero_type()) {
1110       return false;
1111     }
1112   }
1113   return true;
1114 }
1115 
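      // Creates an InlineTypeNode from a (possibly null) buffer oop by loading all field values from memory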
1116 InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
1117   GrowableArray<ciType*> visited;
1118   visited.push(vk);
1119   return make_from_oop_impl(kit, oop, vk, visited);
1120 }
1121 
1122 InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1123   PhaseGVN& gvn = kit->gvn();
1124 
1125   // Create and initialize an InlineTypeNode by loading all field
1126   // values from a heap-allocated version and also save the oop.
1127   InlineTypeNode* vt = nullptr;
1128 
1129   if (oop->isa_InlineType()) {
1130     return oop->as_InlineType();
1131   }
1132 
1133   if (gvn.type(oop)->maybe_null()) {
1134     // Add a null check because the oop may be null
1135     Node* null_ctl = kit->top();
1136     Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
1137     if (kit->stopped()) {
1138       // Constant null
1139       kit->set_control(null_ctl);
1140       vt = make_null_impl(gvn, vk, visited);
1141       kit->record_for_igvn(vt);
1142       return vt;
1143     }
1144     vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
1145     vt->set_is_buffered(gvn);
1146     vt->set_null_marker(gvn);
1147     Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
1148     vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1149 
1150     if (null_ctl != kit->top()) {
1151       InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
1152       Node* region = new RegionNode(3);
1153       region->init_req(1, kit->control());
1154       region->init_req(2, null_ctl);
1155       vt = vt->clone_with_phis(&gvn, region, kit->map());
1156       vt->merge_with(&gvn, null_vt, 2, true);
1157       vt->set_oop(gvn, oop);
1158       kit->set_control(gvn.transform(region));
1159     }
1160   } else {
1161     // Oop can never be null
1162     vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
1163     Node* init_ctl = kit->control();
1164     vt->set_is_buffered(gvn);
1165     vt->set_null_marker(gvn);
1166     Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
1167     vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1168 // TODO 8284443
1169 //    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
1170 //           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
1171   }
1172   assert(vt->is_allocated(&gvn), "inline type should be allocated");
1173   kit->record_for_igvn(vt);
1174   return gvn.transform(vt)->as_InlineType();
1175 }
1176 
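      // Creates an InlineTypeNode by loading the field values from a flat (headerless) representation in memory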
1177 InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
1178                                                bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
1179   GrowableArray<ciType*> visited;
1180   visited.push(vk);
1181   return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
1182 }
1183 
 1184 // Implementation of 'make_from_flat' that additionally keeps track of visited field types to handle circular inline type fields
1185 InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
1186                                                     bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
1187   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1188   PhaseGVN& gvn = kit->gvn();
1189   bool do_atomic = atomic;
1190   // With immutable memory, a non-atomic load and an atomic load are the same
1191   if (immutable_memory) {
1192     do_atomic = false;
1193   }
1194   // If there is only one flattened field, a non-atomic load and an atomic load are the same
1195   if (vk->is_naturally_atomic(null_free)) {
1196     do_atomic = false;
1197   }
1198 
1199   if (!do_atomic) {
1200     InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1201     if (!null_free) {
1202       int nm_offset = vk->null_marker_offset_in_payload();
1203       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
1204       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
1205       Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
1206       vt->set_req(NullMarker, nm_value);
1207     }
1208 
1209     vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
1210     return gvn.transform(vt)->as_InlineType();
1211   }
1212 
1213   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
1214   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1215   BasicType load_bt = vk->atomic_size_to_basic_type(null_free);
1216   decorators |= C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD;
1217   const Type* val_type = Type::get_const_basic_type(load_bt);
1218   kit->insert_mem_bar(Op_MemBarCPUOrder);
1219   Node* payload = kit->access_load_at(base, ptr, TypeRawPtr::BOTTOM, val_type, load_bt, decorators, kit->control());
1220   kit->insert_mem_bar(Op_MemBarCPUOrder);
1221   vt->convert_from_payload(kit, load_bt, kit->gvn().transform(payload), 0, null_free, trust_null_free_oop);
1222   return gvn.transform(vt)->as_InlineType();
1223 }
1224 
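     // Loads an inline type from a flat array element. The element layout (nullable atomic,
     // null-free atomic, or null-free non-atomic) is only known at runtime, so each possible
     // layout is checked and the values loaded on the different paths are merged via phis.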
1225 InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
1226   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
1227   PhaseGVN& gvn = kit->gvn();
1228   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
1229   kit->C->set_flat_accesses();
1230   InlineTypeNode* vt_nullable = nullptr;
1231   InlineTypeNode* vt_null_free = nullptr;
1232   InlineTypeNode* vt_non_atomic = nullptr;
1233 
1234   RegionNode* region = new RegionNode(4);
1235   gvn.set_type(region, Type::CONTROL);
1236   kit->record_for_igvn(region);
1237 
1238   Node* input_memory_state = kit->reset_memory();
1239   kit->set_all_memory(input_memory_state);
1240 
1241   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
1242   gvn.set_type(mem, Type::MEMORY);
1243   kit->record_for_igvn(mem);
1244 
1245   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
1246   gvn.set_type(io, Type::ABIO);
1247   kit->record_for_igvn(io);
1248 
1249   Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is unspecified in C++ and since this call sets control, it must come before create_and_map_if
1250   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
1251 
1252   // Nullable
1253   kit->set_control(kit->IfFalse(iff_null_free));
1254   if (!kit->stopped()) {
1255     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
1256     kit->set_all_memory(input_memory_state);
1257     Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
1258     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1259     vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);
1260 
1261     region->init_req(1, kit->control());
1262     mem->set_req(1, kit->reset_memory());
1263     io->set_req(1, kit->i_o());
1264   }
1265 
1266   // Null-free
1267   kit->set_control(kit->IfTrue(iff_null_free));
1268   if (!kit->stopped()) {
1269     kit->set_all_memory(input_memory_state);
1270 
1271     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
1272     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
1273 
1274     // Atomic
1275     kit->set_control(kit->IfTrue(iff_atomic));
1276     if (!kit->stopped()) {
1277       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
1278       kit->set_all_memory(input_memory_state);
1279       Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
1280       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1281       vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);
1282 
1283       region->init_req(2, kit->control());
1284       mem->set_req(2, kit->reset_memory());
1285       io->set_req(2, kit->i_o());
1286     }
1287 
1288     // Non-Atomic
1289     kit->set_control(kit->IfFalse(iff_atomic));
1290     if (!kit->stopped()) {
1291       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
1292       kit->set_all_memory(input_memory_state);
1293       Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
1294       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1295       vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);
1296 
1297       region->init_req(3, kit->control());
1298       mem->set_req(3, kit->reset_memory());
1299       io->set_req(3, kit->i_o());
1300     }
1301   }
1302 
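       // If all paths are dead or only a single layout is possible, no merge is needed.
       // Otherwise, merge the inline types loaded on the different paths over 'region'.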
1303   InlineTypeNode* vt = nullptr;
1304   if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
1305     // All paths are dead
1306     vt = make_null(gvn, vk);
1307   } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
1308     vt = vt_non_atomic;
1309   } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
1310     vt = vt_null_free;
1311   } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
1312     vt = vt_nullable;
1313   }
1314   if (vt != nullptr) {
1315     kit->set_control(kit->gvn().transform(region));
1316     kit->set_all_memory(kit->gvn().transform(mem));
1317     kit->set_i_o(kit->gvn().transform(io));
1318     return vt;
1319   }
1320 
1321   InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
1322   vt = zero->clone_with_phis(&gvn, region);
1323   if (vt_nullable != nullptr) {
1324     vt = vt->merge_with(&gvn, vt_nullable, 1, false);
1325   }
1326   if (vt_null_free != nullptr) {
1327     vt = vt->merge_with(&gvn, vt_null_free, 2, false);
1328   }
1329   if (vt_non_atomic != nullptr) {
1330     vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
1331   }
1332 
1333   kit->set_control(kit->gvn().transform(region));
1334   kit->set_all_memory(kit->gvn().transform(mem));
1335   kit->set_i_o(kit->gvn().transform(io));
1336   return gvn.transform(vt)->as_InlineType();
1337 }
1338 
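     // Creates an inline type from the edges of 'multi' (the parameters of a Start node or
     // the inputs/projections of a call), consuming edges starting at index 'base_input'.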
1339 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1340   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1341   if (!in) {
1342     // Keep track of the oop. The returned inline type might already be buffered.
1343     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1344     vt->set_oop(kit->gvn(), oop);
1345   }
1346   GrowableArray<ciType*> visited;
1347   visited.push(vk);
1348   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1349   return kit->gvn().transform(vt)->as_InlineType();
1350 }
1351 
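     // Checks whether all field values of this inline type are loads from the memory of the
     // same base oop, with offsets matching the flat layout. Returns that base if so,
     // nullptr otherwise.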
1352 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
1353   if (vk == nullptr) {
1354     vk = inline_klass();
1355   }
1356   for (uint i = 0; i < field_count(); ++i) {
1357     int offset = holder_offset + field_offset(i);
1358     Node* value = field_value(i);
1359     if (value->is_InlineType()) {
1360       InlineTypeNode* vt = value->as_InlineType();
1361       if (vt->type()->inline_klass()->is_empty()) {
1362         continue;
1363       } else if (field_is_flat(i)) {
1364         // Check inline type field load recursively
1365         base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1366         if (base == nullptr) {
1367           return nullptr;
1368         }
1369         continue;
1370       } else {
1371         value = vt->get_oop();
1372         if (value->Opcode() == Op_CastPP) {
1373           // Skip CastPP
1374           value = value->in(1);
1375         }
1376       }
1377     }
1378     if (value->isa_DecodeN()) {
1379       // Skip DecodeN
1380       value = value->in(1);
1381     }
1382     if (value->isa_Load()) {
1383       // Check if base and offset of field load matches inline type layout
1384       intptr_t loffset = 0;
1385       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1386       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1387         return nullptr;
1388       } else if (base == nullptr) {
1389         // Set base and check if pointer type matches
1390         base = lbase;
1391         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1392         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1393           return nullptr;
1394         }
1395       }
1396     } else {
1397       return nullptr;
1398     }
1399   }
1400   return base;
1401 }
1402 
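     // Returns the klass pointer of 'vk' with its lowest bit set, as a long constant.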
1403 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1404   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1405   intptr_t bits = tk->get_con();
1406   set_nth_bit(bits, 0);
1407   return gvn.longcon((jlong)bits);
1408 }
1409 
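     // Passes the scalarized values of this inline type to the call or return node 'n',
     // starting at edge index 'base_input'. Flat fields are passed recursively, field by field,
     // followed by their null marker if they are not null-free.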
1410 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1411   if (!null_free && in) {
1412     n->init_req(base_input++, get_null_marker());
1413   }
1414   for (uint i = 0; i < field_count(); i++) {
1415     Node* arg = field_value(i);
1416     if (field_is_flat(i)) {
1417       // Flat inline type field
1418       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1419       if (!field_is_null_free(i)) {
1420         assert(field_null_marker_offset(i) != -1, "inconsistency");
1421         n->init_req(base_input++, arg->as_InlineType()->get_null_marker());
1422       }
1423     } else {
1424       if (arg->is_InlineType()) {
1425         // Non-flat inline type field
1426         InlineTypeNode* vt = arg->as_InlineType();
1427         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1428         arg = vt->buffer(kit);
1429       }
1430       // Initialize call/return arguments
1431       n->init_req(base_input++, arg);
1432       if (field_type(i)->size() == 2) {
1433         n->init_req(base_input++, kit->top());
1434       }
1435     }
1436   }
1437   // The last argument is used to pass the null marker to compiled code and is not required here.
1438   if (!null_free && !in) {
1439     n->init_req(base_input++, kit->top());
1440   }
1441 }
1442 
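     // Initializes the fields of this inline type from the edges of 'multi', starting at index
     // 'base_input': parameters of a Start node, inputs of a call (in == true), or projections
     // of a call's return value (in == false).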
1443 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
1444   PhaseGVN& gvn = kit->gvn();
1445   Node* null_marker = nullptr;
1446   if (!null_free) {
1447     // Nullable inline type
1448     if (in) {
1449       // Set null marker
1450       if (multi->is_Start()) {
1451         null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1452       } else {
1453         null_marker = multi->as_Call()->in(base_input);
1454       }
1455       set_req(NullMarker, null_marker);
1456       base_input++;
1457     }
1458     // Add a null check on the null marker to make subsequent field loads dependent on it
1459     assert(null_check_region == nullptr, "already set");
1460     if (null_marker == nullptr) {
1461       // Will only be initialized below, use dummy node for now
1462       null_marker = new Node(1);
1463       null_marker->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1464       gvn.set_type_bottom(null_marker);
1465     }
1466     Node* null_ctrl = kit->top();
1467     kit->null_check_common(null_marker, T_INT, false, &null_ctrl);
1468     Node* non_null_ctrl = kit->control();
1469     null_check_region = new RegionNode(3);
1470     null_check_region->init_req(1, non_null_ctrl);
1471     null_check_region->init_req(2, null_ctrl);
1472     null_check_region = gvn.transform(null_check_region);
1473     kit->set_control(null_check_region);
1474   }
1475 
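       // Fill in the field values. Flat fields are themselves initialized recursively from the
       // following edges, including a trailing null marker if they are not null-free.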
1476   for (uint i = 0; i < field_count(); ++i) {
1477     ciType* type = field_type(i);
1478     Node* parm = nullptr;
1479     if (field_is_flat(i)) {
1480       // Flat inline type field
1481       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
1482       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1483       if (!field_is_null_free(i)) {
1484         assert(field_null_marker_offset(i) != -1, "inconsistency");
1485         Node* null_marker = nullptr;
1486         if (multi->is_Start()) {
1487           null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1488         } else if (in) {
1489           null_marker = multi->as_Call()->in(base_input);
1490         } else {
1491           null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1492         }
1493         vt->set_req(NullMarker, null_marker);
1494         base_input++;
1495       }
1496       parm = gvn.transform(vt);
1497     } else {
1498       if (multi->is_Start()) {
1499         assert(in, "return from start?");
1500         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1501       } else if (in) {
1502         parm = multi->as_Call()->in(base_input);
1503       } else {
1504         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1505       }
1506       bool null_free = field_is_null_free(i);
1507       // Non-flat inline type field
1508       if (type->is_inlinetype()) {
1509         if (null_check_region != nullptr) {
1510           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1511           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1512           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1513           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1514             parm = parm->as_InlineType()->get_oop();
1515           }
1516           // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
1517           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1518           parm->set_req(2, kit->zerocon(T_OBJECT));
1519           parm = gvn.transform(parm);
1520           null_free = false;
1521         }
1522         if (visited.contains(type)) {
1523           kit->C->set_has_circular_inline_type(true);
1524         } else if (!parm->is_InlineType()) {
1525           int old_len = visited.length();
1526           visited.push(type);
1527           if (null_free) {
1528             parm = kit->cast_not_null(parm);
1529           }
1530           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1531           visited.trunc_to(old_len);
1532         }
1533       }
1534       base_input += type->size();
1535     }
1536     assert(parm != nullptr, "should never be null");
1537     assert(field_value(i) == nullptr, "already set");
1538     set_field_value(i, parm);
1539     gvn.record_for_igvn(parm);
1540   }
1541   // The last argument is used to pass the null marker to compiled code
1542   if (!null_free && !in) {
1543     Node* cmp = null_marker->raw_out(0);
1544     null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1545     set_req(NullMarker, null_marker);
1546     gvn.hash_delete(cmp);
1547     cmp->set_req(1, null_marker);
1548     gvn.hash_find_insert(cmp);
1549     gvn.record_for_igvn(cmp);
1550     base_input++;
1551   }
1552 }
1553 
1554 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1555 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1556 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1557   PhaseIterGVN* igvn = &phase->igvn();
1558   // Search for allocations of this inline type. Ignore scalar replaceable ones; they
1559   // will be removed anyway, and changing the memory chain would confuse other optimizations.
1560   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1561     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1562     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1563       Node* res = alloc->result_cast();
1564       if (res == nullptr || !res->is_CheckCastPP()) {
1565         break; // No unique CheckCastPP
1566       }
1567       // Search for a dominating allocation of the same inline type
1568       Node* res_dom = res;
1569       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1570         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1571         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1572           Node* res_other = alloc_other->result_cast();
1573           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1574               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1575             res_dom = res_other;
1576           }
1577         }
1578       }
1579       if (res_dom != res) {
1580         // Replace allocation by dominating one.
1581         replace_allocation(igvn, res, res_dom);
1582         // The result of the dominated allocation is now unused and will be removed
1583         // later, in PhaseMacroExpand::eliminate_allocate_node, so as not to confuse loop opts.
1584         igvn->_worklist.push(alloc);
1585       }
1586     }
1587   }
1588 }
1589 
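     // Creates the representation of a null value of inline klass 'vk': a null oop, a cleared
     // null marker, and zero/null values for all fields.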
1590 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1591   GrowableArray<ciType*> visited;
1592   visited.push(vk);
1593   return make_null_impl(gvn, vk, visited, transform);
1594 }
1595 
1596 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1597   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1598   vt->set_is_buffered(gvn);
1599   vt->set_null_marker(gvn, gvn.intcon(0));
1600   for (uint i = 0; i < vt->field_count(); i++) {
1601     ciType* ft = vt->field_type(i);
1602     Node* value = gvn.zerocon(ft->basic_type());
1603     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1604       gvn.C->set_has_circular_inline_type(true);
1605     } else if (ft->is_inlinetype()) {
1606       int old_len = visited.length();
1607       visited.push(ft);
1608       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1609       visited.trunc_to(old_len);
1610     }
1611     vt->set_field_value(i, value);
1612   }
1613   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1614 }
1615 
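     // Returns this node if 'safe_for_replace' is set and it has no users other than 'map';
     // otherwise returns a clone so that existing users are not affected by modifications.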
1616 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1617   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1618     return clone()->as_InlineType();
1619   }
1620   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1621     if (fast_out(i) != map) {
1622       return clone()->as_InlineType();
1623     }
1624   }
1625   gvn->hash_delete(this);
1626   return this;
1627 }
1628 
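     // Computes the type of this inline type node by filtering the oop's type through the
     // declared type. If the null marker is a constant 1, the result is known to be non-null.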
1629 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1630   Node* oop = get_oop();
1631   const Type* toop = phase->type(oop);
1632 #ifdef ASSERT
1633   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1634     // We are not allocated (anymore) and should therefore not have an instance id
1635     dump(1);
1636     assert(false, "Unbuffered inline type should not have known instance id");
1637   }
1638 #endif
1639   const Type* t = toop->filter_speculative(_type);
1640   if (t->singleton()) {
1641     // Don't replace InlineType by a constant
1642     t = _type;
1643   }
1644   const Type* tinit = phase->type(in(NullMarker));
1645   if (tinit == Type::TOP) {
1646     return Type::TOP;
1647   }
1648   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1649     t = t->join_speculative(TypePtr::NOTNULL);
1650   }
1651   return t;
1652 }