/*
 * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/multnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/globalDefinitions.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
// init_with_top: inputs of the phis above the returned InlineTypeNode are initialized to top.
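// For example (illustrative sketch only): after cloning 'vt' for a two-way region and
// merging another InlineTypeNode 'other' on input 2 via merge_with, the inputs have the shape
//   oop         = Phi(region, vt->get_oop(),         other->get_oop())
//   is_buffered = Phi(region, vt->get_is_buffered(), other->get_is_buffered())
//   null_marker = Phi(region, vt->get_null_marker(), other->get_null_marker())
//   field_i     = Phi(region, vt->field_value(i),    other->field_value(i))
// where inline type fields are cloned recursively instead of receiving a plain phi.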
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_non_null, bool init_with_top) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  Node* const top = gvn->C->top();

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, init_with_top ? top : vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, init_with_top ? top : vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the null_marker values
  Node* null_marker_node;
  if (is_non_null) {
    null_marker_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    null_marker_node = PhiNode::make(region, init_with_top ? top : vt->get_null_marker(), t);
    gvn->set_type(null_marker_node, t);
    gvn->record_for_igvn(null_marker_node);
  }
  vt->set_req(NullMarker, null_marker_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field(i)->type();
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    bool no_circularity = !gvn->C->has_circular_inline_type() || field->is_flat();
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, init_with_top ? top : value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) const {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = 0; i < field_count(); ++i) {
      Node* n = field_value(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
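// For example (illustrative): if clone_with_phis put 'this' on phi input 1, then
// merge_with(gvn, other, 2, transform) fills input 2 of the oop, is_buffered, null_marker
// and field phis with the corresponding values of 'other', recursing into inline type
// fields that were themselves cloned with phis.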
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int phi_index, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(phi_index, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(phi_index, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge null_marker inputs
  Node* null_marker = get_null_marker();
  if (null_marker->is_Phi()) {
    phi = null_marker->as_Phi();
    phi->set_req(phi_index, other->get_null_marker());
    if (transform) {
      set_req(NullMarker, gvn->transform(phi));
    }
  } else {
    assert(null_marker->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      if (val2->is_top()) {
        // The path where 'other' is used is dying. Therefore, we do not need to process the merge with 'other' further.
        // The phi inputs of 'this' at 'phi_index' will eventually be removed.
        break;
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), phi_index, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(phi_index, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) const {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_null_marker()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
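// Worked example (illustrative, with made-up offsets): if the declared flat field containing
// 'offset' starts at offset_in_bytes() == 16 and its inline klass has payload_offset() == 8,
// then a recursive query for offset == 20 continues with
//   sub_offset = 20 - 16 + 8 = 12
// i.e. the offset is rebased from the holder's layout to the field's own payload layout.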
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // Find the declared field which contains the field we are looking for
  int index = inline_klass()->field_index_by_offset(offset);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");
  ciField* field = this->field(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");

  if (!recursive || !field->is_flat() || value->is_top()) {
    assert(offset == field->offset_in_bytes(), "offset mismatch");
    return value;
  }

  // Flat inline type field
  InlineTypeNode* vt = value->as_InlineType();
  assert(field->is_flat(), "must be flat");
  if (offset == field->null_marker_offset()) {
    return vt->get_null_marker();
  } else {
    int sub_offset = offset - field->offset_in_bytes(); // Offset of the flattened field inside the declared field
    sub_offset += vt->inline_klass()->payload_offset(); // Add header size
    return vt->field_value_by_offset(sub_offset, recursive);
  }
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field(i)->offset_in_bytes() != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciField* InlineTypeNode::field(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index);
}

uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) const {
  uint cnt = 0;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    if (field->is_flat()) {
      InlineTypeNode* vt = value->as_InlineType();
      cnt += vt->add_fields_to_safepoint(worklist, sfpt);
      if (!field->is_null_free()) {
        // The null marker of a flat field is added right after we scalarize that field
        sfpt->add_req(vt->get_null_marker());
        cnt++;
      }
      continue;
    }
    if (value->is_InlineType()) {
      // Add inline type to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
    cnt++;
  }
  return cnt;
}

void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) const {
  JVMState* jvms = sfpt->jvms();
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());

  // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have a null marker field that
  // needs to be checked before using the field values.
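  // The resulting debug-info layout is (illustrative):
  //   [null_marker, field_0, field_1, ..., field_n-1]
  // with flat fields expanded in place and each nullable flat field followed by its own
  // null marker (see add_fields_to_safepoint).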
  sfpt->add_req(get_null_marker());
  uint nfields = add_fields_to_safepoint(worklist, sfpt);
  jvms->set_endoff(sfpt->req());
  // Replace safepoint edge by SafePointScalarObjectNode
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
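  // The phi walk below only accepts an oop phi whose transitive inputs are all constants
  // or parameters; any other input forces scalarization (a summary of the checks that
  // follow; the non-phi case additionally accepts plain loads).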
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
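// Circularity arises, for example (illustrative Java source), from a value class that refers
// to itself through a non-flat field:
//   value class Node { Node next; /* ... */ }
// Scalarizing 'next' inside its own holder would recurse forever, so the recursion is cut
// off by buffering once a type is revisited.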
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciField* field = this->field(i);
    ciType* ft = field->type();
    if (value->is_InlineType()) {
      assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
      if (!field->is_flat() && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); ++i) {
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    int field_off = field->offset_in_bytes() - vk->payload_offset();
    Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
    Node* value = nullptr;
    ciType* ft = field->type();
    bool field_null_free = field->is_null_free();
    if (field->is_flat()) {
      // Recursively load the flat inline type field
      ciInlineKlass* fvk = ft->as_inline_klass();
      bool atomic = field->is_atomic();

      int old_len = visited.length();
      visited.push(ft);
      value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
                                  field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
      visited.trunc_to(old_len);
    } else {
      // Load field value from memory
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      if (trust_null_free_oop && field_null_free) {
        val_type = val_type->join_speculative(TypePtr::NOTNULL);
      }
      const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
  ciInlineKlass* vk = inline_klass();
  bool do_atomic = atomic;
  // With immutable memory, a non-atomic store and an atomic store are the same
  if (immutable_memory) {
    do_atomic = false;
  }
  // If there is only one flattened field, a non-atomic store and an atomic store are the same
  if (vk->is_naturally_atomic(null_free)) {
    do_atomic = false;
  }

  if (!do_atomic) {
    if (!null_free) {
      int nm_offset = vk->null_marker_offset_in_payload();
      Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
      const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      kit->access_store_at(base, nm_ptr, nm_ptr_type, get_null_marker(), TypeInt::BOOL, T_BOOLEAN, decorators);
    }
    store(kit, base, ptr, immutable_memory, decorators);
    return;
  }

  StoreFlatNode::store(kit, base, ptr, this, null_free, decorators);
}

void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) {
  PhaseGVN& gvn = kit->gvn();
  DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
  kit->C->set_flat_accesses();
  ciInlineKlass* vk = inline_klass();
  assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());

  RegionNode* region = new RegionNode(4);
  gvn.set_type(region, Type::CONTROL);
  kit->record_for_igvn(region);

  Node* input_memory_state = kit->reset_memory();
  kit->set_all_memory(input_memory_state);

  PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
  gvn.set_type(mem, Type::MEMORY);
  kit->record_for_igvn(mem);

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  gvn.set_type(io, Type::ABIO);
  kit->record_for_igvn(io);

  Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is unspecified in C++ and since this sets control, it needs to come first
  IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);

  // Nullable
  kit->set_control(kit->IfFalse(iff_null_free));
  if (!kit->stopped()) {
    assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
    kit->set_all_memory(input_memory_state);
    Node* cast = kit->cast_to_flat_array_exact(base, vk, false, true);
    Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
    store_flat(kit, cast, ptr, true, false, false, decorators);

    region->init_req(1, kit->control());
    mem->set_req(1, kit->reset_memory());
    io->set_req(1, kit->i_o());
  }

  // Null-free
  kit->set_control(kit->IfTrue(iff_null_free));
  if (!kit->stopped()) {
    kit->set_all_memory(input_memory_state);

    Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
    IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);

    // Atomic
    kit->set_control(kit->IfTrue(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_null_free_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array_exact(base, vk, true, true);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      store_flat(kit, cast, ptr, true, false, true, decorators);

      region->init_req(2, kit->control());
      mem->set_req(2, kit->reset_memory());
      io->set_req(2, kit->i_o());
    }

    // Non-atomic
    kit->set_control(kit->IfFalse(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_null_free_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array_exact(base, vk, true, false);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      store_flat(kit, cast, ptr, false, false, true, decorators);

      region->init_req(3, kit->control());
      mem->set_req(3, kit->reset_memory());
      io->set_req(3, kit->i_o());
    }
  }

  kit->set_control(gvn.transform(region));
  kit->set_all_memory(gvn.transform(mem));
  kit->set_i_o(gvn.transform(io));
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
  // Write field values to memory
  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); ++i) {
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    int field_off = field->offset_in_bytes() - vk->payload_offset();
    Node* field_val = field_value(i);
    bool field_null_free = field->is_null_free();
    ciType* ft = field->type();
    Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
    if (field->is_flat()) {
      // Recursively store the flat inline type field
      bool atomic = field->is_atomic();
      field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
    } else {
      // Store field value to memory
      BasicType bt = type2field[ft->basic_type()];
      const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      const Type* val_type = Type::get_const_type(ft);
      kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
    }
  }
}

// Adds a check between val1 and val2. Jumps to 'region' if the check passes and optionally sets the corresponding phi input to 0 (false).
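// The emitted IR has the following shape (illustrative):
//   Cmp(val1, val2) -> Bool(test) -> If -> IfTrue  (added as a new input of 'region')
//                                       -> IfFalse (becomes the new '*ctrl')
// Float and double values are compared via their raw bit patterns (MoveF2I/MoveD2L)
// rather than with floating-point compares.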
static void acmp_val_guard(PhaseIterGVN* igvn, RegionNode* region, Node* phi, Node** ctrl, BasicType bt, BoolTest::mask test, Node* val1, Node* val2) {
  Node* cmp = nullptr;
  switch (bt) {
  case T_FLOAT:
    val1 = igvn->register_new_node_with_optimizer(new MoveF2INode(val1));
    val2 = igvn->register_new_node_with_optimizer(new MoveF2INode(val2));
    // Fall-through to the int case
  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
    cmp = igvn->register_new_node_with_optimizer(new CmpINode(val1, val2));
    break;
  case T_DOUBLE:
    val1 = igvn->register_new_node_with_optimizer(new MoveD2LNode(val1));
    val2 = igvn->register_new_node_with_optimizer(new MoveD2LNode(val2));
    // Fall-through to the long case
  case T_LONG:
    cmp = igvn->register_new_node_with_optimizer(new CmpLNode(val1, val2));
    break;
  default:
    assert(is_reference_type(bt), "must be");
    cmp = igvn->register_new_node_with_optimizer(new CmpPNode(val1, val2));
  }
  Node* bol = igvn->register_new_node_with_optimizer(new BoolNode(cmp, test));
  IfNode* iff = igvn->register_new_node_with_optimizer(new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN))->as_If();
  Node* if_f = igvn->register_new_node_with_optimizer(new IfFalseNode(iff));
  Node* if_t = igvn->register_new_node_with_optimizer(new IfTrueNode(iff));

  region->add_req(if_t);
  if (phi != nullptr) {
    phi->add_req(igvn->intcon(0));
  }
  *ctrl = if_f;
}

// Check if a substitutability check between 'this' and 'other' can be implemented in IR
bool InlineTypeNode::can_emit_substitutability_check(Node* other) const {
  if (other != nullptr && other->is_InlineType() && bottom_type() != other->bottom_type()) {
    // Different types: this is dead code because a preceding check already guarantees that both operands have the same type.
    return false;
  }
  for (uint i = 0; i < field_count(); i++) {
    ciType* ft = field(i)->type();
    Node* fv = field_value(i);
    if (ft->is_inlinetype() && fv->is_InlineType()) {
      // Check recursively
      if (!fv->as_InlineType()->can_emit_substitutability_check(nullptr)) {
        return false;
      }
    } else if (ft->can_be_inline_klass()) {
      // Comparing this field might require (another) substitutability check, bail out
      return false;
    }
  }
  return true;
}

// Emit IR to check substitutability between 'this' (left operand) and the value object referred to by 'other' (right operand).
// Parse-time checks guarantee that both operands have the same type. If 'other' is not an InlineTypeNode, we need to emit loads for the field values.
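// Overall shape (illustrative): for each field, a failed comparison branches to 'region' with
// the result phi set to 0 (not substitutable), so control only reaches the end of the field
// loop if all fields compared equal. For nullable fields, the null markers are compared first
// and the field comparison is skipped if both operands are null.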
void InlineTypeNode::check_substitutability(PhaseIterGVN* igvn, RegionNode* region, Node* phi, Node** ctrl, Node* mem, Node* base, Node* other, bool flat) const {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  DecoratorSet decorators = IN_HEAP | MO_UNORDERED | C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD;
  MergeMemNode* local_mem = igvn->register_new_node_with_optimizer(MergeMemNode::make(mem))->as_MergeMem();

  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); i++) {
    ciField* field = this->field(i);
    int field_off = field->offset_in_bytes();
    if (flat) {
      // Flat access, no header
      field_off -= vk->payload_offset();
    }
    Node* this_field = field_value(i);
    ciType* ft = field->type();
    BasicType bt = ft->basic_type();

    Node* other_base = base;
    Node* other_field = other;

    // Get field value of the other operand
    if (other->is_InlineType()) {
      other_field = other->as_InlineType()->field_value(i);
      other_base = nullptr;
    } else {
      // 'other' is an oop, compute address of the field
      other_field = igvn->register_new_node_with_optimizer(AddPNode::make_with_base(base, other, igvn->MakeConX(field_off)));
      if (field->is_flat()) {
        // Flat field, load is handled recursively below
        assert(this_field->is_InlineType(), "inconsistent field value");
      } else {
        // Non-flat field, load the field value and update the base because we are now operating on a different object
        assert(is_java_primitive(bt) || other_field->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent field type");
        C2AccessValuePtr addr(other_field, other_field->bottom_type()->is_ptr());
        C2OptAccess access(*igvn, *ctrl, local_mem, decorators, bt, base, addr);
        other_field = bs->load_at(access, Type::get_const_type(ft));
        other_base = other_field;
      }
    }

    if (this_field->is_InlineType()) {
      RegionNode* done_region = new RegionNode(1);
      assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
      if (!field->is_null_free()) {
        // Nullable field, check null marker before accessing the fields
        if (field->is_flat()) {
          // Flat field, check embedded null marker
          Node* null_marker = nullptr;
          if (other_field->is_InlineType()) {
            // TODO 8350865 Should we add an IGVN optimization to fold null marker loads from InlineTypeNodes?
            null_marker = other_field->as_InlineType()->get_null_marker();
          } else {
            Node* nm_offset = igvn->MakeConX(ft->as_inline_klass()->null_marker_offset_in_payload());
            Node* nm_adr = igvn->register_new_node_with_optimizer(AddPNode::make_with_base(base, other_field, nm_offset));
            C2AccessValuePtr addr(nm_adr, nm_adr->bottom_type()->is_ptr());
            C2OptAccess access(*igvn, *ctrl, local_mem, decorators, T_BOOLEAN, base, addr);
            null_marker = bs->load_at(access, TypeInt::BOOL);
          }
          // Return false if null markers are not equal
          acmp_val_guard(igvn, region, phi, ctrl, T_INT, BoolTest::ne, this_field->as_InlineType()->get_null_marker(), null_marker);

          // Null markers are equal. If both operands are null, skip the comparison of the fields.
          acmp_val_guard(igvn, done_region, nullptr, ctrl, T_INT, BoolTest::eq, this_field->as_InlineType()->get_null_marker(), igvn->intcon(0));
        } else {
          // Non-flat field, check if oop is null

          // Check if 'this' is null
          RegionNode* not_null_region = new RegionNode(1);
          acmp_val_guard(igvn, not_null_region, nullptr, ctrl, T_INT, BoolTest::ne, this_field->as_InlineType()->get_null_marker(), igvn->intcon(0));

          // 'this' is null. If 'other' is non-null, return false.
          acmp_val_guard(igvn, region, phi, ctrl, T_OBJECT, BoolTest::ne, other_field, igvn->zerocon(T_OBJECT));

          // Both are null, skip comparing the fields
          done_region->add_req(*ctrl);

          // 'this' is not null. If 'other' is null, return false.
          *ctrl = igvn->register_new_node_with_optimizer(not_null_region);
          acmp_val_guard(igvn, region, phi, ctrl, T_OBJECT, BoolTest::eq, other_field, igvn->zerocon(T_OBJECT));
        }
      }
      // Both operands are non-null, compare all the fields recursively
      this_field->as_InlineType()->check_substitutability(igvn, region, phi, ctrl, mem, other_base, other_field, field->is_flat());

      done_region->add_req(*ctrl);
      *ctrl = igvn->register_new_node_with_optimizer(done_region);
    } else {
      assert(!ft->can_be_inline_klass(), "Needs substitutability test");
      acmp_val_guard(igvn, region, phi, ctrl, bt, BoolTest::ne, this_field, other_field);
    }
  }
}

InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (is_allocated(&kit->gvn())) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_null_marker(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    // Allocate and initialize buffer, re-execute on deoptimization.
    kit->jvms()->set_bci(kit->bci());
    kit->jvms()->set_should_reexecute(true);
    kit->kill_dead_locals();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
    Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
    store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    assert(alloc != nullptr, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    oop->init_req(3, alloc_oop);
    region->init_req(3, kit->control());
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->record_for_igvn(vt);
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->type(get_is_buffered()) == TypeInt::ONE) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
  ProjNode* pn = call->proj_out_or_null(proj_idx);
  if (pn != nullptr) {
    C->gvn_replace_by(pn, value);
    C->initial_gvn()->hash_delete(pn);
    pn->set_req(0, C->top());
  }
  proj_idx += type2size[bt];
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
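// The projection layout is (illustrative), starting at TypeFunc::Parms:
//   [oop, field_0, ..., field_n-1, null_marker]
// with flat fields expanded recursively and a null marker projection appended after each
// nullable flat field, mirroring the order used by replace_field_projs.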
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) const {
  uint proj_idx = TypeFunc::Parms;
  // Replace oop projection
  replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
  // Replace field projections
  replace_field_projs(C, call, proj_idx);
  // Replace null_marker projection
  replace_proj(C, call, proj_idx, get_null_marker(), T_BOOLEAN);
  assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
}

void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) const {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    if (field->is_flat()) {
      InlineTypeNode* vt = value->as_InlineType();
      // Replace field projections for flat field
      vt->replace_field_projs(C, call, proj_idx);
      if (!field->is_null_free()) {
        // Replace null_marker projection for nullable field
        replace_proj(C, call, proj_idx, vt->get_null_marker(), T_BOOLEAN);
      }
      continue;
    }
    // Replace projection for field value
    replace_proj(C, call, proj_idx, value, field->type()->basic_type());
  }
}

InlineTypeNode* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    if (field->is_flat()) {
      // Flat inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flat inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
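// 'res' is the result (CheckCastPP) of a redundant buffer allocation and 'dom' is an oop that
// already holds the same field values, so the initializing stores of 'res' (and their GC
// barriers) can be removed before rewiring all uses of 'res' to 'dom'.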
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_null_marker(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use the base oop if the fields are loaded from memory, but don't do so if the base is the
  // CheckCastPP of an allocation, because the only case where we load from a naked CheckCastPP
  // is when we exit a constructor of an inline type and want to relinquish the larval oop there.
  // This has a couple of benefits:
  // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
  // - An InlineTypeNode without an allocation input is more likely to be GVN-ed. This may arise
  //   when we try to clone a value object.
  // - The buffering, if needed, is delayed until it is required. Since the new allocation is
  //   created from an InlineTypeNode, it is recognized as not having a unique identity and, in
  //   the future, we can move it around more freely, e.g. hoist it out of loops. This is not
  //   true for the old allocation since larval value objects do have unique identities.
  Node* base = is_loaded(phase);
  if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && phase->C->allow_macro_nodes() && AllocateNode::Ideal_allocation(base) == nullptr) {
    if (oop != base || !is_allocated(phase)) {
      set_oop(*phase, base);
      set_is_buffered(*phase);
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
  vt->set_is_buffered(gvn, false);
  vt->set_null_marker(gvn);
  return vt;
}

InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_all_zero_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  // Create a new InlineTypeNode initialized with all zero
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
  vt->set_is_buffered(gvn, false);
  vt->set_null_marker(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciField* field = vt->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    ciType* ft = field->type();
    Node* value = gvn.zerocon(ft->basic_type());
    if (!field->is_flat() && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk_field = ft->as_inline_klass();
      if (field->is_null_free()) {
        value = make_all_zero_impl(gvn, vk_field, visited);
      } else {
        value = make_null_impl(gvn, vk_field, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
  return vt;
}

bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
  const TypeInt* tinit = gvn->type(get_null_marker())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    ciField* field = this->field(i);
    assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
    if (field->is_null_free()) {
      // Null-free value class field must have the all-zero value. If 'flat' is set,
      // reject non-flat fields because they need to be initialized with an oop to a buffer.
      if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field->is_flat())) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_null_marker())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    } else if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();

  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = oop->isa_InlineType();
  if (vt != nullptr) {
    return vt;
  }

  if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      vt = make_null_impl(gvn, vk, visited);
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
    vt->set_is_buffered(gvn);
    vt->set_null_marker(gvn);
    Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
    vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      vt->set_oop(gvn, oop);
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    vt->set_is_buffered(gvn);
    vt->set_null_marker(gvn);
    Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
    vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
  }
  assert(vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
                                               bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
}

// Implementation of 'make_from_flat' that tracks the visited types to limit the scalarization
// depth of circular inline types
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
                                                    bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
  PhaseGVN& gvn = kit->gvn();
  bool do_atomic = atomic;
  // With immutable memory, a non-atomic load and an atomic load are the same
  if (immutable_memory) {
    do_atomic = false;
  }
  // If there is only one flattened field, a non-atomic load and an atomic load are the same
  if (vk->is_naturally_atomic(null_free)) {
    do_atomic = false;
  }

  if (!do_atomic) {
    InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
    if (!null_free) {
      int nm_offset = vk->null_marker_offset_in_payload();
      Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
      const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
      vt->set_req(NullMarker, nm_value);
    }

    vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
    return gvn.transform(vt)->as_InlineType();
  }

  assert(!immutable_memory, "immutable memory does not need explicit atomic access");
  return LoadFlatNode::load(kit, vk, base, ptr, null_free, trust_null_free_oop, decorators);
}

InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
  assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
  PhaseGVN& gvn = kit->gvn();
  DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
  kit->C->set_flat_accesses();
  InlineTypeNode* vt_nullable = nullptr;
  InlineTypeNode* vt_null_free = nullptr;
  InlineTypeNode* vt_non_atomic = nullptr;

  RegionNode* region = new RegionNode(4);
  gvn.set_type(region, Type::CONTROL);
  kit->record_for_igvn(region);

  Node* input_memory_state = kit->reset_memory();
  kit->set_all_memory(input_memory_state);

  PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
  gvn.set_type(mem, Type::MEMORY);
  kit->record_for_igvn(mem);

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  gvn.set_type(io, Type::ABIO);
  kit->record_for_igvn(io);

  Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is unspecified in C++ and since this sets control, it needs to come first
  IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);

  // Nullable
  kit->set_control(kit->IfFalse(iff_null_free));
  if (!kit->stopped()) {
    assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
    kit->set_all_memory(input_memory_state);
    Node* cast = kit->cast_to_flat_array_exact(base, vk, false, true);
    Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
    vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);

    region->init_req(1, kit->control());
    mem->set_req(1, kit->reset_memory());
    io->set_req(1, kit->i_o());
  }

  // Null-free
  kit->set_control(kit->IfTrue(iff_null_free));
  if (!kit->stopped()) {
    kit->set_all_memory(input_memory_state);

    Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
    IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);

    // Atomic
    kit->set_control(kit->IfTrue(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_null_free_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array_exact(base, vk, true, true);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);

      region->init_req(2, kit->control());
      mem->set_req(2, kit->reset_memory());
      io->set_req(2, kit->i_o());
    }

    // Non-atomic
    kit->set_control(kit->IfFalse(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_null_free_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array_exact(base, vk, true, false);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);

      region->init_req(3, kit->control());
      mem->set_req(3, kit->reset_memory());
      io->set_req(3, kit->i_o());
    }
  }

  InlineTypeNode* vt = nullptr;
  if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
    // All paths are dead
    vt = make_null(gvn, vk);
  } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
    vt = vt_non_atomic;
  } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
    vt = vt_null_free;
  } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
    vt = vt_nullable;
  }
  if (vt != nullptr) {
    kit->set_control(kit->gvn().transform(region));
    kit->set_all_memory(kit->gvn().transform(mem));
    kit->set_i_o(kit->gvn().transform(io));
    return vt;
  }

  InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
  vt = zero->clone_with_phis(&gvn, region);
  if (vt_nullable != nullptr) {
    vt = vt->merge_with(&gvn, vt_nullable, 1, false);
  }
  if (vt_null_free != nullptr) {
    vt = vt->merge_with(&gvn, vt_null_free, 2, false);
  }
  if (vt_non_atomic != nullptr) {
    vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
  }

  kit->set_control(kit->gvn().transform(region));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->set_i_o(kit->gvn().transform(io));
  return gvn.transform(vt)->as_InlineType();
1297 }
1298 
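     // Create an inline type of klass 'vk' whose oop and field values are taken from the inputs or projections of
     // the multi node 'multi' (a method entry or call), starting at edge index 'base_input'.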
1299 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1300   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1301   if (!in || multi->is_Start()) {
1302     // Keep track of the oop. The inline type might already be buffered.
1303     Node* oop = nullptr;
1304     if (multi->is_Start()) {
1305       oop = kit->gvn().transform(new ParmNode(multi->as_Start(), base_input++));
1306     } else {
1307       oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1308     }
1309     vt->set_oop(kit->gvn(), oop);
1310   } else {
1311     Node* oop = multi->as_Call()->in(base_input++);
1312     vt->set_oop(kit->gvn(), oop);
1313   }
1314   GrowableArray<ciType*> visited;
1315   visited.push(vk);
1316   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1317   return kit->gvn().transform(vt)->as_InlineType();
1318 }
1319 
1320 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) const {
1321   if (vk == nullptr) {
1322     vk = inline_klass();
1323   }
1324   for (uint i = 0; i < field_count(); ++i) {
1325     ciField* field = this->field(i);
1326     int offset = holder_offset + field->offset_in_bytes();
1327     Node* value = field_value(i);
1328     if (value->is_InlineType()) {
1329       assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1330       InlineTypeNode* vt = value->as_InlineType();
1331       if (vt->type()->inline_klass()->is_empty()) {
1332         continue;
1333       } else if (field->is_flat()) {
1334         // Check inline type field load recursively ('vt' is already known to be an InlineTypeNode here)
1335         base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1336         if (base == nullptr) {
1337           return nullptr;
1338         }
1339         continue;
1340       } else {
1341         value = vt->get_oop();
1342         if (value->Opcode() == Op_CastPP) {
1343           // Skip CastPP
1344           value = value->in(1);
1345         }
1346       }
1347     }
1348     if (value->isa_DecodeN()) {
1349       // Skip DecodeN
1350       value = value->in(1);
1351     }
1352     if (value->isa_Load()) {
1353       // Check if base and offset of field load matches inline type layout
1354       intptr_t loffset = 0;
1355       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1356       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1357         return nullptr;
1358       } else if (base == nullptr) {
1359         // Set base and check if pointer type matches
1360         base = lbase;
1361         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1362         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1363           return nullptr;
1364         }
1365       }
1366     } else {
1367       return nullptr;
1368     }
1369   }
1370   return base;
1371 }
1372 
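     // Return the klass pointer of 'vk' as a constant with its least significant bit set as a tag. Klass pointers are
     // at least word-aligned, so the tag bit is free; presumably it allows the runtime to distinguish this value from
     // a regular oop when an inline type is passed in scalarized form.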
1373 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1374   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1375   intptr_t bits = tk->get_con();
1376   set_nth_bit(bits, 0);
1377   return gvn.longcon((jlong)bits);
1378 }
1379 
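     // Pass the field values of this inline type to the call or return node 'n' as individual arguments, starting at
     // edge index 'base_input'. Flat fields are passed recursively, followed by their null marker if they are nullable.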
1380 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free, bool root) {
1381   if (root) {
1382     if (is_allocated(&kit->gvn())) {
1383       // Keep the information that 'this' is buffered
1384       n->init_req(base_input++, this);
1385     } else {
1386       n->init_req(base_input++, get_oop());
1387     }
1388   }
1389   if (!null_free && in) {
1390     n->init_req(base_input++, get_null_marker());
1391   }
1392   for (uint i = 0; i < field_count(); i++) {
1393     Node* arg = field_value(i);
1394     ciField* field = this->field(i);
1395     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1396     if (field->is_flat()) {
1397       // Flat inline type field
1398       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1399       if (!field->is_null_free()) {
1400         assert(field->null_marker_offset() != -1, "inconsistency");
1401         n->init_req(base_input++, arg->as_InlineType()->get_null_marker());
1402       }
1403     } else {
1404       if (arg->is_InlineType()) {
1405         // Non-flat inline type field
1406         InlineTypeNode* vt = arg->as_InlineType();
1407         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1408         arg = vt->buffer(kit);
1409       }
1410       // Initialize call/return arguments
1411       n->init_req(base_input++, arg);
1412       if (field->type()->size() == 2) {
1413         n->init_req(base_input++, kit->top());
1414       }
1415     }
1416   }
1417 }
1418 
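     // Initialize the field values of this inline type from the inputs or projections of 'multi', starting at edge
     // index 'base_input'. This mirrors 'pass_fields' above.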
1419 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool no_null_marker, Node* null_check_region, GrowableArray<ciType*>& visited) {
1420   PhaseGVN& gvn = kit->gvn();
1421   Node* null_marker = nullptr;
1422   if (!no_null_marker) {
1423     // Nullable inline type
1424     if (in) {
1425       // Set null marker
1426       if (multi->is_Start()) {
1427         null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1428       } else {
1429         null_marker = multi->as_Call()->in(base_input);
1430       }
1431       set_req(NullMarker, null_marker);
1432       base_input++;
1433     }
1434     // Add a null check to make subsequent loads dependent on it
1435     assert(null_check_region == nullptr, "already set");
1436     if (null_marker == nullptr) {
1437       // Will only be initialized below, use dummy node for now
1438       null_marker = new Node(1);
1439       null_marker->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1440       gvn.set_type_bottom(null_marker);
1441     }
1442     Node* null_ctrl = kit->top();
1443     kit->null_check_common(null_marker, T_INT, false, &null_ctrl);
1444     Node* non_null_ctrl = kit->control();
1445     null_check_region = new RegionNode(3);
1446     null_check_region->init_req(1, non_null_ctrl);
1447     null_check_region->init_req(2, null_ctrl);
1448     null_check_region = gvn.transform(null_check_region);
1449     kit->set_control(null_check_region);
1450   }
1451 
1452   for (uint i = 0; i < field_count(); ++i) {
1453     ciField* field = this->field(i);
1454     ciType* type = field->type();
1455     Node* parm = nullptr;
1456     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1457     if (field->is_flat()) {
1458       // Flat inline type field
1459       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field->is_null_free());
1460       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1461       if (!field->is_null_free()) {
1462         assert(field->null_marker_offset() != -1, "inconsistency");
1463         Node* null_marker_field_vt = nullptr;
1464         if (multi->is_Start()) {
1465           null_marker_field_vt = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1466         } else if (in) {
1467           null_marker_field_vt = multi->as_Call()->in(base_input);
1468         } else {
1469           null_marker_field_vt = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1470         }
1471         vt->set_req(NullMarker, null_marker_field_vt);
1472         base_input++;
1473       }
1474       parm = gvn.transform(vt);
1475     } else {
1476       if (multi->is_Start()) {
1477         assert(in, "return from start?");
1478         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1479       } else if (in) {
1480         parm = multi->as_Call()->in(base_input);
1481       } else {
1482         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1483       }
1484       // Non-flat inline type field
1485       if (type->is_inlinetype()) {
1486         if (null_check_region != nullptr) {
1487           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1488           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1489           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1490           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1491             parm = parm->as_InlineType()->get_oop();
1492           }
1493           // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
1494           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1495           parm->set_req(2, kit->zerocon(T_OBJECT));
1496           parm = gvn.transform(parm);
1497         }
1498         if (visited.contains(type)) {
1499           kit->C->set_has_circular_inline_type(true);
1500         } else if (!parm->is_InlineType()) {
1501           int old_len = visited.length();
1502           visited.push(type);
1503           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1504           visited.trunc_to(old_len);
1505         }
1506       }
1507       base_input += type->size();
1508     }
1509     assert(parm != nullptr, "should never be null");
1510     assert(field_value(i) == nullptr, "already set");
1511     set_field_value(i, parm);
1512     gvn.record_for_igvn(parm);
1513   }
1514   // The last argument is used to pass the null marker to compiled code
1515   if (!no_null_marker && !in) {
1516     Node* cmp = null_marker->raw_out(0);
1517     null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1518     set_req(NullMarker, null_marker);
1519     gvn.hash_delete(cmp);
1520     cmp->set_req(1, null_marker);
1521     gvn.hash_find_insert(cmp);
1522     gvn.record_for_igvn(cmp);
1523     base_input++;
1524   }
1525 }
1526 
1527 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1528 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1529 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) const {
1530   PhaseIterGVN* igvn = &phase->igvn();
1531   // Search for allocations of this inline type. Ignore scalar replaceable ones, they
1532   // will be removed anyway and changing the memory chain will confuse other optimizations.
1533   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1534     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1535     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1536       Node* res = alloc->result_cast();
1537       if (res == nullptr || !res->is_CheckCastPP()) {
1538         break; // No unique CheckCastPP
1539       }
1540       // Search for a dominating allocation of the same inline type
1541       Node* res_dom = res;
1542       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1543         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1544         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1545           Node* res_other = alloc_other->result_cast();
1546           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1547               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1548             res_dom = res_other;
1549           }
1550         }
1551       }
1552       if (res_dom != res) {
1553         // Replace allocation by dominating one.
1554         replace_allocation(igvn, res, res_dom);
1555         // The result of the dominated allocation is now unused and will be removed
1556         // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
1557         igvn->_worklist.push(alloc);
1558       }
1559     }
1560   }
1561 }
1562 
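     // Create an inline type representing null: a zero oop, the null marker set to 0, and all field values zeroed.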
1563 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1564   GrowableArray<ciType*> visited;
1565   visited.push(vk);
1566   return make_null_impl(gvn, vk, visited, transform);
1567 }
1568 
1569 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1570   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1571   vt->set_is_buffered(gvn);
1572   vt->set_null_marker(gvn, gvn.intcon(0));
1573   for (uint i = 0; i < vt->field_count(); i++) {
1574     ciField* field = vt->field(i);
1575     ciType* ft = field->type();
1576     Node* value = gvn.zerocon(ft->basic_type());
1577     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1578     if (!field->is_flat() && visited.contains(ft)) {
1579       gvn.C->set_has_circular_inline_type(true);
1580     } else if (ft->is_inlinetype()) {
1581       int old_len = visited.length();
1582       visited.push(ft);
1583       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1584       visited.trunc_to(old_len);
1585     }
1586     vt->set_field_value(i, value);
1587   }
1588   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1589 }
1590 
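     // Return this node if it can safely be modified in place (i.e. it is only used by 'map'), otherwise return a clone.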
1591 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1592   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1593     return clone()->as_InlineType();
1594   }
1595   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1596     if (fast_out(i) != map) {
1597       return clone()->as_InlineType();
1598     }
1599   }
1600   gvn->hash_delete(this);
1601   return this;
1602 }
1603 
1604 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1605   Node* oop = get_oop();
1606   const Type* toop = phase->type(oop);
1607 #ifdef ASSERT
1608   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1609     // We are not allocated (anymore) and should therefore not have an instance id
1610     dump(1);
1611     assert(false, "Unbuffered inline type should not have known instance id");
1612   }
1613 #endif
1614   if (toop == Type::TOP) {
1615     return Type::TOP;
1616   }
1617   const Type* t = toop->filter_speculative(_type);
1618   // Because of contradictory type profiling, we can end up with top as the speculative type,
1619   // which would then be removed by cleanup_speculative. In this case, we have to run filter_speculative
1620   // again, otherwise we would break the idempotence of Value.
1621   if (t->speculative() == nullptr && toop->speculative() != nullptr) {
1622     t = toop->filter_speculative(t);
1623   }
1624   if (t->singleton()) {
1625     // Don't replace InlineType by a constant
1626     t = _type;
1627   }
1628   const Type* tinit = phase->type(in(NullMarker));
1629   if (tinit == Type::TOP) {
1630     return Type::TOP;
1631   }
1632   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1633     t = t->join_speculative(TypePtr::NOTNULL);
1634   }
1635   return t;
1636 }
1637 
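     // Emit a LoadFlatNode that loads an inline type of klass 'vk' in flat representation from base+ptr as a single
     // atomic access. The field values (plus a null marker for nullable layouts) are produced as projections and
     // collected into a new InlineTypeNode. The node is expanded into actual memory accesses later (see the
     // 'expand_*' methods below).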
1638 InlineTypeNode* LoadFlatNode::load(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool null_free, bool trust_null_free_oop, DecoratorSet decorators) {
1639   int output_type_size = vk->nof_nonstatic_fields() + (null_free ? 0 : 1);
1640   const Type** output_types = TypeTuple::fields(output_type_size);
1641   collect_field_types(vk, output_types + TypeFunc::Parms, 0, output_type_size, null_free, trust_null_free_oop);
1642   const TypeTuple* type = TypeTuple::make(output_type_size + TypeFunc::Parms, output_types);
1643 
1644   LoadFlatNode* load = new LoadFlatNode(vk, type, null_free, decorators);
1645   load->init_req(TypeFunc::Control, kit->control());
1646   load->init_req(TypeFunc::I_O, kit->top());
1647   load->init_req(TypeFunc::Memory, kit->reset_memory());
1648   load->init_req(TypeFunc::FramePtr, kit->frameptr());
1649   load->init_req(TypeFunc::ReturnAdr, kit->top());
1650 
1651   load->init_req(TypeFunc::Parms, base);
1652   load->init_req(TypeFunc::Parms + 1, ptr);
1653   kit->kill_dead_locals();
1654   kit->add_safepoint_edges(load);
1655   load = kit->gvn().transform(load)->as_LoadFlat();
1656   kit->record_for_igvn(load);
1657 
1658   kit->set_control(kit->gvn().transform(new ProjNode(load, TypeFunc::Control)));
1659   kit->set_all_memory(kit->gvn().transform(new ProjNode(load, TypeFunc::Memory)));
1660   return load->collect_projs(kit, vk, TypeFunc::Parms, null_free);
1661 }
1662 
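     // Try to fold this load from the constant instance 'inst': replace the field projections with the corresponding
     // constant field values (and the null marker projection with 1). Returns false if the access is mismatched and
     // cannot be folded here.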
1663 bool LoadFlatNode::expand_constant(PhaseIterGVN& igvn, ciInstance* inst) const {
1664   precond(inst != nullptr);
1665   assert(igvn.delay_transform(), "transformation must be delayed");
1666   if ((_decorators & C2_MISMATCHED) != 0) {
1667     return false;
1668   }
1669 
1670   GraphKit kit(this, igvn);
1671   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1672     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1673     if (proj_out == nullptr) {
1674       continue;
1675     }
1676 
1677     ciField* field = _vk->nonstatic_field_at(i);
1678     BasicType bt = field->type()->basic_type();
1679     // 'inst' is guaranteed to be non-null by the precondition above, so the field value can always be
1680     // constant-folded from the constant instance.
1681     bool is_unsigned_load = bt == T_BOOLEAN || bt == T_CHAR;
1682     const Type* cst_type = Type::make_constant_from_field(field, inst, bt, is_unsigned_load);
1683     Node* cst_node = igvn.makecon(cst_type);
1684     igvn.replace_node(proj_out, cst_node);
1688   }
1689 
1690   if (!_null_free) {
1691     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1692     if (proj_out != nullptr) {
1693       igvn.replace_node(proj_out, igvn.intcon(1));
1694     }
1695   }
1696 
1697   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1698   if (old_ctrl != nullptr) {
1699     igvn.replace_node(old_ctrl, kit.control());
1700   }
1701   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1702   Node* new_mem = kit.reset_memory();
1703   if (old_mem != nullptr) {
1704     igvn.replace_node(old_mem, new_mem);
1705   }
1706   return true;
1707 }
1708 
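     // Expand this load into individual field loads. Returns false if the access is mismatched and this expansion
     // cannot be applied.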
1709 bool LoadFlatNode::expand_non_atomic(PhaseIterGVN& igvn) const {
1710   assert(igvn.delay_transform(), "transformation must be delayed");
1711   if ((_decorators & C2_MISMATCHED) != 0) {
1712     return false;
1713   }
1714 
1715   GraphKit kit(this, igvn);
1716   Node* base = this->base();
1717   Node* ptr = this->ptr();
1718 
1719   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1720     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1721     if (proj_out == nullptr) {
1722       continue;
1723     }
1724 
1725     ciField* field = _vk->nonstatic_field_at(i);
1726     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - _vk->payload_offset());
1727     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1728     igvn.set_type(field_ptr, field_ptr_type);
1729 
1730     Node* field_value = kit.access_load_at(base, field_ptr, field_ptr_type, igvn.type(proj_out), field->type()->basic_type(), _decorators);
1731     igvn.replace_node(proj_out, field_value);
1732   }
1733 
1734   if (!_null_free) {
1735     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1736     if (proj_out != nullptr) {
1737       Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, _vk->null_marker_offset_in_payload());
1738       const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1739       igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1740       Node* null_marker_value = kit.access_load_at(base, null_marker_ptr, null_marker_ptr_type, TypeInt::BOOL, T_BOOLEAN, _decorators);
1741       igvn.replace_node(proj_out, null_marker_value);
1742     }
1743   }
1744 
1745   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1746   if (old_ctrl != nullptr) {
1747     igvn.replace_node(old_ctrl, kit.control());
1748   }
1749   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1750   Node* new_mem = kit.reset_memory();
1751   if (old_mem != nullptr) {
1752     igvn.replace_node(old_mem, new_mem);
1753   }
1754   return true;
1755 }
1756 
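     // Expand this load by loading the whole payload as a single value of up to 64 bits and extracting the field
     // values from it afterwards (see 'expand_projs_atomic').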
1757 void LoadFlatNode::expand_atomic(PhaseIterGVN& igvn) const {
1758   assert(igvn.delay_transform(), "transformation must be delayed");
1759   GraphKit kit(this, igvn);
1760   Node* base = this->base();
1761   Node* ptr = this->ptr();
1762 
1763   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1764   kit.insert_mem_bar(Op_MemBarCPUOrder);
1765   Node* payload = kit.access_load_at(base, ptr, TypeRawPtr::BOTTOM, Type::get_const_basic_type(payload_bt), payload_bt,
1766                                      _decorators | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD, kit.control());
1767   kit.insert_mem_bar(Op_MemBarCPUOrder);
1768 
1769   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1770   if (old_ctrl != nullptr) {
1771     igvn.replace_node(old_ctrl, kit.control());
1772   }
1773   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1774   Node* new_mem = kit.reset_memory();
1775   if (old_mem != nullptr) {
1776     igvn.replace_node(old_mem, new_mem);
1777   }
1778 
1779   expand_projs_atomic(igvn, kit.control(), payload);
1780 }
1781 
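     // Collect the types of all flattened (non-static) fields of 'vk' into 'field_types', starting at 'idx', and
     // append TypeInt::BOOL for the null marker of nullable layouts. The result describes the output tuple of a LoadFlatNode.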
1782 void LoadFlatNode::collect_field_types(ciInlineKlass* vk, const Type** field_types, int idx, int limit, bool null_free, bool trust_null_free_oop) {
1783   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1784   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1785     ciField* field = vk->declared_nonstatic_field_at(i);
1786     if (field->is_flat()) {
1787       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1788       collect_field_types(field_klass, field_types, idx, limit, field->is_null_free(), trust_null_free_oop && field->is_null_free());
1789       idx += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1790       continue;
1791     }
1792 
1793     const Type* field_type = Type::get_const_type(field->type());
1794     if (trust_null_free_oop && field->is_null_free()) {
1795       field_type = field_type->filter(TypePtr::NOTNULL);
1796     }
1797 
1798     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1799     field_types[idx] = field_type;
1800     idx++;
1801   }
1802 
1803   if (!null_free) {
1804     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1805     field_types[idx] = TypeInt::BOOL;
1806   }
1807 }
1808 
1809 // Create an InlineTypeNode whose field values are the projections of this LoadFlatNode, starting at 'proj_con'.
1810 // Flat fields are collected recursively.
1811 InlineTypeNode* LoadFlatNode::collect_projs(GraphKit* kit, ciInlineKlass* vk, int proj_con, bool null_free) {
1812   PhaseGVN& gvn = kit->gvn();
1813   InlineTypeNode* res = InlineTypeNode::make_uninitialized(gvn, vk, null_free);
1814   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1815     ciField* field = vk->declared_nonstatic_field_at(i);
1816     Node* field_value;
1817     if (field->is_flat()) {
1818       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1819       field_value = collect_projs(kit, field_klass, proj_con, field->is_null_free());
1820       proj_con += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1821     } else {
1822       field_value = gvn.transform(new ProjNode(this, proj_con));
1823       if (field->type()->is_inlinetype()) {
1824         field_value = InlineTypeNode::make_from_oop(kit, field_value, field->type()->as_inline_klass());
1825       }
1826       proj_con++;
1827     }
1828     res->set_field_value(i, field_value);
1829   }
1830 
1831   if (null_free) {
1832     res->set_null_marker(gvn);
1833   } else {
1834     res->set_null_marker(gvn, gvn.transform(new ProjNode(this, proj_con)));
1835   }
1836   return gvn.transform(res)->as_InlineType();
1837 }
1838 
1839 // Extract the values of the flattened fields from the loaded payload
1840 void LoadFlatNode::expand_projs_atomic(PhaseIterGVN& igvn, Node* ctrl, Node* payload) const {
1841   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1842   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1843     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1844     if (proj_out == nullptr) {
1845       continue;
1846     }
1847 
1848     ciField* field = _vk->nonstatic_field_at(i);
1849     int field_offset = field->offset_in_bytes() - _vk->payload_offset();
1850     const Type* field_type = igvn.type(proj_out);
1851     Node* field_value = get_payload_value(igvn, ctrl, payload_bt, payload, field_type, field->type()->basic_type(), field_offset);
1852     igvn.replace_node(proj_out, field_value);
1853   }
1854 
1855   if (!_null_free) {
1856     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1857     if (proj_out == nullptr) {
1858       return;
1859     }
1860 
1861     int null_marker_offset = _vk->null_marker_offset_in_payload();
1862     Node* null_marker_value = get_payload_value(igvn, ctrl, payload_bt, payload, TypeInt::BOOL, T_BOOLEAN, null_marker_offset);
1863     igvn.replace_node(proj_out, null_marker_value);
1864   }
1865 }
1866 
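     // Extract a value of type 'value_bt' at byte offset 'offset' from the loaded payload. For example, for a
     // hypothetical T_SHORT field at offset 2 in a T_LONG payload, this computes the equivalent of
     // (jshort)(payload >>> 16): an unsigned right shift by 16 bits, a narrowing to int, and a sign extension via
     // 'Compile::narrow_value'.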
1867 Node* LoadFlatNode::get_payload_value(PhaseIterGVN& igvn, Node* ctrl, BasicType payload_bt, Node* payload, const Type* value_type, BasicType value_bt, int offset) {
1868   assert((offset + type2aelembytes(value_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
1869   Node* value = nullptr;
1870   // Shift the requested value into the lowest bits of the payload
1871   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
1872   if (payload_bt == T_LONG) {
1873     value = igvn.transform(new URShiftLNode(payload, shift_val));
1874     value = igvn.transform(new ConvL2INode(value));
1875   } else {
1876     value = igvn.transform(new URShiftINode(payload, shift_val));
1877   }
1878 
1879   if (value_bt == T_INT) {
1880     return value;
1881   } else if (!is_java_primitive(value_bt)) {
1882     assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1883     value = igvn.transform(new CastI2NNode(ctrl, value, value_type->make_narrowoop()));
1884     value = igvn.transform(new DecodeNNode(value, value_type));
1885 
1886     // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure that the
1887     // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
1888     // which would make it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
1889     // scalarization, CastI2N nodes are always used by a load if scalarization happens, which inherently keeps them pinned above the safepoint.
1890     return value;
1891   } else {
1892     // Make sure to zero unused bits in the 32-bit value
1893     return Compile::narrow_value(value_bt, value, nullptr, &igvn, true);
1894   }
1895 }
1896 
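     // Emit a StoreFlatNode that stores the inline type 'value' in flat representation at base+ptr. The node is
     // expanded into actual memory accesses later (see the 'expand_*' methods below).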
1897 void StoreFlatNode::store(GraphKit* kit, Node* base, Node* ptr, InlineTypeNode* value, bool null_free, DecoratorSet decorators) {
1898   value = value->allocate_fields(kit);
1899   StoreFlatNode* store = new StoreFlatNode(null_free, decorators);
1900   store->init_req(TypeFunc::Control, kit->control());
1901   store->init_req(TypeFunc::I_O, kit->top());
1902   store->init_req(TypeFunc::Memory, kit->reset_memory());
1903   store->init_req(TypeFunc::FramePtr, kit->frameptr());
1904   store->init_req(TypeFunc::ReturnAdr, kit->top());
1905 
1906   store->init_req(TypeFunc::Parms, base);
1907   store->init_req(TypeFunc::Parms + 1, ptr);
1908   store->init_req(TypeFunc::Parms + 2, value);
1909   kit->kill_dead_locals();
1910   kit->add_safepoint_edges(store);
1911   store = kit->gvn().transform(store)->as_StoreFlat();
1912   kit->record_for_igvn(store);
1913 
1914   kit->set_control(kit->gvn().transform(new ProjNode(store, TypeFunc::Control)));
1915   kit->set_all_memory(kit->gvn().transform(new ProjNode(store, TypeFunc::Memory)));
1916 }
1917 
1918 bool StoreFlatNode::expand_non_atomic(PhaseIterGVN& igvn) const {
1919   assert(igvn.delay_transform(), "transformation must be delayed");
1920   if ((_decorators & C2_MISMATCHED) != 0) {
1921     return false;
1922   }
1923 
1924   GraphKit kit(this, igvn);
1925   Node* base = this->base();
1926   Node* ptr = this->ptr();
1927   InlineTypeNode* value = this->value();
1928 
1929   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1930   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1931     ciField* field = vk->nonstatic_field_at(i);
1932     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - vk->payload_offset());
1933     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1934     igvn.set_type(field_ptr, field_ptr_type);
1935     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1936     kit.access_store_at(base, field_ptr, field_ptr_type, field_value, igvn.type(field_value), field->type()->basic_type(), _decorators);
1937   }
1938 
1939   if (!_null_free) {
1940     Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, vk->null_marker_offset_in_payload());
1941     const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1942     igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1943     Node* null_marker_value = value->get_null_marker();
1944     kit.access_store_at(base, null_marker_ptr, null_marker_ptr_type, null_marker_value, TypeInt::BOOL, T_BOOLEAN, _decorators);
1945   }
1946 
1947   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1948   if (old_ctrl != nullptr) {
1949     igvn.replace_node(old_ctrl, kit.control());
1950   }
1951   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1952   Node* new_mem = kit.reset_memory();
1953   if (old_mem != nullptr) {
1954     igvn.replace_node(old_mem, new_mem);
1955   }
1956   return true;
1957 }
1958 
1959 void StoreFlatNode::expand_atomic(PhaseIterGVN& igvn) const {
1960   // Convert the value to a payload of at most 64 bits and write it with a single atomic store.
1961   // The payload can contain at most two oop fields, and they must be narrow: a full-width oop would be 64 bits
1962   // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
1963   // 64 bits, because the next smaller (power-of-two) size would be 32 bits, which could only hold one narrow oop
1964   // that would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
1965   assert(igvn.delay_transform(), "transformation must be delayed");
1966   GraphKit kit(this, igvn);
1967   Node* base = this->base();
1968   Node* ptr = this->ptr();
1969   InlineTypeNode* value = this->value();
1970 
1971   int oop_off_1 = -1;
1972   int oop_off_2 = -1;
1973   Node* payload = convert_to_payload(igvn, kit.control(), value, _null_free, oop_off_1, oop_off_2);
1974 
1975   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1976   assert(oop_off_1 == -1 || oop_off_1 == 0 || oop_off_1 == 4, "invalid layout for %s, first oop at offset %d", vk->name()->as_utf8(), oop_off_1);
1977   assert(oop_off_2 == -1 || oop_off_2 == 4, "invalid layout for %s, second oop at offset %d", vk->name()->as_utf8(), oop_off_2);
1978   BasicType payload_bt = vk->atomic_size_to_basic_type(_null_free);
1979   kit.insert_mem_bar(Op_MemBarCPUOrder);
1980   if (!UseG1GC || oop_off_1 == -1) {
1981     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
1982     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
1983     // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
1984     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
1985     kit.access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, Type::get_const_basic_type(payload_bt), payload_bt, _decorators | C2_MISMATCHED, true, value);
1986   } else {
1987     // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
1988     assert(UseG1GC, "Unexpected GC");
1989     assert(payload_bt == T_LONG, "Unexpected payload type");
1990     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
1991     Node* oop_offset = (oop_off_2 == -1) ? igvn.intcon(oop_off_1) : nullptr;
1992     Node* mem = kit.reset_memory();
1993     kit.set_all_memory(mem);
1994     Node* store = igvn.transform(new StoreLSpecialNode(kit.control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
1995     kit.set_memory(store, TypeRawPtr::BOTTOM);
1996   }
1997   kit.insert_mem_bar(Op_MemBarCPUOrder);
1998 
1999   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
2000   if (old_ctrl != nullptr) {
2001     igvn.replace_node(old_ctrl, kit.control());
2002   }
2003   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
2004   Node* new_mem = kit.reset_memory();
2005   if (old_mem != nullptr) {
2006     igvn.replace_node(old_mem, new_mem);
2007   }
2008 }
2009 
2010 // Convert the field values of 'value' to a single payload value that can be stored with a single access.
     // The payload offsets of up to two narrow oop fields are returned in 'oop_off_1' and 'oop_off_2'.
2011 Node* StoreFlatNode::convert_to_payload(PhaseIterGVN& igvn, Node* ctrl, InlineTypeNode* value, bool null_free, int& oop_off_1, int& oop_off_2) {
2012   ciInlineKlass* vk = igvn.type(value)->inline_klass();
2013   BasicType payload_bt = vk->atomic_size_to_basic_type(null_free);
2014   Node* payload = igvn.zerocon(payload_bt);
2015   if (!null_free) {
2016     // Set the null marker
2017     payload = set_payload_value(igvn, payload_bt, payload, T_BOOLEAN, value->get_null_marker(), vk->null_marker_offset_in_payload());
2018   }
2019 
2020   // Iterate over the fields and add their values to the payload
2021   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
2022     ciField* field = vk->nonstatic_field_at(i);
2023     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
2024     ciType* field_klass = field->type();
2025     BasicType field_bt = field_klass->basic_type();
2026     int field_offset_in_payload = field->offset_in_bytes() - vk->payload_offset();
2027     if (!field_klass->is_primitive_type()) {
2028       // Narrow oop field
2029       assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
2030       if (oop_off_1 == -1) {
2031         oop_off_1 = field_offset_in_payload;
2032       } else {
2033         assert(oop_off_2 == -1, "already set");
2034         oop_off_2 = field_offset_in_payload;
2035       }
2036 
2037       const Type* val_type = Type::get_const_type(field_klass)->make_narrowoop();
2038       if (field_value->is_InlineType()) {
2039         assert(field_value->as_InlineType()->is_allocated(&igvn), "must be allocated");
2040       }
2041 
2042       field_value = igvn.transform(new EncodePNode(field_value, val_type));
2043       field_value = igvn.transform(new CastP2XNode(ctrl, field_value));
2044       field_value = igvn.transform(new ConvL2INode(field_value));
2045       field_bt = T_INT;
2046     }
2047     payload = set_payload_value(igvn, payload_bt, payload, field_bt, field_value, field_offset_in_payload);
2048   }
2049 
2050   return payload;
2051 }
2052 
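     // Insert 'value' of type 'val_bt' into 'payload' at byte offset 'offset'. For example, for a hypothetical
     // T_SHORT value v at offset 2 of a T_LONG payload, this computes the equivalent of
     // payload | (((jlong)(v & 0xFFFF)) << 16).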
2053 Node* StoreFlatNode::set_payload_value(PhaseIterGVN& igvn, BasicType payload_bt, Node* payload, BasicType val_bt, Node* value, int offset) {
2054   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
2055 
2056   // Make sure to zero unused bits in the 32-bit value
2057   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
2058     value = igvn.transform(new AndINode(value, igvn.intcon(0xFF)));
2059   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
2060     value = igvn.transform(new AndINode(value, igvn.intcon(0xFFFF)));
2061   } else if (val_bt == T_FLOAT) {
2062     value = igvn.transform(new MoveF2INode(value));
2063   } else {
2064     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
2065   }
2066 
2067   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
2068   if (payload_bt == T_LONG) {
2069     // Convert to long and zero the upper 32 bits to undo the sign extension (the backend will fold this and emit a zero-extending i2l)
2070     value = igvn.transform(new ConvI2LNode(value));
2071     value = igvn.transform(new AndLNode(value, igvn.longcon(0xFFFFFFFF)));
2072 
2073     Node* shift_value = igvn.transform(new LShiftLNode(value, shift_val));
2074     payload = new OrLNode(shift_value, payload);
2075   } else {
2076     Node* shift_value = igvn.transform(new LShiftINode(value, shift_val));
2077     payload = new OrINode(shift_value, payload);
2078   }
2079   return igvn.transform(payload);
2080 }
2081 
2082 const Type* LoadFlatNode::Value(PhaseGVN* phase) const {
2083   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
2084       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP) {
2085     return Type::TOP;
2086   }
2087   return bottom_type();
2088 }
2089 
2090 const Type* StoreFlatNode::Value(PhaseGVN* phase) const {
2091   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
2092       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP || phase->type(value()) == Type::TOP) {
2093     return Type::TOP;
2094   }
2095   return bottom_type();
2096 }