/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/multnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/globalDefinitions.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
// init_with_top: inputs of the phis above the returned InlineTypeNode are initialized to top.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_non_null, bool init_with_top) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  Node* const top = gvn->C->top();

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, init_with_top ? top : vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, init_with_top ? top : vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the null_marker values
  Node* null_marker_node;
  if (is_non_null) {
    null_marker_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    null_marker_node = PhiNode::make(region, init_with_top ? top : vt->get_null_marker(), t);
    gvn->set_type(null_marker_node, t);
    gvn->record_for_igvn(null_marker_node);
  }
  vt->set_req(NullMarker, null_marker_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, init_with_top ? top : value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}
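
// For illustration only (a sketch, not verbatim IR): merging two inline types
// of a hypothetical value class 'Point { int x; int y; }' over a two-path
// region conceptually produces
//
//   InlineType(oop         = Phi(region, oop1, oop2),
//              is_buffered = Phi(region, buf1, buf2),
//              null_marker = Phi(region, nm1,  nm2),
//              x           = Phi(region, x1,   x2),
//              y           = Phi(region, y1,   y2))
//
// where the inputs for the second path are typically filled in later via
// merge_with() below.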

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = 0; i < field_count(); ++i) {
      Node* n = field_value(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int phi_index, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(phi_index, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(phi_index, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge null_marker inputs
  Node* null_marker = get_null_marker();
  if (null_marker->is_Phi()) {
    phi = null_marker->as_Phi();
    phi->set_req(phi_index, other->get_null_marker());
    if (transform) {
      set_req(NullMarker, gvn->transform(phi));
    }
  } else {
    assert(null_marker->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      if (val2->is_top()) {
        // The path where 'other' is used is dying. Therefore, we do not need to process the merge with 'other' further.
        // The phi inputs of 'this' at 'phi_index' will eventually be removed.
        break;
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), phi_index, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(phi_index, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}
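
// Typical usage pattern (a sketch under assumed caller behavior; see the
// parser's merge code for the real sequence): when two control flow paths each
// produce an inline type,
//
//   InlineTypeNode* vt = vt1->clone_with_phis(&gvn, region, map);
//   vt = vt->merge_with(&gvn, vt2, 2, /* transform= */ true);
//
// fills the phi inputs for path 2 with the values from vt2.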

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_null_marker()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // Find the declared field which contains the field we are looking for
  int index = inline_klass()->field_index_by_offset(offset);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");

  if (!recursive || !field_is_flat(index) || value->is_top()) {
    assert(offset == field_offset(index), "offset mismatch");
    return value;
  }

  // Flat inline type field
  InlineTypeNode* vt = value->as_InlineType();
  if (offset == field_null_marker_offset(index)) {
    return vt->get_null_marker();
  } else {
    int sub_offset = offset - field_offset(index); // Offset of the flattened field inside the declared field
    sub_offset += vt->inline_klass()->payload_offset(); // Add header size
    return vt->field_value_by_offset(sub_offset, recursive);
  }
}
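
// Worked example (hypothetical layout, for illustration): suppose field 0 is a
// flat field of value class 'Inner { int x; }' at offset 16, and Inner's
// payload starts at offset 12 inside its buffer. A query for offset 20 resolves
// to field 0 and computes sub_offset = (20 - 16) + 12 = 16, which is then
// looked up recursively in the Inner node.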

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

bool InlineTypeNode::field_is_volatile(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_volatile();
}

int InlineTypeNode::field_null_marker_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(field->is_flat(), "must be a flat field");
  return field->null_marker_offset();
}

uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) {
  uint cnt = 0;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      cnt += vt->add_fields_to_safepoint(worklist, sfpt);
      if (!field_is_null_free(i)) {
        // The null marker of a flat field is added right after we scalarize that field
        sfpt->add_req(vt->get_null_marker());
        cnt++;
      }
      continue;
    }
    if (value->is_InlineType()) {
      // Add inline type to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
    cnt++;
  }
  return cnt;
}

void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  JVMState* jvms = sfpt->jvms();
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());

  // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have a null marker field that
  // needs to be checked before using the field values.
  sfpt->add_req(get_null_marker());
  uint nfields = add_fields_to_safepoint(worklist, sfpt);
  jvms->set_endoff(sfpt->req());
  // Replace safepoint edge by SafePointScalarObjectNode
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}
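
// Sketch of the resulting debug info (illustrative summary): the safepoint
// gains the edges [null_marker, field0, field1, ...], with flat fields expanded
// recursively and their null markers appended right after their field values.
// Every debug use of this node is then replaced by a SafePointScalarObjectNode
// that records where those edges start (first_ind) and how many there are
// (nfields).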

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); ++i) {
    int field_off = field_offset(i) - vk->payload_offset();
    Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool field_null_free = field_is_null_free(i);
    if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      ciInlineKlass* fvk = ft->as_inline_klass();
      // Atomic if nullable or not LooselyConsistentValue
      bool atomic = !field_null_free || fvk->must_be_atomic();

      int old_len = visited.length();
      visited.push(ft);
      value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
                                  field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
      visited.trunc_to(old_len);
    } else {
      // Load field value from memory
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      if (trust_null_free_oop && field_null_free) {
        val_type = val_type->join_speculative(TypePtr::NOTNULL);
      }
      const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
  ciInlineKlass* vk = inline_klass();
  bool do_atomic = atomic;
  // With immutable memory, a non-atomic store and an atomic store are the same
  if (immutable_memory) {
    do_atomic = false;
  }
  // If there is only one flattened field, a non-atomic store and an atomic store are the same
  if (vk->is_naturally_atomic(null_free)) {
    do_atomic = false;
  }

  if (!do_atomic) {
    if (!null_free) {
      int nm_offset = vk->null_marker_offset_in_payload();
      Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
      const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      kit->access_store_at(base, nm_ptr, nm_ptr_type, get_null_marker(), TypeInt::BOOL, T_BOOLEAN, decorators);
    }
    store(kit, base, ptr, immutable_memory, decorators);
    return;
  }

  StoreFlatNode::store(kit, base, ptr, this, null_free, decorators);
}
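
// Worked example (hypothetical value class, for illustration): a null-free
// field of type 'Byte1 { byte b; }' is naturally atomic, so do_atomic is
// cleared above and the single byte is written with a plain store. A nullable
// flat field in an atomic layout, by contrast, must publish its payload and
// null marker as one unit and therefore takes the StoreFlatNode::store() path.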

void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) {
  PhaseGVN& gvn = kit->gvn();
  DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
  kit->C->set_flat_accesses();
  ciInlineKlass* vk = inline_klass();
  assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());

  RegionNode* region = new RegionNode(4);
  gvn.set_type(region, Type::CONTROL);
  kit->record_for_igvn(region);

  Node* input_memory_state = kit->reset_memory();
  kit->set_all_memory(input_memory_state);

  PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
  gvn.set_type(mem, Type::MEMORY);
  kit->record_for_igvn(mem);

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  gvn.set_type(io, Type::ABIO);
  kit->record_for_igvn(io);

  Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is unspecified in C++ and since this sets control, it needs to come first
  IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);

  // Nullable
  kit->set_control(kit->IfFalse(iff_null_free));
  if (!kit->stopped()) {
    assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
    kit->set_all_memory(input_memory_state);
    Node* cast = kit->cast_to_flat_array_exact(base, vk, false, true);
    Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
    store_flat(kit, cast, ptr, true, false, false, decorators);

    region->init_req(1, kit->control());
    mem->set_req(1, kit->reset_memory());
    io->set_req(1, kit->i_o());
  }

  // Null-free
  kit->set_control(kit->IfTrue(iff_null_free));
  if (!kit->stopped()) {
    kit->set_all_memory(input_memory_state);

    Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
    IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);

    // Atomic
    kit->set_control(kit->IfTrue(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array_exact(base, vk, true, true);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      store_flat(kit, cast, ptr, true, false, true, decorators);

      region->init_req(2, kit->control());
      mem->set_req(2, kit->reset_memory());
      io->set_req(2, kit->i_o());
    }

    // Non-atomic
    kit->set_control(kit->IfFalse(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array_exact(base, vk, true, false);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      store_flat(kit, cast, ptr, false, false, true, decorators);

      region->init_req(3, kit->control());
      mem->set_req(3, kit->reset_memory());
      io->set_req(3, kit->i_o());
    }
  }

  kit->set_control(gvn.transform(region));
  kit->set_all_memory(gvn.transform(mem));
  kit->set_i_o(gvn.transform(io));
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
  // Write field values to memory
  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); ++i) {
    int field_off = field_offset(i) - vk->payload_offset();
    Node* field_val = field_value(i);
    bool field_null_free = field_is_null_free(i);
    ciType* ft = field_type(i);
    Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      ciInlineKlass* fvk = ft->as_inline_klass();
      // Atomic if nullable or not LooselyConsistentValue
      bool atomic = !field_null_free || fvk->must_be_atomic();

      field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
    } else {
      // Store field value to memory
      BasicType bt = type2field[ft->basic_type()];
      const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      const Type* val_type = Type::get_const_type(ft);
      kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
    }
  }
}

// Adds a check between val1 and val2. Jumps to 'region' if check passes and optionally sets the corresponding phi input to false.
static void acmp_val_guard(PhaseIterGVN* igvn, RegionNode* region, Node* phi, Node** ctrl, BasicType bt, BoolTest::mask test, Node* val1, Node* val2) {
  Node* cmp = nullptr;
  switch (bt) {
  case T_FLOAT:
    val1 = igvn->register_new_node_with_optimizer(new MoveF2INode(val1));
    val2 = igvn->register_new_node_with_optimizer(new MoveF2INode(val2));
    // Fall-through to the int case
  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
    cmp = igvn->register_new_node_with_optimizer(new CmpINode(val1, val2));
    break;
  case T_DOUBLE:
    val1 = igvn->register_new_node_with_optimizer(new MoveD2LNode(val1));
    val2 = igvn->register_new_node_with_optimizer(new MoveD2LNode(val2));
    // Fall-through to the long case
  case T_LONG:
    cmp = igvn->register_new_node_with_optimizer(new CmpLNode(val1, val2));
    break;
  default:
    assert(is_reference_type(bt), "must be");
    cmp = igvn->register_new_node_with_optimizer(new CmpPNode(val1, val2));
  }
  Node* bol = igvn->register_new_node_with_optimizer(new BoolNode(cmp, test));
  IfNode* iff = igvn->register_new_node_with_optimizer(new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN))->as_If();
  Node* if_f = igvn->register_new_node_with_optimizer(new IfFalseNode(iff));
  Node* if_t = igvn->register_new_node_with_optimizer(new IfTrueNode(iff));

  region->add_req(if_t);
  if (phi != nullptr) {
    phi->add_req(igvn->intcon(0));
  }
  *ctrl = if_f;
}
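
// Sketch of the IR emitted above (illustrative), with control entering through
// *ctrl and falling through on the false projection:
//
//   cmp = CmpI/CmpL/CmpP(val1, val2)   // floats/doubles go via MoveF2I/MoveD2L
//   bol = Bool(cmp, test)
//   iff = If(*ctrl, bol)
//   IfTrue(iff)  --> region (check passed; phi input set to 0 if phi != nullptr)
//   IfFalse(iff) --> *ctrl  (fall through to the next check)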

// Check if a substitutability check between 'this' and 'other' can be implemented in IR
bool InlineTypeNode::can_emit_substitutability_check(Node* other) const {
  if (other != nullptr && other->is_InlineType() && bottom_type() != other->bottom_type()) {
    // Different types, this is dead code because there's a check above that guarantees this.
    return false;
  }
  for (uint i = 0; i < field_count(); i++) {
    ciType* ft = field_type(i);
    if (ft->is_inlinetype()) {
      // Check recursively
      if (!field_value(i)->as_InlineType()->can_emit_substitutability_check(nullptr)) {
        return false;
      }
    } else if (!ft->is_primitive_type() && ft->as_klass()->can_be_inline_klass()) {
      // Comparing this field might require (another) substitutability check, bail out
      return false;
    }
  }
  return true;
}
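
// Example of a bailout (hypothetical field, for illustration): a field declared
// as an interface or abstract class that can be implemented by a value class
// makes can_be_inline_klass() return true, so comparing it might itself require
// a substitutability check and we conservatively refuse to emit the check in IR.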

// Emit IR to check substitutability between 'this' (left operand) and the value object referred to by 'other' (right operand).
// Parse-time checks guarantee that both operands have the same type. If 'other' is not an InlineTypeNode, we need to emit loads for the field values.
void InlineTypeNode::check_substitutability(PhaseIterGVN* igvn, RegionNode* region, Node* phi, Node** ctrl, Node* mem, Node* base, Node* other, bool flat) const {
  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  DecoratorSet decorators = IN_HEAP | MO_UNORDERED | C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD;
  MergeMemNode* local_mem = igvn->register_new_node_with_optimizer(MergeMemNode::make(mem))->as_MergeMem();

  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); i++) {
    int field_off = field_offset(i);
    if (flat) {
      // Flat access, no header
      field_off -= vk->payload_offset();
    }
    Node* this_field = field_value(i);
    ciType* ft = field_type(i);
    BasicType bt = ft->basic_type();

    Node* other_base = base;
    Node* other_field = other;

    // Get field value of the other operand
    if (other->is_InlineType()) {
      other_field = other->as_InlineType()->field_value(i);
      other_base = nullptr;
    } else {
      // 'other' is an oop, compute address of the field
      other_field = igvn->register_new_node_with_optimizer(new AddPNode(base, other, igvn->MakeConX(field_off)));
      if (field_is_flat(i)) {
        // Flat field, load is handled recursively below
        assert(this_field->is_InlineType(), "inconsistent field value");
      } else {
        // Non-flat field, load the field value and update the base because we are now operating on a different object
        assert(is_java_primitive(bt) || other_field->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent field type");
        C2AccessValuePtr addr(other_field, other_field->bottom_type()->is_ptr());
        C2OptAccess access(*igvn, *ctrl, local_mem, decorators, bt, base, addr);
        other_field = bs->load_at(access, Type::get_const_type(ft));
        other_base = other_field;
      }
    }

    if (this_field->is_InlineType()) {
      RegionNode* done_region = new RegionNode(1);
      if (!field_is_null_free(i)) {
        // Nullable field, check null marker before accessing the fields
        if (field_is_flat(i)) {
          // Flat field, check embedded null marker
          Node* null_marker = nullptr;
          if (other_field->is_InlineType()) {
            // TODO 8350865 Should we add an IGVN optimization to fold null marker loads from InlineTypeNodes?
            null_marker = other_field->as_InlineType()->get_null_marker();
          } else {
            Node* nm_offset = igvn->MakeConX(ft->as_inline_klass()->null_marker_offset_in_payload());
            Node* nm_adr = igvn->register_new_node_with_optimizer(new AddPNode(base, other_field, nm_offset));
            C2AccessValuePtr addr(nm_adr, nm_adr->bottom_type()->is_ptr());
            C2OptAccess access(*igvn, *ctrl, local_mem, decorators, T_BOOLEAN, base, addr);
            null_marker = bs->load_at(access, TypeInt::BOOL);
          }
          // Return false if null markers are not equal
          acmp_val_guard(igvn, region, phi, ctrl, T_INT, BoolTest::ne, this_field->as_InlineType()->get_null_marker(), null_marker);

          // Null markers are equal. If both operands are null, skip the comparison of the fields.
          acmp_val_guard(igvn, done_region, nullptr, ctrl, T_INT, BoolTest::eq, this_field->as_InlineType()->get_null_marker(), igvn->intcon(0));
        } else {
          // Non-flat field, check if oop is null

          // Check if 'this' is null
          RegionNode* not_null_region = new RegionNode(1);
          acmp_val_guard(igvn, not_null_region, nullptr, ctrl, T_INT, BoolTest::ne, this_field->as_InlineType()->get_null_marker(), igvn->intcon(0));

          // 'this' is null. If 'other' is non-null, return false.
          acmp_val_guard(igvn, region, phi, ctrl, T_OBJECT, BoolTest::ne, other_field, igvn->zerocon(T_OBJECT));

          // Both are null, skip comparing the fields
          done_region->add_req(*ctrl);

          // 'this' is not null. If 'other' is null, return false.
          *ctrl = igvn->register_new_node_with_optimizer(not_null_region);
          acmp_val_guard(igvn, region, phi, ctrl, T_OBJECT, BoolTest::eq, other_field, igvn->zerocon(T_OBJECT));
        }
      }
      // Both operands are non-null, compare all the fields recursively
      this_field->as_InlineType()->check_substitutability(igvn, region, phi, ctrl, mem, other_base, other_field, field_is_flat(i));

      done_region->add_req(*ctrl);
      *ctrl = igvn->register_new_node_with_optimizer(done_region);
    } else {
      assert(ft->is_primitive_type() || !ft->as_klass()->can_be_inline_klass(), "Needs substitutability test");
      acmp_val_guard(igvn, region, phi, ctrl, bt, BoolTest::ne, this_field, other_field);
    }
  }
}

InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_null_marker(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    // Allocate and initialize buffer, re-execute on deoptimization.
    kit->jvms()->set_bci(kit->bci());
    kit->jvms()->set_should_reexecute(true);
    kit->kill_dead_locals();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
    Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
    store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    assert(alloc != nullptr, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    oop->init_req(3, alloc_oop);
    region->init_req(3, kit->control());
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}
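
// Note on the merge above (a summary, not additional behavior): the three
// region paths correspond to (1) the value object was already buffered, so its
// existing oop is reused, (2) the value object is null, so the null oop is
// used, and (3) a fresh buffer was allocated and initialized. Only path (3)
// allocates; the MemBarStoreStore keeps the initializing stores from being
// reordered with a later publishing store.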

bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
  ProjNode* pn = call->proj_out_or_null(proj_idx);
  if (pn != nullptr) {
    C->gvn_replace_by(pn, value);
    C->initial_gvn()->hash_delete(pn);
    pn->set_req(0, C->top());
  }
  proj_idx += type2size[bt];
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  uint proj_idx = TypeFunc::Parms;
  // Replace oop projection
  replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
  // Replace field projections
  replace_field_projs(C, call, proj_idx);
  // Replace null_marker projection
  replace_proj(C, call, proj_idx, get_null_marker(), T_BOOLEAN);
  assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
}
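
// For illustration (a sketch assuming a hypothetical value class
// 'Point { int x; int y; }' returned in scalarized form): the call has result
// projections [oop, x, y, null_marker] starting at TypeFunc::Parms, and
// replace_call_results() wires get_oop(), the field values, and
// get_null_marker() to them in exactly that order.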

void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      // Replace field projections for flat field
      vt->replace_field_projs(C, call, proj_idx);
      if (!field_is_null_free(i)) {
        // Replace null_marker projection for nullable field
        replace_proj(C, call, proj_idx, vt->get_null_marker(), T_BOOLEAN);
      }
      continue;
    }
    // Replace projection for field value
    replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
  }
}

InlineTypeNode* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flat inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  Node* is_buffered = get_is_buffered();

  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_null_marker(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use base oop if fields are loaded from memory, don't do so if base is the CheckCastPP of an
  // allocation because the only case we load from a naked CheckCastPP is when we exit a
  // constructor of an inline type and we want to relinquish the larval oop there. This has a
  // number of benefits:
  // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
  // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge
  //   when we try to clone a value object.
  // - The buffering, if needed, is delayed until it is required. This new allocation, since it is
  //   created from an InlineTypeNode, is recognized as not having a unique identity and in the
  //   future, we can move them around more freely such as hoisting out of loops. This is not true
  //   for the old allocation since larval value objects do have unique identities.
  Node* base = is_loaded(phase);
  if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && phase->C->allow_macro_nodes() && AllocateNode::Ideal_allocation(base) == nullptr) {
    if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
      set_oop(*phase, base);
      set_is_buffered(*phase);
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
  vt->set_is_buffered(gvn, false);
  vt->set_null_marker(gvn);
  return vt;
}

InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_all_zero_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  // Create a new InlineTypeNode initialized with all zero
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
  vt->set_is_buffered(gvn, false);
  vt->set_null_marker(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_all_zero_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
  return vt;
}

bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
  const TypeInt* tinit = gvn->type(get_null_marker())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the all-zero value. If 'flat' is set,
      // reject non-flat fields because they need to be initialized with an oop to a buffer.
      if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_null_marker())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    } else if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}
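
// Worked example (hypothetical value classes, for illustration): given
// 'Inner { int x; }' and 'Outer { Inner nullFreeField; Inner nullableField; }'
// where the first field is null-free and the second is nullable, the all-zero
// Outer has null_marker == 1, nullFreeField equal to the all-zero Inner
// (x == 0), and nullableField == null. is_all_zero() accepts exactly this shape.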

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();

  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    return oop->as_InlineType();
  }

  if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      vt = make_null_impl(gvn, vk, visited);
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
    vt->set_is_buffered(gvn);
    vt->set_null_marker(gvn);
    Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
    vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      vt->set_oop(gvn, oop);
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_null_marker(gvn);
    Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
    vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
                                               bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
}

// Implementation of 'make_from_flat' that additionally tracks the visited field
// types to limit the scalarization of circular inline types.
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
                                                    bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
  PhaseGVN& gvn = kit->gvn();
  bool do_atomic = atomic;
  // With immutable memory, a non-atomic load and an atomic load are the same
  if (immutable_memory) {
    do_atomic = false;
  }
  // If there is only one flattened field, a non-atomic load and an atomic load are the same
  if (vk->is_naturally_atomic(null_free)) {
    do_atomic = false;
  }

  if (!do_atomic) {
    InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
    if (!null_free) {
      int nm_offset = vk->null_marker_offset_in_payload();
      Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
      const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
      vt->set_req(NullMarker, nm_value);
    }

    vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
    return gvn.transform(vt)->as_InlineType();
  }

  assert(!immutable_memory, "immutable memory does not need explicit atomic access");
  return LoadFlatNode::load(kit, vk, base, ptr, null_free, trust_null_free_oop, decorators);
}

InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
  assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
  PhaseGVN& gvn = kit->gvn();
  DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
  kit->C->set_flat_accesses();
  InlineTypeNode* vt_nullable = nullptr;
  InlineTypeNode* vt_null_free = nullptr;
  InlineTypeNode* vt_non_atomic = nullptr;

  RegionNode* region = new RegionNode(4);
  gvn.set_type(region, Type::CONTROL);
  kit->record_for_igvn(region);

  Node* input_memory_state = kit->reset_memory();
  kit->set_all_memory(input_memory_state);

  PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
  gvn.set_type(mem, Type::MEMORY);
  kit->record_for_igvn(mem);

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  gvn.set_type(io, Type::ABIO);
  kit->record_for_igvn(io);

1228   Node* bol_null_free = kit->null_free_array_test(base); // Since this sets control and argument evaluation order is unspecified in C++, it must be evaluated before create_and_map_if
1229   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
1230 
1231   // Nullable
1232   kit->set_control(kit->IfFalse(iff_null_free));
1233   if (!kit->stopped()) {
1234     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
1235     kit->set_all_memory(input_memory_state);
1236     Node* cast = kit->cast_to_flat_array_exact(base, vk, false, true);
1237     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1238     vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);
1239 
1240     region->init_req(1, kit->control());
1241     mem->set_req(1, kit->reset_memory());
1242     io->set_req(1, kit->i_o());
1243   }
1244 
1245   // Null-free
1246   kit->set_control(kit->IfTrue(iff_null_free));
1247   if (!kit->stopped()) {
1248     kit->set_all_memory(input_memory_state);
1249 
1250     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
1251     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
1252 
1253     // Atomic
1254     kit->set_control(kit->IfTrue(iff_atomic));
1255     if (!kit->stopped()) {
1256       assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
1257       kit->set_all_memory(input_memory_state);
1258       Node* cast = kit->cast_to_flat_array_exact(base, vk, true, true);
1259       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1260       vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);
1261 
1262       region->init_req(2, kit->control());
1263       mem->set_req(2, kit->reset_memory());
1264       io->set_req(2, kit->i_o());
1265     }
1266 
1267     // Non-Atomic
1268     kit->set_control(kit->IfFalse(iff_atomic));
1269     if (!kit->stopped()) {
1270       assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
1271       kit->set_all_memory(input_memory_state);
1272       Node* cast = kit->cast_to_flat_array_exact(base, vk, true, false);
1273       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1274       vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);
1275 
1276       region->init_req(3, kit->control());
1277       mem->set_req(3, kit->reset_memory());
1278       io->set_req(3, kit->i_o());
1279     }
1280   }
1281 
1282   InlineTypeNode* vt = nullptr;
1283   if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
1284     // All paths are dead
1285     vt = make_null(gvn, vk);
1286   } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
1287     vt = vt_non_atomic;
1288   } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
1289     vt = vt_null_free;
1290   } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
1291     vt = vt_nullable;
1292   }
1293   if (vt != nullptr) {
1294     kit->set_control(kit->gvn().transform(region));
1295     kit->set_all_memory(kit->gvn().transform(mem));
1296     kit->set_i_o(kit->gvn().transform(io));
1297     return vt;
1298   }
1299 
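       // More than one path is alive: merge the values loaded on each path with phis over the region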
1300   InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
1301   vt = zero->clone_with_phis(&gvn, region);
1302   if (vt_nullable != nullptr) {
1303     vt = vt->merge_with(&gvn, vt_nullable, 1, false);
1304   }
1305   if (vt_null_free != nullptr) {
1306     vt = vt->merge_with(&gvn, vt_null_free, 2, false);
1307   }
1308   if (vt_non_atomic != nullptr) {
1309     vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
1310   }
1311 
1312   kit->set_control(kit->gvn().transform(region));
1313   kit->set_all_memory(kit->gvn().transform(mem));
1314   kit->set_i_o(kit->gvn().transform(io));
1315   return gvn.transform(vt)->as_InlineType();
1316 }
1317 
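     // Create an inline type from the scalarized inputs or outputs of 'multi' (a method entry or a
     // call), starting at 'base_input'.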
1318 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1319   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1320   if (!in) {
1321     // Keep track of the oop. The returned inline type might already be buffered.
1322     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1323     vt->set_oop(kit->gvn(), oop);
1324   }
1325   GrowableArray<ciType*> visited;
1326   visited.push(vk);
1327   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1328   return kit->gvn().transform(vt)->as_InlineType();
1329 }
1330 
1331 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
1332   if (vk == nullptr) {
1333     vk = inline_klass();
1334   }
1335   for (uint i = 0; i < field_count(); ++i) {
1336     int offset = holder_offset + field_offset(i);
1337     Node* value = field_value(i);
1338     if (value->is_InlineType()) {
1339       InlineTypeNode* vt = value->as_InlineType();
1340       if (vt->type()->inline_klass()->is_empty()) {
1341         continue;
1342       } else if (field_is_flat(i)) {
1343         // Check inline type field load recursively
1344         base = vt->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1345         if (base == nullptr) {
1346           return nullptr;
1347         }
1348         continue;
1349       } else {
1350         value = vt->get_oop();
1351         if (value->Opcode() == Op_CastPP) {
1352           // Skip CastPP
1353           value = value->in(1);
1354         }
1355       }
1356     }
1357     if (value->isa_DecodeN()) {
1358       // Skip DecodeN
1359       value = value->in(1);
1360     }
1361     if (value->isa_Load()) {
1362       // Check if base and offset of field load matches inline type layout
1363       intptr_t loffset = 0;
1364       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1365       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1366         return nullptr;
1367       } else if (base == nullptr) {
1368         // Set base and check if pointer type matches
1369         base = lbase;
1370         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1371         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1372           return nullptr;
1373         }
1374       }
1375     } else {
1376       return nullptr;
1377     }
1378   }
1379   return base;
1380 }
1381 
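     // Return the klass pointer of 'vk' as a long constant with its lowest bit set (a "tagged" klass pointer)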
1382 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1383   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1384   intptr_t bits = tk->get_con();
1385   set_nth_bit(bits, 0);
1386   return gvn.longcon((jlong)bits);
1387 }
1388 
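     // Pass the scalarized field values of this inline type to 'n' (a call or return), starting at
     // 'base_input'.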
1389 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1390   if (!null_free && in) {
1391     n->init_req(base_input++, get_null_marker());
1392   }
1393   for (uint i = 0; i < field_count(); i++) {
1394     Node* arg = field_value(i);
1395     if (field_is_flat(i)) {
1396       // Flat inline type field
1397       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1398       if (!field_is_null_free(i)) {
1399         assert(field_null_marker_offset(i) != -1, "inconsistency");
1400         n->init_req(base_input++, arg->as_InlineType()->get_null_marker());
1401       }
1402     } else {
1403       if (arg->is_InlineType()) {
1404         // Non-flat inline type field
1405         InlineTypeNode* vt = arg->as_InlineType();
1406         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1407         arg = vt->buffer(kit);
1408       }
1409       // Initialize call/return arguments
1410       n->init_req(base_input++, arg);
1411       if (field_type(i)->size() == 2) {
1412         n->init_req(base_input++, kit->top());
1413       }
1414     }
1415   }
1416   // The last argument is used to pass the null marker to compiled code and is not required here.
1417   if (!null_free && !in) {
1418     n->init_req(base_input++, kit->top());
1419   }
1420 }
1421 
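     // Counterpart of 'pass_fields': initialize the field values of this inline type from the
     // scalarized inputs or outputs of 'multi', starting at 'base_input'.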
1422 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
1423   PhaseGVN& gvn = kit->gvn();
1424   Node* null_marker = nullptr;
1425   if (!null_free) {
1426     // Nullable inline type
1427     if (in) {
1428       // Set null marker
1429       if (multi->is_Start()) {
1430         null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1431       } else {
1432         null_marker = multi->as_Call()->in(base_input);
1433       }
1434       set_req(NullMarker, null_marker);
1435       base_input++;
1436     }
1437     // Add a null check to make subsequent loads dependent on it
1438     assert(null_check_region == nullptr, "already set");
1439     if (null_marker == nullptr) {
1440       // Will only be initialized below, use dummy node for now
1441       null_marker = new Node(1);
1442       null_marker->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1443       gvn.set_type_bottom(null_marker);
1444     }
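         // If a dummy was used, it is replaced by the real null marker projection of the call once
         // all fields have been initialized (see the end of this method).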
1445     Node* null_ctrl = kit->top();
1446     kit->null_check_common(null_marker, T_INT, false, &null_ctrl);
1447     Node* non_null_ctrl = kit->control();
1448     null_check_region = new RegionNode(3);
1449     null_check_region->init_req(1, non_null_ctrl);
1450     null_check_region->init_req(2, null_ctrl);
1451     null_check_region = gvn.transform(null_check_region);
1452     kit->set_control(null_check_region);
1453   }
1454 
1455   for (uint i = 0; i < field_count(); ++i) {
1456     ciType* type = field_type(i);
1457     Node* parm = nullptr;
1458     if (field_is_flat(i)) {
1459       // Flat inline type field
1460       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
1461       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1462       if (!field_is_null_free(i)) {
1463         assert(field_null_marker_offset(i) != -1, "inconsistency");
1464         Node* null_marker = nullptr;
1465         if (multi->is_Start()) {
1466           null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1467         } else if (in) {
1468           null_marker = multi->as_Call()->in(base_input);
1469         } else {
1470           null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1471         }
1472         vt->set_req(NullMarker, null_marker);
1473         base_input++;
1474       }
1475       parm = gvn.transform(vt);
1476     } else {
1477       if (multi->is_Start()) {
1478         assert(in, "return from start?");
1479         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1480       } else if (in) {
1481         parm = multi->as_Call()->in(base_input);
1482       } else {
1483         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1484       }
1485       bool null_free = field_is_null_free(i);
1486       // Non-flat inline type field
1487       if (type->is_inlinetype()) {
1488         if (null_check_region != nullptr) {
1489           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1490           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1491           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1492           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1493             parm = parm->as_InlineType()->get_oop();
1494           }
1495           // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
1496           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1497           parm->set_req(2, kit->zerocon(T_OBJECT));
1498           parm = gvn.transform(parm);
1499           null_free = false;
1500         }
1501         if (visited.contains(type)) {
1502           kit->C->set_has_circular_inline_type(true);
1503         } else if (!parm->is_InlineType()) {
1504           int old_len = visited.length();
1505           visited.push(type);
1506           if (null_free) {
1507             parm = kit->cast_not_null(parm);
1508           }
1509           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1510           visited.trunc_to(old_len);
1511         }
1512       }
1513       base_input += type->size();
1514     }
1515     assert(parm != nullptr, "should never be null");
1516     assert(field_value(i) == nullptr, "already set");
1517     set_field_value(i, parm);
1518     gvn.record_for_igvn(parm);
1519   }
1520   // The last argument is used to pass the null marker to compiled code
1521   if (!null_free && !in) {
1522     Node* cmp = null_marker->raw_out(0);
1523     null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1524     set_req(NullMarker, null_marker);
1525     gvn.hash_delete(cmp);
1526     cmp->set_req(1, null_marker);
1527     gvn.hash_find_insert(cmp);
1528     gvn.record_for_igvn(cmp);
1529     base_input++;
1530   }
1531 }
1532 
1533 // Search for multiple allocations of this inline type and try to replace them with dominating allocations.
1534 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1535 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1536   PhaseIterGVN* igvn = &phase->igvn();
1537   // Search for allocations of this inline type. Ignore scalar-replaceable ones; they
1538   // will be removed anyway, and changing the memory chain would confuse other optimizations.
1539   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1540     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1541     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1542       Node* res = alloc->result_cast();
1543       if (res == nullptr || !res->is_CheckCastPP()) {
1544         break; // No unique CheckCastPP
1545       }
1546       // Search for a dominating allocation of the same inline type
1547       Node* res_dom = res;
1548       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1549         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1550         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1551           Node* res_other = alloc_other->result_cast();
1552           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1553               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1554             res_dom = res_other;
1555           }
1556         }
1557       }
1558       if (res_dom != res) {
1559         // Replace allocation by dominating one.
1560         replace_allocation(igvn, res, res_dom);
1561         // The result of the dominated allocation is now unused and will be removed
1562         // later, in PhaseMacroExpand::eliminate_allocate_node, so as not to confuse loop opts.
1563         igvn->_worklist.push(alloc);
1564       }
1565     }
1566   }
1567 }
1568 
1569 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1570   GrowableArray<ciType*> visited;
1571   visited.push(vk);
1572   return make_null_impl(gvn, vk, visited, transform);
1573 }
1574 
1575 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1576   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1577   vt->set_is_buffered(gvn);
1578   vt->set_null_marker(gvn, gvn.intcon(0));
1579   for (uint i = 0; i < vt->field_count(); i++) {
1580     ciType* ft = vt->field_type(i);
1581     Node* value = gvn.zerocon(ft->basic_type());
1582     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1583       gvn.C->set_has_circular_inline_type(true);
1584     } else if (ft->is_inlinetype()) {
1585       int old_len = visited.length();
1586       visited.push(ft);
1587       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1588       visited.trunc_to(old_len);
1589     }
1590     vt->set_field_value(i, value);
1591   }
1592   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1593 }
1594 
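     // Clone this node unless 'safe_for_replace' is set and its only use (if any) is the given
     // safepoint map; in that case it can be modified in place.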
1595 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1596   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1597     return clone()->as_InlineType();
1598   }
1599   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1600     if (fast_out(i) != map) {
1601       return clone()->as_InlineType();
1602     }
1603   }
1604   gvn->hash_delete(this);
1605   return this;
1606 }
1607 
1608 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1609   Node* oop = get_oop();
1610   const Type* toop = phase->type(oop);
1611 #ifdef ASSERT
1612   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1613     // We are not allocated (anymore) and should therefore not have an instance id
1614     dump(1);
1615     assert(false, "Unbuffered inline type should not have known instance id");
1616   }
1617 #endif
1618   if (toop == Type::TOP) {
1619     return Type::TOP;
1620   }
1621   const Type* t = toop->filter_speculative(_type);
1622   if (t->singleton()) {
1623     // Don't replace InlineType by a constant
1624     t = _type;
1625   }
1626   const Type* tinit = phase->type(in(NullMarker));
1627   if (tinit == Type::TOP) {
1628     return Type::TOP;
1629   }
1630   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1631     t = t->join_speculative(TypePtr::NOTNULL);
1632   }
1633   return t;
1634 }
1635 
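     // Emit a LoadFlatNode that loads all fields of the flat value at 'ptr' in 'base' atomically.
     // The loaded field values become projections of the node and are collected into a new
     // InlineTypeNode.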
1636 InlineTypeNode* LoadFlatNode::load(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool null_free, bool trust_null_free_oop, DecoratorSet decorators) {
1637   int output_type_size = vk->nof_nonstatic_fields() + (null_free ? 0 : 1);
1638   const Type** output_types = TypeTuple::fields(output_type_size);
1639   collect_field_types(vk, output_types + TypeFunc::Parms, 0, output_type_size, null_free, trust_null_free_oop);
1640   const TypeTuple* type = TypeTuple::make(output_type_size + TypeFunc::Parms, output_types);
1641 
1642   LoadFlatNode* load = new LoadFlatNode(vk, type, null_free, decorators);
1643   load->init_req(TypeFunc::Control, kit->control());
1644   load->init_req(TypeFunc::I_O, kit->top());
1645   load->init_req(TypeFunc::Memory, kit->reset_memory());
1646   load->init_req(TypeFunc::FramePtr, kit->frameptr());
1647   load->init_req(TypeFunc::ReturnAdr, kit->top());
1648 
1649   load->init_req(TypeFunc::Parms, base);
1650   load->init_req(TypeFunc::Parms + 1, ptr);
1651   kit->kill_dead_locals();
1652   kit->add_safepoint_edges(load);
1653   load = kit->gvn().transform(load)->as_LoadFlat();
1654   kit->record_for_igvn(load);
1655 
1656   kit->set_control(kit->gvn().transform(new ProjNode(load, TypeFunc::Control)));
1657   kit->set_all_memory(kit->gvn().transform(new ProjNode(load, TypeFunc::Memory)));
1658   return load->collect_projs(kit, vk, TypeFunc::Parms, null_free);
1659 }
1660 
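     // Try to expand this LoadFlat into individual (non-atomic) field loads. Bails out and returns
     // false for mismatched accesses.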
1661 bool LoadFlatNode::expand_non_atomic(PhaseIterGVN& igvn) {
1662   assert(igvn.delay_transform(), "transformation must be delayed");
1663   if ((_decorators & C2_MISMATCHED) != 0) {
1664     return false;
1665   }
1666 
1667   GraphKit kit(jvms(), &igvn);
1668   kit.set_all_memory(kit.reset_memory());
1669 
1670   Node* base = this->base();
1671   Node* ptr = this->ptr();
1672 
1673   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1674     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1675     if (proj_out == nullptr) {
1676       continue;
1677     }
1678 
1679     ciField* field = _vk->nonstatic_field_at(i);
1680     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - _vk->payload_offset());
1681     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1682     igvn.set_type(field_ptr, field_ptr_type);
1683 
1684     Node* field_value = kit.access_load_at(base, field_ptr, field_ptr_type, igvn.type(proj_out), field->type()->basic_type(), _decorators);
1685     igvn.replace_node(proj_out, field_value);
1686   }
1687 
1688   if (!_null_free) {
1689     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1690     if (proj_out != nullptr) {
1691       Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, _vk->null_marker_offset_in_payload());
1692       const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1693       igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1694       Node* null_marker_value = kit.access_load_at(base, null_marker_ptr, null_marker_ptr_type, TypeInt::BOOL, T_BOOLEAN, _decorators);
1695       igvn.replace_node(proj_out, null_marker_value);
1696     }
1697   }
1698 
1699   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1700   if (old_ctrl != nullptr) {
1701     igvn.replace_node(old_ctrl, kit.control());
1702   }
1703   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1704   Node* new_mem = kit.reset_memory();
1705   if (old_mem != nullptr) {
1706     igvn.replace_node(old_mem, new_mem);
1707   }
1708   return true;
1709 }
1710 
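     // Expand this LoadFlat into a single mismatched load of the whole payload, bracketed by memory
     // barriers, and then extract the individual field values from the loaded payload.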
1711 void LoadFlatNode::expand_atomic(PhaseIterGVN& igvn) {
1712   assert(igvn.delay_transform(), "transformation must be delayed");
1713   GraphKit kit(jvms(), &igvn);
1714   kit.set_all_memory(kit.reset_memory());
1715 
1716   Node* base = this->base();
1717   Node* ptr = this->ptr();
1718 
1719   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1720   kit.insert_mem_bar(Op_MemBarCPUOrder);
1721   Node* payload = kit.access_load_at(base, ptr, TypeRawPtr::BOTTOM, Type::get_const_basic_type(payload_bt), payload_bt,
1722                                      _decorators | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD, kit.control());
1723   kit.insert_mem_bar(Op_MemBarCPUOrder);
1724 
1725   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1726   if (old_ctrl != nullptr) {
1727     igvn.replace_node(old_ctrl, kit.control());
1728   }
1729   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1730   Node* new_mem = kit.reset_memory();
1731   if (old_mem != nullptr) {
1732     igvn.replace_node(old_mem, new_mem);
1733   }
1734 
1735   expand_projs_atomic(igvn, kit.control(), payload);
1736 }
1737 
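     // Recursively collect the types of all flattened fields into 'field_types', starting at 'idx'.
     // If a (sub-)value is not null-free, a TypeInt::BOOL entry for its null marker is appended after
     // its field types.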
1738 void LoadFlatNode::collect_field_types(ciInlineKlass* vk, const Type** field_types, int idx, int limit, bool null_free, bool trust_null_free_oop) {
1739   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1740   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1741     ciField* field = vk->declared_nonstatic_field_at(i);
1742     if (field->is_flat()) {
1743       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1744       collect_field_types(field_klass, field_types, idx, limit, field->is_null_free(), trust_null_free_oop && field->is_null_free());
1745       idx += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1746       continue;
1747     }
1748 
1749     const Type* field_type = Type::get_const_type(field->type());
1750     if (trust_null_free_oop && field->is_null_free()) {
1751       field_type = field_type->filter(TypePtr::NOTNULL);
1752     }
1753 
1754     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1755     field_types[idx] = field_type;
1756     idx++;
1757   }
1758 
1759   if (!null_free) {
1760     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1761     field_types[idx] = TypeInt::BOOL;
1762   }
1763 }
1764 
1765 // Create an InlineTypeNode whose field values are extracted from this LoadFlatNode's
1766 // projections, starting at projection number 'proj_con'
1767 InlineTypeNode* LoadFlatNode::collect_projs(GraphKit* kit, ciInlineKlass* vk, int proj_con, bool null_free) {
1768   PhaseGVN& gvn = kit->gvn();
1769   InlineTypeNode* res = InlineTypeNode::make_uninitialized(gvn, vk, null_free);
1770   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1771     ciField* field = vk->declared_nonstatic_field_at(i);
1772     Node* field_value;
1773     if (field->is_flat()) {
1774       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1775       field_value = collect_projs(kit, field_klass, proj_con, field->is_null_free());
1776       proj_con += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1777     } else {
1778       field_value = gvn.transform(new ProjNode(this, proj_con));
1779       if (field->type()->is_inlinetype()) {
1780         field_value = InlineTypeNode::make_from_oop(kit, field_value, field->type()->as_inline_klass());
1781       }
1782       proj_con++;
1783     }
1784     res->set_field_value(i, field_value);
1785   }
1786 
1787   if (null_free) {
1788     res->set_null_marker(gvn);
1789   } else {
1790     res->set_null_marker(gvn, gvn.transform(new ProjNode(this, proj_con)));
1791   }
1792   return gvn.transform(res)->as_InlineType();
1793 }
1794 
1795 // Extract the values of the flattened fields from the loaded payload
1796 void LoadFlatNode::expand_projs_atomic(PhaseIterGVN& igvn, Node* ctrl, Node* payload) {
1797   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1798   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1799     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1800     if (proj_out == nullptr) {
1801       continue;
1802     }
1803 
1804     ciField* field = _vk->nonstatic_field_at(i);
1805     int field_offset = field->offset_in_bytes() - _vk->payload_offset();
1806     const Type* field_type = igvn.type(proj_out);
1807     Node* field_value = get_payload_value(igvn, ctrl, payload_bt, payload, field_type, field->type()->basic_type(), field_offset);
1808     igvn.replace_node(proj_out, field_value);
1809   }
1810 
1811   if (!_null_free) {
1812     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1813     if (proj_out == nullptr) {
1814       return;
1815     }
1816 
1817     int null_marker_offset = _vk->null_marker_offset_in_payload();
1818     Node* null_marker_value = get_payload_value(igvn, ctrl, payload_bt, payload, TypeInt::BOOL, T_BOOLEAN, null_marker_offset);
1819     igvn.replace_node(proj_out, null_marker_value);
1820   }
1821 }
1822 
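     // Extract a value of type 'value_bt' stored at byte 'offset' within the payload: shift the
     // payload right by offset * 8 bits and narrow the result to the value's type. Illustrative
     // example (values assumed): a T_SHORT at offset 2 of a T_LONG payload is recovered as
     // (int)(payload >> 16), narrowed to 16 bits.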
1823 Node* LoadFlatNode::get_payload_value(PhaseIterGVN& igvn, Node* ctrl, BasicType payload_bt, Node* payload, const Type* value_type, BasicType value_bt, int offset) {
1824   assert((offset + type2aelembytes(value_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
1825   Node* value = nullptr;
1826   // Shift right so the value's bits end up at the lowest position of the payload
1827   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
1828   if (payload_bt == T_LONG) {
1829     value = igvn.transform(new URShiftLNode(payload, shift_val));
1830     value = igvn.transform(new ConvL2INode(value));
1831   } else {
1832     value = igvn.transform(new URShiftINode(payload, shift_val));
1833   }
1834 
1835   if (value_bt == T_INT) {
1836     return value;
1837   } else if (!is_java_primitive(value_bt)) {
1838     assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1839     value = igvn.transform(new CastI2NNode(ctrl, value, value_type->make_narrowoop()));
1840     value = igvn.transform(new DecodeNNode(value, value_type));
1841 
1842     // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
1843     // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
1844     // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
1845     // scalarization, CastI2N nodes are always used by a load if scalarization happens, which inherently keeps them pinned above the safepoint.
1846     return value;
1847   } else {
1848     // Make sure to zero unused bits in the 32-bit value
1849     return Compile::narrow_value(value_bt, value, nullptr, &igvn, true);
1850   }
1851 }
1852 
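     // Emit a StoreFlatNode that stores 'value' to the flat representation at 'ptr' in 'base'.
     // Inline type field values are allocated (buffered) first, since the expanded store writes
     // their oops.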
1853 void StoreFlatNode::store(GraphKit* kit, Node* base, Node* ptr, InlineTypeNode* value, bool null_free, DecoratorSet decorators) {
1854   value = value->allocate_fields(kit);
1855   StoreFlatNode* store = new StoreFlatNode(null_free, decorators);
1856   store->init_req(TypeFunc::Control, kit->control());
1857   store->init_req(TypeFunc::I_O, kit->top());
1858   store->init_req(TypeFunc::Memory, kit->reset_memory());
1859   store->init_req(TypeFunc::FramePtr, kit->frameptr());
1860   store->init_req(TypeFunc::ReturnAdr, kit->top());
1861 
1862   store->init_req(TypeFunc::Parms, base);
1863   store->init_req(TypeFunc::Parms + 1, ptr);
1864   store->init_req(TypeFunc::Parms + 2, value);
1865   kit->kill_dead_locals();
1866   kit->add_safepoint_edges(store);
1867   store = kit->gvn().transform(store)->as_StoreFlat();
1868   kit->record_for_igvn(store);
1869 
1870   kit->set_control(kit->gvn().transform(new ProjNode(store, TypeFunc::Control)));
1871   kit->set_all_memory(kit->gvn().transform(new ProjNode(store, TypeFunc::Memory)));
1872 }
1873 
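     // Try to expand this StoreFlat into individual (non-atomic) field stores. Bails out and returns
     // false for mismatched accesses.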
1874 bool StoreFlatNode::expand_non_atomic(PhaseIterGVN& igvn) {
1875   assert(igvn.delay_transform(), "transformation must be delayed");
1876   if ((_decorators & C2_MISMATCHED) != 0) {
1877     return false;
1878   }
1879 
1880   GraphKit kit(jvms(), &igvn);
1881   kit.set_all_memory(kit.reset_memory());
1882 
1883   Node* base = this->base();
1884   Node* ptr = this->ptr();
1885   InlineTypeNode* value = this->value();
1886 
1887   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1888   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1889     ciField* field = vk->nonstatic_field_at(i);
1890     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - vk->payload_offset());
1891     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1892     igvn.set_type(field_ptr, field_ptr_type);
1893     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1894     kit.access_store_at(base, field_ptr, field_ptr_type, field_value, igvn.type(field_value), field->type()->basic_type(), _decorators);
1895   }
1896 
1897   if (!_null_free) {
1898     Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, vk->null_marker_offset_in_payload());
1899     const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1900     igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1901     Node* null_marker_value = value->get_null_marker();
1902     kit.access_store_at(base, null_marker_ptr, null_marker_ptr_type, null_marker_value, TypeInt::BOOL, T_BOOLEAN, _decorators);
1903   }
1904 
1905   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1906   if (old_ctrl != nullptr) {
1907     igvn.replace_node(old_ctrl, kit.control());
1908   }
1909   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1910   Node* new_mem = kit.reset_memory();
1911   if (old_mem != nullptr) {
1912     igvn.replace_node(old_mem, new_mem);
1913   }
1914   return true;
1915 }
1916 
1917 void StoreFlatNode::expand_atomic(PhaseIterGVN& igvn) {
1918   // Convert to a payload value of at most 64 bits and write it atomically.
1919   // The payload can contain at most two oop fields, which must be narrow: otherwise they would be 64 bits
1920   // in size and would be written by a "normal" oop store instead. If the payload contains oops, its size is
1921   // always 64 bits because the next smaller (power-of-two) size, 32 bits, could only hold one narrow oop that
1922   // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
1923   assert(igvn.delay_transform(), "transformation must be delayed");
1924   GraphKit kit(jvms(), &igvn);
1925   kit.set_all_memory(kit.reset_memory());
1926 
1927   Node* base = this->base();
1928   Node* ptr = this->ptr();
1929   InlineTypeNode* value = this->value();
1930 
1931   int oop_off_1 = -1;
1932   int oop_off_2 = -1;
1933   Node* payload = convert_to_payload(igvn, kit.control(), value, _null_free, oop_off_1, oop_off_2);
1934 
1935   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1936   BasicType payload_bt = vk->atomic_size_to_basic_type(_null_free);
1937   kit.insert_mem_bar(Op_MemBarCPUOrder);
1938   if (!UseG1GC || oop_off_1 == -1) {
1939     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
1940     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
1941     // ZGC does not support compressed oops: an oop field would be 64 bits, fill the whole payload, and be written by a "normal" oop store instead; hence no oops can appear in the payload here.
1942     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
1943     kit.access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, Type::get_const_basic_type(payload_bt), payload_bt, _decorators | C2_MISMATCHED, true, value);
1944   } else {
1945     // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
1946     assert(UseG1GC, "Unexpected GC");
1947     assert(payload_bt == T_LONG, "Unexpected payload type");
1948     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
1949     Node* oop_offset = (oop_off_2 == -1) ? igvn.intcon(oop_off_1) : nullptr;
1950     Node* mem = kit.reset_memory();
1951     kit.set_all_memory(mem);
1952     Node* store = igvn.transform(new StoreLSpecialNode(kit.control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
1953     kit.set_memory(store, TypeRawPtr::BOTTOM);
1954   }
1955   kit.insert_mem_bar(Op_MemBarCPUOrder);
1956 
1957   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1958   if (old_ctrl != nullptr) {
1959     igvn.replace_node(old_ctrl, kit.control());
1960   }
1961   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1962   Node* new_mem = kit.reset_memory();
1963   if (old_mem != nullptr) {
1964     igvn.replace_node(old_mem, new_mem);
1965   }
1966 }
1967 
1968 // Pack the field values of 'value' into a single payload of the appropriate basic type. The payload offsets of up to two narrow oop fields are recorded in 'oop_off_1' and 'oop_off_2'.
1969 Node* StoreFlatNode::convert_to_payload(PhaseIterGVN& igvn, Node* ctrl, InlineTypeNode* value, bool null_free, int& oop_off_1, int& oop_off_2) {
1970   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1971   BasicType payload_bt = vk->atomic_size_to_basic_type(null_free);
1972   Node* payload = igvn.zerocon(payload_bt);
1973   if (!null_free) {
1974     // Set the null marker
1975     payload = set_payload_value(igvn, payload_bt, payload, T_BOOLEAN, value->get_null_marker(), vk->null_marker_offset_in_payload());
1976   }
1977 
1978   // Iterate over the fields and add their values to the payload
1979   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1980     ciField* field = vk->nonstatic_field_at(i);
1981     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1982     ciType* field_klass = field->type();
1983     BasicType field_bt = field_klass->basic_type();
1984     int field_offset_in_payload = field->offset_in_bytes() - vk->payload_offset();
1985     if (!field_klass->is_primitive_type()) {
1986       // Narrow oop field
1987       assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1988       if (oop_off_1 == -1) {
1989         oop_off_1 = field_offset_in_payload;
1990       } else {
1991         assert(oop_off_2 == -1, "already set");
1992         oop_off_2 = field_offset_in_payload;
1993       }
1994 
1995       const Type* val_type = Type::get_const_type(field_klass)->make_narrowoop();
1996       if (field_value->is_InlineType()) {
1997         assert(field_value->as_InlineType()->is_allocated(&igvn), "must be allocated");
1998       }
1999 
2000       field_value = igvn.transform(new EncodePNode(field_value, val_type));
2001       field_value = igvn.transform(new CastP2XNode(ctrl, field_value));
2002       field_value = igvn.transform(new ConvL2INode(field_value));
2003       field_bt = T_INT;
2004     }
2005     payload = set_payload_value(igvn, payload_bt, payload, field_bt, field_value, field_offset_in_payload);
2006   }
2007 
2008   return payload;
2009 }
2010 
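     // Insert 'value' of type 'val_bt' into the payload at byte 'offset': zero the unused upper bits,
     // shift left by offset * 8 bits, and OR the result into the payload. Illustrative example (values
     // assumed): writing a T_CHAR 0x1234 at offset 4 of a T_LONG payload ORs in 0x0000123400000000.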
2011 Node* StoreFlatNode::set_payload_value(PhaseIterGVN& igvn, BasicType payload_bt, Node* payload, BasicType val_bt, Node* value, int offset) {
2012   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
2013 
2014   // Make sure to zero unused bits in the 32-bit value
2015   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
2016     value = igvn.transform(new AndINode(value, igvn.intcon(0xFF)));
2017   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
2018     value = igvn.transform(new AndINode(value, igvn.intcon(0xFFFF)));
2019   } else if (val_bt == T_FLOAT) {
2020     value = igvn.transform(new MoveF2INode(value));
2021   } else {
2022     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
2023   }
2024 
2025   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
2026   if (payload_bt == T_LONG) {
2027     // Convert to long and mask out the sign-extended upper bits (the backend will fold this and emit a zero-extending i2l)
2028     value = igvn.transform(new ConvI2LNode(value));
2029     value = igvn.transform(new AndLNode(value, igvn.longcon(0xFFFFFFFF)));
2030 
2031     Node* shift_value = igvn.transform(new LShiftLNode(value, shift_val));
2032     payload = new OrLNode(shift_value, payload);
2033   } else {
2034     Node* shift_value = igvn.transform(new LShiftINode(value, shift_val));
2035     payload = new OrINode(shift_value, payload);
2036   }
2037   return igvn.transform(payload);
2038 }
2039 
2040 const Type* LoadFlatNode::Value(PhaseGVN* phase) const {
2041   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
2042       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP) {
2043     return Type::TOP;
2044   }
2045   return bottom_type();
2046 }
2047 
2048 const Type* StoreFlatNode::Value(PhaseGVN* phase) const {
2049   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
2050       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP || phase->type(value()) == Type::TOP) {
2051     return Type::TOP;
2052   }
2053   return bottom_type();
2054 }