1 /*
   2  * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/ciInlineKlass.hpp"
  26 #include "gc/shared/barrierSet.hpp"
  27 #include "gc/shared/c2/barrierSetC2.hpp"
  28 #include "gc/shared/gc_globals.hpp"
  29 #include "oops/accessDecorators.hpp"
  30 #include "opto/addnode.hpp"
  31 #include "opto/castnode.hpp"
  32 #include "opto/compile.hpp"
  33 #include "opto/convertnode.hpp"
  34 #include "opto/graphKit.hpp"
  35 #include "opto/inlinetypenode.hpp"
  36 #include "opto/memnode.hpp"
  37 #include "opto/movenode.hpp"
  38 #include "opto/multnode.hpp"
  39 #include "opto/narrowptrnode.hpp"
  40 #include "opto/opcodes.hpp"
  41 #include "opto/phaseX.hpp"
  42 #include "opto/rootnode.hpp"
  43 #include "opto/type.hpp"
  44 #include "utilities/globalDefinitions.hpp"
  45 
  46 // Clones the inline type to handle control flow merges involving multiple inline types.
  47 // The inputs are replaced by PhiNodes to represent the merged values for the given region.
// init_with_top: the inputs of the phis above the returned InlineTypeNode are initialized to top.
  49 InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_non_null, bool init_with_top) {
  50   InlineTypeNode* vt = clone_if_required(gvn, map);
  51   const Type* t = Type::get_const_type(inline_klass());
  52   gvn->set_type(vt, t);
  53   vt->as_InlineType()->set_type(t);
  54 
  55   Node* const top = gvn->C->top();
  56 
  57   // Create a PhiNode for merging the oop values
  58   PhiNode* oop = PhiNode::make(region, init_with_top ? top : vt->get_oop(), t);
  59   gvn->set_type(oop, t);
  60   gvn->record_for_igvn(oop);
  61   vt->set_oop(*gvn, oop);
  62 
  63   // Create a PhiNode for merging the is_buffered values
  64   t = Type::get_const_basic_type(T_BOOLEAN);
  65   Node* is_buffered_node = PhiNode::make(region, init_with_top ? top : vt->get_is_buffered(), t);
  66   gvn->set_type(is_buffered_node, t);
  67   gvn->record_for_igvn(is_buffered_node);
  68   vt->set_req(IsBuffered, is_buffered_node);
  69 
  70   // Create a PhiNode for merging the null_marker values
  71   Node* null_marker_node;
  72   if (is_non_null) {
  73     null_marker_node = gvn->intcon(1);
  74   } else {
  75     t = Type::get_const_basic_type(T_BOOLEAN);
  76     null_marker_node = PhiNode::make(region, init_with_top ? top : vt->get_null_marker(), t);
  77     gvn->set_type(null_marker_node, t);
  78     gvn->record_for_igvn(null_marker_node);
  79   }
  80   vt->set_req(NullMarker, null_marker_node);
  81 
  // Create a PhiNode for merging each of the field values
  83   for (uint i = 0; i < vt->field_count(); ++i) {
  84     ciType* type = vt->field(i)->type();
  85     Node*  value = vt->field_value(i);
  86     // We limit scalarization for inline types with circular fields and can therefore observe nodes
  87     // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
  88     // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
  89     ciField* field = this->field(i);
  90     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  91     bool no_circularity = !gvn->C->has_circular_inline_type() || field->is_flat();
  92     if (type->is_inlinetype() && no_circularity) {
  93       // Handle inline type fields recursively
  94       value = value->as_InlineType()->clone_with_phis(gvn, region, map);
  95     } else {
  96       t = Type::get_const_type(type);
  97       value = PhiNode::make(region, init_with_top ? top : value, t);
  98       gvn->set_type(value, t);
  99       gvn->record_for_igvn(value);
 100     }
 101     vt->set_field_value(i, value);
 102   }
 103   gvn->record_for_igvn(vt);
 104   return vt;
 105 }
 106 
 107 // Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
 108 // for the given region (see InlineTypeNode::clone_with_phis).
 109 bool InlineTypeNode::has_phi_inputs(Node* region) {
 110   // Check oop input
 111   bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
 112 #ifdef ASSERT
 113   if (result) {
 114     // Check all field value inputs for consistency
 115     for (uint i = 0; i < field_count(); ++i) {
 116       Node* n = field_value(i);
 117       if (n->is_InlineType()) {
 118         assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
 119       } else {
 120         assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
 121       }
 122     }
 123   }
 124 #endif
 125   return result;
 126 }
 127 
 128 // Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
 129 InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int phi_index, bool transform) {
 130   assert(inline_klass() == other->inline_klass(), "Merging incompatible types");
 131 
 132   // Merge oop inputs
 133   PhiNode* phi = get_oop()->as_Phi();
 134   phi->set_req(phi_index, other->get_oop());
 135   if (transform) {
 136     set_oop(*gvn, gvn->transform(phi));
 137   }
 138 
 139   // Merge is_buffered inputs
 140   phi = get_is_buffered()->as_Phi();
 141   phi->set_req(phi_index, other->get_is_buffered());
 142   if (transform) {
 143     set_req(IsBuffered, gvn->transform(phi));
 144   }
 145 
 146   // Merge null_marker inputs
 147   Node* null_marker = get_null_marker();
 148   if (null_marker->is_Phi()) {
 149     phi = null_marker->as_Phi();
 150     phi->set_req(phi_index, other->get_null_marker());
 151     if (transform) {
 152       set_req(NullMarker, gvn->transform(phi));
 153     }
 154   } else {
    assert(null_marker->find_int_con(0) == 1, "only with a non-null inline type");
 156   }
 157 
 158   // Merge field values
 159   for (uint i = 0; i < field_count(); ++i) {
 160     Node* val1 =        field_value(i);
 161     Node* val2 = other->field_value(i);
 162     if (val1->is_InlineType()) {
 163       if (val2->is_Phi()) {
 164         val2 = gvn->transform(val2);
 165       }
 166       if (val2->is_top()) {
 167         // The path where 'other' is used is dying. Therefore, we do not need to process the merge with 'other' further.
 168         // The phi inputs of 'this' at 'phi_index' will eventually be removed.
 169         break;
 170       }
 171       val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), phi_index, transform);
 172     } else {
 173       assert(val1->is_Phi(), "must be a phi node");
 174       val1->set_req(phi_index, val2);
 175     }
 176     if (transform) {
 177       set_field_value(i, gvn->transform(val1));
 178     }
 179   }
 180   return this;
 181 }
 182 
 183 // Adds a new merge path to an inline type node with phi inputs
 184 void InlineTypeNode::add_new_path(Node* region) {
 185   assert(has_phi_inputs(region), "must have phi inputs");
 186 
 187   PhiNode* phi = get_oop()->as_Phi();
 188   phi->add_req(nullptr);
 189   assert(phi->req() == region->req(), "must be same size as region");
 190 
 191   phi = get_is_buffered()->as_Phi();
 192   phi->add_req(nullptr);
 193   assert(phi->req() == region->req(), "must be same size as region");
 194 
 195   phi = get_null_marker()->as_Phi();
 196   phi->add_req(nullptr);
 197   assert(phi->req() == region->req(), "must be same size as region");
 198 
 199   for (uint i = 0; i < field_count(); ++i) {
 200     Node* val = field_value(i);
 201     if (val->is_InlineType()) {
 202       val->as_InlineType()->add_new_path(region);
 203     } else {
 204       val->as_Phi()->add_req(nullptr);
 205       assert(val->req() == region->req(), "must be same size as region");
 206     }
 207   }
 208 }
 209 
 210 Node* InlineTypeNode::field_value(uint index) const {
 211   assert(index < field_count(), "index out of bounds");
 212   return in(Values + index);
 213 }
 214 
 215 // Get the value of the field at the given offset.
 216 // If 'recursive' is true, flat inline type fields will be resolved recursively.
 217 Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
 218   // Find the declared field which contains the field we are looking for
 219   int index = inline_klass()->field_index_by_offset(offset);
 220   Node* value = field_value(index);
 221   assert(value != nullptr, "field value not found");
 222   ciField* field = this->field(index);
 223   assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 224 
 225   if (!recursive || !field->is_flat() || value->is_top()) {
 226     assert(offset == field->offset_in_bytes(), "offset mismatch");
 227     return value;
 228   }
 229 
 230   // Flat inline type field
 231   InlineTypeNode* vt = value->as_InlineType();
 232   assert(field->is_flat(), "must be flat");
 233   if (offset == field->null_marker_offset()) {
 234     return vt->get_null_marker();
 235   } else {
 236     int sub_offset = offset - field->offset_in_bytes(); // Offset of the flattened field inside the declared field
 237     sub_offset += vt->inline_klass()->payload_offset(); // Add header size
 238     return vt->field_value_by_offset(sub_offset, recursive);
 239   }
 240 }
 241 
 242 void InlineTypeNode::set_field_value(uint index, Node* value) {
 243   assert(index < field_count(), "index out of bounds");
 244   set_req(Values + index, value);
 245 }
 246 
 247 void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
 248   set_field_value(field_index(offset), value);
 249 }
 250 
 251 uint InlineTypeNode::field_index(int offset) const {
 252   uint i = 0;
 253   for (; i < field_count() && field(i)->offset_in_bytes() != offset; i++) { }
 254   assert(i < field_count(), "field not found");
 255   return i;
 256 }
 257 
 258 ciField* InlineTypeNode::field(uint index) const {
 259   assert(index < field_count(), "index out of bounds");
 260   return inline_klass()->declared_nonstatic_field_at(index);
 261 }
 262 
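// Adds the scalarized field values of this inline type as debug edges to the safepoint 'sfpt'. For flat fields
// the field values are added recursively, followed by the field's null marker if it is nullable. For non-flat
// fields the value itself is added; inline type values are additionally pushed onto 'worklist' to be scalarized
// later. Returns the number of edges added.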
 263 uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) {
 264   uint cnt = 0;
 265   for (uint i = 0; i < field_count(); ++i) {
 266     Node* value = field_value(i);
 267     ciField* field = this->field(i);
 268     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 269     if (field->is_flat()) {
 270       InlineTypeNode* vt = value->as_InlineType();
 271       cnt += vt->add_fields_to_safepoint(worklist, sfpt);
 272       if (!field->is_null_free()) {
 273         // The null marker of a flat field is added right after we scalarize that field
 274         sfpt->add_req(vt->get_null_marker());
 275         cnt++;
 276       }
 277       continue;
 278     }
 279     if (value->is_InlineType()) {
 280       // Add inline type to the worklist to process later
 281       worklist.push(value);
 282     }
 283     sfpt->add_req(value);
 284     cnt++;
 285   }
 286   return cnt;
 287 }
 288 
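// Replaces the debug uses of this inline type in the safepoint 'sfpt' by a SafePointScalarObjectNode that
// describes the null marker and the field values, so the value object can be reconstructed on deoptimization.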
 289 void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
 290   JVMState* jvms = sfpt->jvms();
 291   assert(jvms != nullptr, "missing JVMS");
 292   uint first_ind = (sfpt->req() - jvms->scloff());
 293 
 294   // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have a null marker field that
 296   // needs to be checked before using the field values.
 297   sfpt->add_req(get_null_marker());
 298   uint nfields = add_fields_to_safepoint(worklist, sfpt);
 299   jvms->set_endoff(sfpt->req());
 300   // Replace safepoint edge by SafePointScalarObjectNode
 301   SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
 302                                                                   nullptr,
 303                                                                   first_ind,
 304                                                                   sfpt->jvms()->depth(),
 305                                                                   nfields);
 306   sobj->init_req(0, igvn->C->root());
 307   sobj = igvn->transform(sobj)->as_SafePointScalarObject();
 308   igvn->rehash_node_delayed(sfpt);
 309   for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
 310     Node* debug = sfpt->in(i);
 311     if (debug != nullptr && debug->uncast() == this) {
 312       sfpt->set_req(i, sobj);
 313     }
 314   }
 315 }
 316 
 317 void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
 318   // If the inline type has a constant or loaded oop, use the oop instead of scalarization
 319   // in the safepoint to avoid keeping field loads live just for the debug info.
 320   Node* oop = get_oop();
 321   bool use_oop = false;
 322   if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
 323     Unique_Node_List worklist;
 324     VectorSet visited;
 325     visited.set(oop->_idx);
 326     worklist.push(oop);
 327     use_oop = true;
 328     while (worklist.size() > 0 && use_oop) {
 329       Node* n = worklist.pop();
 330       for (uint i = 1; i < n->req(); i++) {
 331         Node* in = n->in(i);
 332         if (in->is_Phi() && !visited.test_set(in->_idx)) {
 333           worklist.push(in);
 334         } else if (!(in->is_Con() || in->is_Parm())) {
 335           use_oop = false;
 336           break;
 337         }
 338       }
 339     }
 340   } else {
 341     use_oop = allow_oop && is_allocated(igvn) &&
 342               (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
 343   }
 344 
 345   ResourceMark rm;
 346   Unique_Node_List safepoints;
 347   Unique_Node_List vt_worklist;
 348   Unique_Node_List worklist;
 349   worklist.push(this);
 350   while (worklist.size() > 0) {
 351     Node* n = worklist.pop();
 352     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 353       Node* use = n->fast_out(i);
 354       if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
 355         safepoints.push(use);
 356       } else if (use->is_ConstraintCast()) {
 357         worklist.push(use);
 358       }
 359     }
 360   }
 361 
 362   // Process all safepoint uses and scalarize inline type
 363   while (safepoints.size() > 0) {
 364     SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
 365     if (use_oop) {
 366       for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
 367         Node* debug = sfpt->in(i);
 368         if (debug != nullptr && debug->uncast() == this) {
 369           sfpt->set_req(i, get_oop());
 370         }
 371       }
 372       igvn->rehash_node_delayed(sfpt);
 373     } else {
 374       make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
 375     }
 376   }
 377   // Now scalarize non-flat fields
 378   for (uint i = 0; i < vt_worklist.size(); ++i) {
 379     InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
 380     vt->make_scalar_in_safepoints(igvn);
 381   }
 382   if (outcnt() == 0) {
 383     igvn->record_for_igvn(this);
 384   }
 385 }
 386 
 387 // We limit scalarization for inline types with circular fields and can therefore observe nodes
 388 // of the same type but with different scalarization depth during GVN. This method adjusts the
 389 // scalarization depth to avoid inconsistencies during merging.
 390 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
 391   if (!kit->C->has_circular_inline_type()) {
 392     return this;
 393   }
 394   GrowableArray<ciType*> visited;
 395   visited.push(inline_klass());
 396   return adjust_scalarization_depth_impl(kit, visited);
 397 }
 398 
 399 InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
 400   InlineTypeNode* val = this;
 401   for (uint i = 0; i < field_count(); ++i) {
 402     Node* value = field_value(i);
 403     Node* new_value = value;
 404     ciField* field = this->field(i);
 405     ciType* ft = field->type();
 406     if (value->is_InlineType()) {
 407       assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 408       if (!field->is_flat() && visited.contains(ft)) {
 409         new_value = value->as_InlineType()->buffer(kit)->get_oop();
 410       } else {
 411         int old_len = visited.length();
 412         visited.push(ft);
 413         new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
 414         visited.trunc_to(old_len);
 415       }
 416     } else if (ft->is_inlinetype() && !visited.contains(ft)) {
 417       int old_len = visited.length();
 418       visited.push(ft);
 419       new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 420       visited.trunc_to(old_len);
 421     }
 422     if (value != new_value) {
 423       if (val == this) {
 424         val = clone_if_required(&kit->gvn(), kit->map());
 425       }
 426       val->set_field_value(i, new_value);
 427     }
 428   }
 429   return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
 430 }
 431 
 432 void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
 433   // Initialize the inline type by loading its field values from
 434   // memory and adding the values as input edges to the node.
 435   ciInlineKlass* vk = inline_klass();
 436   for (uint i = 0; i < field_count(); ++i) {
 437     ciField* field = this->field(i);
 438     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 439     int field_off = field->offset_in_bytes() - vk->payload_offset();
 440     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 441     Node* value = nullptr;
 442     ciType* ft = field->type();
 443     bool field_null_free = field->is_null_free();
 444     if (field->is_flat()) {
 445       // Recursively load the flat inline type field
 446       ciInlineKlass* fvk = ft->as_inline_klass();
 447       bool atomic = field->is_atomic();
 448 
 449       int old_len = visited.length();
 450       visited.push(ft);
 451       value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
 452                                   field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
 453       visited.trunc_to(old_len);
 454     } else {
 455       // Load field value from memory
 456       BasicType bt = type2field[ft->basic_type()];
 457       assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
 458       const Type* val_type = Type::get_const_type(ft);
 459       if (trust_null_free_oop && field_null_free) {
 460         val_type = val_type->join_speculative(TypePtr::NOTNULL);
 461       }
 462       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 463       value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
 464       // Loading a non-flattened inline type from memory
 465       if (visited.contains(ft)) {
 466         kit->C->set_has_circular_inline_type(true);
 467       } else if (ft->is_inlinetype()) {
 468         int old_len = visited.length();
 469         visited.push(ft);
 470         value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
 471         visited.trunc_to(old_len);
 472       }
 473     }
 474     set_field_value(i, value);
 475   }
 476 }
 477 
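// Stores this inline type to memory at 'ptr' (relative to 'base') in flat layout. If an atomic store is not
// required (non-atomic access, immutable memory, or a naturally atomic layout), the null marker (for nullable
// layouts) and the field values are stored individually; otherwise the store is delegated to StoreFlatNode::store.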
 478 void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
 479   ciInlineKlass* vk = inline_klass();
 480   bool do_atomic = atomic;
  // With immutable memory, a non-atomic store and an atomic store are the same
 482   if (immutable_memory) {
 483     do_atomic = false;
 484   }
  // If there is only one flattened field, a non-atomic store and an atomic store are the same
 486   if (vk->is_naturally_atomic(null_free)) {
 487     do_atomic = false;
 488   }
 489 
 490   if (!do_atomic) {
 491     if (!null_free) {
 492       int nm_offset = vk->null_marker_offset_in_payload();
 493       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
 494       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 495       kit->access_store_at(base, nm_ptr, nm_ptr_type, get_null_marker(), TypeInt::BOOL, T_BOOLEAN, decorators);
 496     }
 497     store(kit, base, ptr, immutable_memory, decorators);
 498     return;
 499   }
 500 
 501   StoreFlatNode::store(kit, base, ptr, this, null_free, decorators);
 502 }
 503 
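// Stores this inline type into the flat array 'base' at index 'idx'. Dispatches at runtime on the array layout
// (nullable atomic, null-free atomic, null-free non-atomic) and emits a store for each possible layout.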
 504 void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) {
 505   PhaseGVN& gvn = kit->gvn();
 506   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
 507   kit->C->set_flat_accesses();
 508   ciInlineKlass* vk = inline_klass();
 509   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
 510 
 511   RegionNode* region = new RegionNode(4);
 512   gvn.set_type(region, Type::CONTROL);
 513   kit->record_for_igvn(region);
 514 
 515   Node* input_memory_state = kit->reset_memory();
 516   kit->set_all_memory(input_memory_state);
 517 
 518   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
 519   gvn.set_type(mem, Type::MEMORY);
 520   kit->record_for_igvn(mem);
 521 
 522   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
 523   gvn.set_type(io, Type::ABIO);
 524   kit->record_for_igvn(io);
 525 
  Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is unspecified in C++ and since this sets control, it needs to come first
 527   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
 528 
 529   // Nullable
 530   kit->set_control(kit->IfFalse(iff_null_free));
 531   if (!kit->stopped()) {
 532     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
 533     kit->set_all_memory(input_memory_state);
 534     Node* cast = kit->cast_to_flat_array_exact(base, vk, false, true);
 535     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 536     store_flat(kit, cast, ptr, true, false, false, decorators);
 537 
 538     region->init_req(1, kit->control());
 539     mem->set_req(1, kit->reset_memory());
 540     io->set_req(1, kit->i_o());
 541   }
 542 
 543   // Null-free
 544   kit->set_control(kit->IfTrue(iff_null_free));
 545   if (!kit->stopped()) {
 546     kit->set_all_memory(input_memory_state);
 547 
 548     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
 549     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
 550 
 551     // Atomic
 552     kit->set_control(kit->IfTrue(iff_atomic));
 553     if (!kit->stopped()) {
 554       assert(vk->has_null_free_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
 555       kit->set_all_memory(input_memory_state);
 556       Node* cast = kit->cast_to_flat_array_exact(base, vk, true, true);
 557       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 558       store_flat(kit, cast, ptr, true, false, true, decorators);
 559 
 560       region->init_req(2, kit->control());
 561       mem->set_req(2, kit->reset_memory());
 562       io->set_req(2, kit->i_o());
 563     }
 564 
 565     // Non-atomic
 566     kit->set_control(kit->IfFalse(iff_atomic));
 567     if (!kit->stopped()) {
 568       assert(vk->has_null_free_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
 569       kit->set_all_memory(input_memory_state);
 570       Node* cast = kit->cast_to_flat_array_exact(base, vk, true, false);
 571       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
 572       store_flat(kit, cast, ptr, false, false, true, decorators);
 573 
 574       region->init_req(3, kit->control());
 575       mem->set_req(3, kit->reset_memory());
 576       io->set_req(3, kit->i_o());
 577     }
 578   }
 579 
 580   kit->set_control(gvn.transform(region));
 581   kit->set_all_memory(gvn.transform(mem));
 582   kit->set_i_o(gvn.transform(io));
 583 }
 584 
 585 void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
 586   // Write field values to memory
 587   ciInlineKlass* vk = inline_klass();
 588   for (uint i = 0; i < field_count(); ++i) {
 589     ciField* field = this->field(i);
 590     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 591     int field_off = field->offset_in_bytes() - vk->payload_offset();
 592     Node* field_val = field_value(i);
 593     bool field_null_free = field->is_null_free();
 594     ciType* ft = field->type();
 595     Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
 596     if (field->is_flat()) {
 597       // Recursively store the flat inline type field
 598       ciInlineKlass* fvk = ft->as_inline_klass();
 599       bool atomic = field->is_atomic();
 600 
 601       field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
 602     } else {
 603       // Store field value to memory
 604       BasicType bt = type2field[ft->basic_type()];
 605       const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
 606       const Type* val_type = Type::get_const_type(ft);
 607       kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
 608     }
 609   }
 610 }
 611 
// Adds a check between val1 and val2. Jumps to 'region' if the check passes and, if 'phi' is given, sets the corresponding phi input to false.
 613 static void acmp_val_guard(PhaseIterGVN* igvn, RegionNode* region, Node* phi, Node** ctrl, BasicType bt, BoolTest::mask test, Node* val1, Node* val2) {
 614   Node* cmp = nullptr;
 615   switch (bt) {
 616   case T_FLOAT:
 617     val1 = igvn->register_new_node_with_optimizer(new MoveF2INode(val1));
 618     val2 = igvn->register_new_node_with_optimizer(new MoveF2INode(val2));
 619     // Fall-through to the int case
 620   case T_BOOLEAN:
 621   case T_CHAR:
 622   case T_BYTE:
 623   case T_SHORT:
 624   case T_INT:
 625     cmp = igvn->register_new_node_with_optimizer(new CmpINode(val1, val2));
 626     break;
 627   case T_DOUBLE:
 628     val1 = igvn->register_new_node_with_optimizer(new MoveD2LNode(val1));
 629     val2 = igvn->register_new_node_with_optimizer(new MoveD2LNode(val2));
 630     // Fall-through to the long case
 631   case T_LONG:
 632     cmp = igvn->register_new_node_with_optimizer(new CmpLNode(val1, val2));
 633     break;
 634   default:
 635     assert(is_reference_type(bt), "must be");
 636     cmp = igvn->register_new_node_with_optimizer(new CmpPNode(val1, val2));
 637   }
 638   Node* bol = igvn->register_new_node_with_optimizer(new BoolNode(cmp, test));
 639   IfNode* iff = igvn->register_new_node_with_optimizer(new IfNode(*ctrl, bol, PROB_MAX, COUNT_UNKNOWN))->as_If();
 640   Node* if_f = igvn->register_new_node_with_optimizer(new IfFalseNode(iff));
 641   Node* if_t = igvn->register_new_node_with_optimizer(new IfTrueNode(iff));
 642 
 643   region->add_req(if_t);
 644   if (phi != nullptr) {
 645     phi->add_req(igvn->intcon(0));
 646   }
 647   *ctrl = if_f;
 648 }
 649 
 650 // Check if a substitutability check between 'this' and 'other' can be implemented in IR
 651 bool InlineTypeNode::can_emit_substitutability_check(Node* other) const {
 652   if (other != nullptr && other->is_InlineType() && bottom_type() != other->bottom_type()) {
    // Different types. This is dead code because a check above already guarantees that both operands have the same type.
 654     return false;
 655   }
 656   for (uint i = 0; i < field_count(); i++) {
 657     ciType* ft = field(i)->type();
 658     Node* fv = field_value(i);
 659     if (ft->is_inlinetype() && fv->is_InlineType()) {
 660       // Check recursively
 661       if (!fv->as_InlineType()->can_emit_substitutability_check(nullptr)){
 662         return false;
 663       }
 664     } else if (!ft->is_primitive_type() && ft->as_klass()->can_be_inline_klass()) {
 665       // Comparing this field might require (another) substitutability check, bail out
 666       return false;
 667     }
 668   }
 669   return true;
 670 }
 671 
 672 // Emit IR to check substitutability between 'this' (left operand) and the value object referred to by 'other' (right operand).
 673 // Parse-time checks guarantee that both operands have the same type. If 'other' is not an InlineTypeNode, we need to emit loads for the field values.
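// For example, for a value class
//   value class Point { int x; int y; }
// the emitted IR compares the x and y values of both operands and jumps to 'region' with the phi input set to
// false as soon as a mismatch is found.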
 674 void InlineTypeNode::check_substitutability(PhaseIterGVN* igvn, RegionNode* region, Node* phi, Node** ctrl, Node* mem, Node* base, Node* other, bool flat) const {
 675   BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 676   DecoratorSet decorators = IN_HEAP | MO_UNORDERED | C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD;
 677   MergeMemNode* local_mem = igvn->register_new_node_with_optimizer(MergeMemNode::make(mem))->as_MergeMem();
 678 
 679   ciInlineKlass* vk = inline_klass();
 680   for (uint i = 0; i < field_count(); i++) {
 681     ciField* field = this->field(i);
 682     int field_off = field->offset_in_bytes();
 683     if (flat) {
 684       // Flat access, no header
 685       field_off -= vk->payload_offset();
 686     }
 687     Node* this_field = field_value(i);
 688     ciType* ft = field->type();
 689     BasicType bt = ft->basic_type();
 690 
 691     Node* other_base = base;
 692     Node* other_field = other;
 693 
 694     // Get field value of the other operand
 695     if (other->is_InlineType()) {
 696       other_field = other->as_InlineType()->field_value(i);
 697       other_base = nullptr;
 698     } else {
 699       // 'other' is an oop, compute address of the field
 700       other_field = igvn->register_new_node_with_optimizer(new AddPNode(base, other, igvn->MakeConX(field_off)));
 701       if (field->is_flat()) {
 702         // Flat field, load is handled recursively below
 703         assert(this_field->is_InlineType(), "inconsistent field value");
 704       } else {
 705         // Non-flat field, load the field value and update the base because we are now operating on a different object
 706         assert(is_java_primitive(bt) || other_field->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent field type");
 707         C2AccessValuePtr addr(other_field, other_field->bottom_type()->is_ptr());
 708         C2OptAccess access(*igvn, *ctrl, local_mem, decorators, bt, base, addr);
 709         other_field = bs->load_at(access, Type::get_const_type(ft));
 710         other_base = other_field;
 711       }
 712     }
 713 
 714     if (this_field->is_InlineType()) {
 715       RegionNode* done_region = new RegionNode(1);
 716       ciField* field = this->field(i);
 717       assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 718       if (!field->is_null_free()) {
 719         // Nullable field, check null marker before accessing the fields
 720         if (field->is_flat()) {
 721           // Flat field, check embedded null marker
 722           Node* null_marker = nullptr;
 723           if (other_field->is_InlineType()) {
 724             // TODO 8350865 Should we add an IGVN optimization to fold null marker loads from InlineTypeNodes?
 725             null_marker = other_field->as_InlineType()->get_null_marker();
 726           } else {
 727             Node* nm_offset = igvn->MakeConX(ft->as_inline_klass()->null_marker_offset_in_payload());
 728             Node* nm_adr = igvn->register_new_node_with_optimizer(new AddPNode(base, other_field, nm_offset));
 729             C2AccessValuePtr addr(nm_adr, nm_adr->bottom_type()->is_ptr());
 730             C2OptAccess access(*igvn, *ctrl, local_mem, decorators, T_BOOLEAN, base, addr);
 731             null_marker = bs->load_at(access, TypeInt::BOOL);
 732           }
 733           // Return false if null markers are not equal
 734           acmp_val_guard(igvn, region, phi, ctrl, T_INT, BoolTest::ne, this_field->as_InlineType()->get_null_marker(), null_marker);
 735 
 736           // Null markers are equal. If both operands are null, skip the comparison of the fields.
 737           acmp_val_guard(igvn, done_region, nullptr, ctrl, T_INT, BoolTest::eq, this_field->as_InlineType()->get_null_marker(), igvn->intcon(0));
 738         } else {
 739           // Non-flat field, check if oop is null
 740 
 741           // Check if 'this' is null
 742           RegionNode* not_null_region = new RegionNode(1);
 743           acmp_val_guard(igvn, not_null_region, nullptr, ctrl, T_INT, BoolTest::ne, this_field->as_InlineType()->get_null_marker(), igvn->intcon(0));
 744 
 745           // 'this' is null. If 'other' is non-null, return false.
 746           acmp_val_guard(igvn, region, phi, ctrl, T_OBJECT, BoolTest::ne, other_field, igvn->zerocon(T_OBJECT));
 747 
 748           // Both are null, skip comparing the fields
 749           done_region->add_req(*ctrl);
 750 
 751           // 'this' is not null. If 'other' is null, return false.
 752           *ctrl = igvn->register_new_node_with_optimizer(not_null_region);
 753           acmp_val_guard(igvn, region, phi, ctrl, T_OBJECT, BoolTest::eq, other_field, igvn->zerocon(T_OBJECT));
 754         }
 755       }
 756       // Both operands are non-null, compare all the fields recursively
 757       this_field->as_InlineType()->check_substitutability(igvn, region, phi, ctrl, mem, other_base, other_field, field->is_flat());
 758 
 759       done_region->add_req(*ctrl);
 760       *ctrl = igvn->register_new_node_with_optimizer(done_region);
 761     } else {
 762       assert(ft->is_primitive_type() || !ft->as_klass()->can_be_inline_klass(), "Needs substitutability test");
 763       acmp_val_guard(igvn, region, phi, ctrl, bt, BoolTest::ne, this_field, other_field);
 764     }
 765   }
 766 }
 767 
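// Returns an equivalent inline type that is known to be buffered: if 'this' might not be buffered yet, a new
// buffer is allocated and initialized with the field values (a null value simply uses the null oop). The
// resulting oop, the buffered state and (if 'safe_for_replace') the map are updated accordingly.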
 768 InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
 769   if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
 770     // Already buffered
 771     return this;
 772   }
 773 
 774   // Check if inline type is already buffered
 775   Node* not_buffered_ctl = kit->top();
 776   Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
 777   if (not_buffered_ctl->is_top()) {
 778     // Already buffered
 779     InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 780     vt->set_is_buffered(kit->gvn());
 781     vt = kit->gvn().transform(vt)->as_InlineType();
 782     if (safe_for_replace) {
 783       kit->replace_in_map(this, vt);
 784     }
 785     return vt;
 786   }
 787   Node* buffered_ctl = kit->control();
 788   kit->set_control(not_buffered_ctl);
 789 
 790   // Inline type is not buffered, check if it is null.
 791   Node* null_ctl = kit->top();
 792   kit->null_check_common(get_null_marker(), T_INT, false, &null_ctl);
 793   bool null_free = null_ctl->is_top();
 794 
 795   RegionNode* region = new RegionNode(4);
 796   PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));
 797 
 798   // InlineType is already buffered
 799   region->init_req(1, buffered_ctl);
 800   oop->init_req(1, not_null_oop);
 801 
 802   // InlineType is null
 803   region->init_req(2, null_ctl);
 804   oop->init_req(2, kit->gvn().zerocon(T_OBJECT));
 805 
 806   PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
 807   PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);
 808 
 809   if (!kit->stopped()) {
 810     assert(!is_allocated(&kit->gvn()), "already buffered");
 811     PreserveJVMState pjvms(kit);
 812     ciInlineKlass* vk = inline_klass();
 813     // Allocate and initialize buffer, re-execute on deoptimization.
 814     kit->jvms()->set_bci(kit->bci());
 815     kit->jvms()->set_should_reexecute(true);
 816     kit->kill_dead_locals();
 817     Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
 818     Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
 819     Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
 820     store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);
 821 
 822     // Do not let stores that initialize this buffer be reordered with a subsequent
 823     // store that would make this buffer accessible by other threads.
 824     AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
 825     assert(alloc != nullptr, "must have an allocation node");
 826     kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 827     oop->init_req(3, alloc_oop);
 828     region->init_req(3, kit->control());
 829     io    ->init_req(3, kit->i_o());
 830     mem   ->init_req(3, kit->merged_memory());
 831   }
 832 
 833   // Update GraphKit
 834   kit->set_control(kit->gvn().transform(region));
 835   kit->set_i_o(kit->gvn().transform(io));
 836   kit->set_all_memory(kit->gvn().transform(mem));
 837   kit->record_for_igvn(region);
 838   kit->record_for_igvn(oop);
 839   kit->record_for_igvn(io);
 840   kit->record_for_igvn(mem);
 841 
 842   // Use cloned InlineTypeNode to propagate oop from now on
 843   Node* res_oop = kit->gvn().transform(oop);
 844   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
 845   vt->set_oop(kit->gvn(), res_oop);
 846   vt->set_is_buffered(kit->gvn());
 847   vt = kit->gvn().transform(vt)->as_InlineType();
 848   if (safe_for_replace) {
 849     kit->replace_in_map(this, vt);
 850   }
 851   // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
 852   // Make sure it gets a chance to remove this allocation.
 853   kit->C->set_has_split_ifs(true);
 854   return vt;
 855 }
 856 
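// Returns true if this inline type is known to be buffered, i.e. its is_buffered input is the constant true
// or its oop is known to be non-null.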
 857 bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
 858   if (phase->find_int_con(get_is_buffered(), 0) == 1) {
 859     return true;
 860   }
 861   Node* oop = get_oop();
 862   const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
 863   return !oop_type->maybe_null();
 864 }
 865 
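// Replaces the result projection of 'call' at 'proj_idx' (if any) by 'value' and advances 'proj_idx' by the
// size of 'bt'.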
 866 static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
 867   ProjNode* pn = call->proj_out_or_null(proj_idx);
 868   if (pn != nullptr) {
 869     C->gvn_replace_by(pn, value);
 870     C->initial_gvn()->hash_delete(pn);
 871     pn->set_req(0, C->top());
 872   }
 873   proj_idx += type2size[bt];
 874 }
 875 
 876 // When a call returns multiple values, it has several result
 877 // projections, one per field. Replacing the result of the call by an
 878 // inline type node (after late inlining) requires that for each result
 879 // projection, we find the corresponding inline type field.
 880 void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
 881   uint proj_idx = TypeFunc::Parms;
 882   // Replace oop projection
 883   replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
 884   // Replace field projections
 885   replace_field_projs(C, call, proj_idx);
 886   // Replace null_marker projection
 887   replace_proj(C, call, proj_idx, get_null_marker(), T_BOOLEAN);
 888   assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
 889 }
 890 
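// Replaces the result projections of 'call' that correspond to the field values of this inline type,
// recursing into flat fields and their null markers.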
 891 void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
 892   for (uint i = 0; i < field_count(); ++i) {
 893     Node* value = field_value(i);
 894     ciField* field = this->field(i);
 895     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 896     if (field->is_flat()) {
 897       InlineTypeNode* vt = value->as_InlineType();
 898       // Replace field projections for flat field
 899       vt->replace_field_projs(C, call, proj_idx);
 900       if (!field->is_null_free()) {
 901         // Replace null_marker projection for nullable field
 902         replace_proj(C, call, proj_idx, vt->get_null_marker(), T_BOOLEAN);
 903       }
 904       continue;
 905     }
 906     // Replace projection for field value
 907     replace_proj(C, call, proj_idx, value, field->type()->basic_type());
 908   }
 909 }
 910 
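// Returns a copy of this inline type in which all non-flat inline type field values are buffered (recursing
// into flat fields) and replaces 'this' by the copy in the map.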
 911 InlineTypeNode* InlineTypeNode::allocate_fields(GraphKit* kit) {
 912   InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
 913   for (uint i = 0; i < field_count(); i++) {
 914     Node* value = field_value(i);
 915     ciField* field = this->field(i);
 916     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
 917      if (field->is_flat()) {
 918        // Flat inline type field
 919        vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
 920      } else if (value->is_InlineType()) {
 921        // Non-flat inline type field
 922        vt->set_field_value(i, value->as_InlineType()->buffer(kit));
 923      }
 924   }
 925   vt = kit->gvn().transform(vt)->as_InlineType();
 926   kit->replace_in_map(this, vt);
 927   return vt;
 928 }
 929 
 930 // Replace a buffer allocation by a dominating allocation
 931 static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
 932   // Remove initializing stores and GC barriers
 933   for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
 934     Node* use = res->fast_out(i);
 935     if (use->is_AddP()) {
 936       for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
 937         Node* store = use->fast_out(j)->isa_Store();
 938         if (store != nullptr) {
 939           igvn->rehash_node_delayed(store);
 940           igvn->replace_in_uses(store, store->in(MemNode::Memory));
 941         }
 942       }
 943     } else if (use->Opcode() == Op_CastP2X) {
 944       if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
 945         // The G1 pre-barrier uses a CastP2X both for the pointer of the object
 946         // we store into, as well as the value we are storing. Skip if this is a
 947         // barrier for storing 'res' into another object.
 948         continue;
 949       }
 950       BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
 951       bs->eliminate_gc_barrier(igvn, use);
 952       --i; --imax;
 953     }
 954   }
 955   igvn->replace_node(res, dom);
 956 }
 957 
 958 Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 959   Node* oop = get_oop();
 960   Node* is_buffered = get_is_buffered();
 961 
 962   if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
 963     InlineTypeNode* vtptr = oop->as_InlineType();
 964     set_oop(*phase, vtptr->get_oop());
 965     set_is_buffered(*phase);
 966     set_null_marker(*phase);
 967     for (uint i = Values; i < vtptr->req(); ++i) {
 968       set_req(i, vtptr->in(i));
 969     }
 970     return this;
 971   }
 972 
  // Use the base oop if the fields are loaded from memory, but don't do so if the base is the CheckCastPP
  // of an allocation, because the only case in which we load from a naked CheckCastPP is when we exit a
  // constructor of an inline type and want to relinquish the larval oop there. This has a
  // couple of benefits:
  // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
  // - The InlineTypeNode without an allocation input is more likely to be GVN-ed. This may arise
  //   when we try to clone a value object.
  // - The buffering, if needed, is delayed until it is required. Since the new allocation is
  //   created from an InlineTypeNode, it is recognized as not having a unique identity and, in the
  //   future, we can move it around more freely, e.g. hoist it out of loops. This is not true
  //   for the old allocation, since larval value objects do have unique identities.
 984   Node* base = is_loaded(phase);
 985   if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && phase->C->allow_macro_nodes() && AllocateNode::Ideal_allocation(base) == nullptr) {
 986     if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
 987       set_oop(*phase, base);
 988       set_is_buffered(*phase);
 989       return this;
 990     }
 991   }
 992 
 993   if (can_reshape) {
 994     PhaseIterGVN* igvn = phase->is_IterGVN();
 995     if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones:
      // they will be removed anyway, and changing the memory chain would confuse other optimizations.
 998       // This can happen with late inlining when we first allocate an inline type argument
 999       // but later decide to inline the call after the callee code also triggered allocation.
1000       for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1001         AllocateNode* alloc = fast_out(i)->isa_Allocate();
1002         if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1003           // Found a re-allocation
1004           Node* res = alloc->result_cast();
1005           if (res != nullptr && res->is_CheckCastPP()) {
1006             // Replace allocation by oop and unlink AllocateNode
1007             replace_allocation(igvn, res, oop);
1008             igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
1009             --i; --imax;
1010           }
1011         }
1012       }
1013     }
1014   }
1015 
1016   return nullptr;
1017 }
1018 
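// The field value inputs are left unset and must be initialized by the caller (for example via 'load').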
1019 InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
1020   // Create a new InlineTypeNode with uninitialized values and nullptr oop
1021   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
1022   vt->set_is_buffered(gvn, false);
1023   vt->set_null_marker(gvn);
1024   return vt;
1025 }
1026 
1027 InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
1028   GrowableArray<ciType*> visited;
1029   visited.push(vk);
1030   return make_all_zero_impl(gvn, vk, visited);
1031 }
1032 
1033 InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1034   // Create a new InlineTypeNode initialized with all zero
1035   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
1036   vt->set_is_buffered(gvn, false);
1037   vt->set_null_marker(gvn);
1038   for (uint i = 0; i < vt->field_count(); ++i) {
1039     ciField* field = vt->field(i);
1040     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1041     ciType* ft = field->type();
1042     Node* value = gvn.zerocon(ft->basic_type());
1043     if (!field->is_flat() && visited.contains(ft)) {
1044       gvn.C->set_has_circular_inline_type(true);
1045     } else if (ft->is_inlinetype()) {
1046       int old_len = visited.length();
1047       visited.push(ft);
1048       ciInlineKlass* vk = ft->as_inline_klass();
1049       if (field->is_null_free()) {
1050         value = make_all_zero_impl(gvn, vk, visited);
1051       } else {
1052         value = make_null_impl(gvn, vk, visited);
1053       }
1054       visited.trunc_to(old_len);
1055     }
1056     vt->set_field_value(i, value);
1057   }
1058   vt = gvn.transform(vt)->as_InlineType();
1059   assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
1060   return vt;
1061 }
1062 
1063 bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
1064   const TypeInt* tinit = gvn->type(get_null_marker())->isa_int();
1065   if (tinit == nullptr || !tinit->is_con(1)) {
1066     return false; // May be null
1067   }
1068   for (uint i = 0; i < field_count(); ++i) {
1069     Node* value = field_value(i);
1070     ciField* field = this->field(i);
1071     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1072     if (field->is_null_free()) {
1073       // Null-free value class field must have the all-zero value. If 'flat' is set,
1074       // reject non-flat fields because they need to be initialized with an oop to a buffer.
1075       if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field->is_flat())) {
1076         return false;
1077       }
1078       continue;
1079     } else if (value->is_InlineType()) {
1080       // Nullable value class field must be null
1081       tinit = gvn->type(value->as_InlineType()->get_null_marker())->isa_int();
1082       if (tinit != nullptr && tinit->is_con(0)) {
1083         continue;
1084       }
1085       return false;
1086     } else if (!gvn->type(value)->is_zero_type()) {
1087       return false;
1088     }
1089   }
1090   return true;
1091 }
1092 
1093 InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
1094   GrowableArray<ciType*> visited;
1095   visited.push(vk);
1096   return make_from_oop_impl(kit, oop, vk, visited);
1097 }
1098 
1099 InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
1100   PhaseGVN& gvn = kit->gvn();
1101 
1102   // Create and initialize an InlineTypeNode by loading all field
1103   // values from a heap-allocated version and also save the oop.
1104   InlineTypeNode* vt = nullptr;
1105 
1106   if (oop->isa_InlineType()) {
1107     return oop->as_InlineType();
1108   }
1109 
1110   if (gvn.type(oop)->maybe_null()) {
1111     // Add a null check because the oop may be null
1112     Node* null_ctl = kit->top();
1113     Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
1114     if (kit->stopped()) {
1115       // Constant null
1116       kit->set_control(null_ctl);
1117       vt = make_null_impl(gvn, vk, visited);
1118       kit->record_for_igvn(vt);
1119       return vt;
1120     }
1121     vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
1122     vt->set_is_buffered(gvn);
1123     vt->set_null_marker(gvn);
1124     Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
1125     vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1126 
1127     if (null_ctl != kit->top()) {
1128       InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
1129       Node* region = new RegionNode(3);
1130       region->init_req(1, kit->control());
1131       region->init_req(2, null_ctl);
1132       vt = vt->clone_with_phis(&gvn, region, kit->map());
1133       vt->merge_with(&gvn, null_vt, 2, true);
1134       vt->set_oop(gvn, oop);
1135       kit->set_control(gvn.transform(region));
1136     }
1137   } else {
1138     // Oop can never be null
1139     vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
1140     Node* init_ctl = kit->control();
1141     vt->set_is_buffered(gvn);
1142     vt->set_null_marker(gvn);
1143     Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
1144     vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
1145 // TODO 8284443
1146 //    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
1147 //           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
1148   }
1149   assert(vt->is_allocated(&gvn), "inline type should be allocated");
1150   kit->record_for_igvn(vt);
1151   return gvn.transform(vt)->as_InlineType();
1152 }
1153 
1154 InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
1155                                                bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
1156   GrowableArray<ciType*> visited;
1157   visited.push(vk);
1158   return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
1159 }
1160 
// Implementation of 'make_from_flat': loads an inline type from its flat representation in memory.
1162 InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
1163                                                     bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
1164   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1165   PhaseGVN& gvn = kit->gvn();
1166   bool do_atomic = atomic;
1167   // With immutable memory, a non-atomic load and an atomic load are the same
1168   if (immutable_memory) {
1169     do_atomic = false;
1170   }
1171   // If there is only one flattened field, a non-atomic load and an atomic load are the same
1172   if (vk->is_naturally_atomic(null_free)) {
1173     do_atomic = false;
1174   }
1175 
1176   if (!do_atomic) {
1177     InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1178     if (!null_free) {
1179       int nm_offset = vk->null_marker_offset_in_payload();
1180       Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
1181       const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
1182       Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
1183       vt->set_req(NullMarker, nm_value);
1184     }
1185 
1186     vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
1187     return gvn.transform(vt)->as_InlineType();
1188   }
1189 
1190   assert(!immutable_memory, "immutable memory does not need explicit atomic access");
1191   return LoadFlatNode::load(kit, vk, base, ptr, null_free, trust_null_free_oop, decorators);
1192 }
1193 
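// Loads an inline type from the flat array 'base' at index 'idx'. Dispatches at runtime on the array layout
// (nullable atomic, null-free atomic, null-free non-atomic) and loads the element with the matching layout.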
1194 InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
1195   assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
1196   PhaseGVN& gvn = kit->gvn();
1197   DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
1198   kit->C->set_flat_accesses();
1199   InlineTypeNode* vt_nullable = nullptr;
1200   InlineTypeNode* vt_null_free = nullptr;
1201   InlineTypeNode* vt_non_atomic = nullptr;
1202 
1203   RegionNode* region = new RegionNode(4);
1204   gvn.set_type(region, Type::CONTROL);
1205   kit->record_for_igvn(region);
1206 
1207   Node* input_memory_state = kit->reset_memory();
1208   kit->set_all_memory(input_memory_state);
1209 
1210   PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
1211   gvn.set_type(mem, Type::MEMORY);
1212   kit->record_for_igvn(mem);
1213 
1214   PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
1215   gvn.set_type(io, Type::ABIO);
1216   kit->record_for_igvn(io);
1217 
  Node* bol_null_free = kit->null_free_array_test(base); // Evaluation order of function arguments is unspecified in C++; since this sets control, it must be evaluated before the call below
1219   IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);
1220 
1221   // Nullable
1222   kit->set_control(kit->IfFalse(iff_null_free));
1223   if (!kit->stopped()) {
1224     assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
1225     kit->set_all_memory(input_memory_state);
1226     Node* cast = kit->cast_to_flat_array_exact(base, vk, false, true);
1227     Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1228     vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);
1229 
1230     region->init_req(1, kit->control());
1231     mem->set_req(1, kit->reset_memory());
1232     io->set_req(1, kit->i_o());
1233   }
1234 
1235   // Null-free
1236   kit->set_control(kit->IfTrue(iff_null_free));
1237   if (!kit->stopped()) {
1238     kit->set_all_memory(input_memory_state);
1239 
1240     Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
1241     IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);
1242 
1243     // Atomic
1244     kit->set_control(kit->IfTrue(iff_atomic));
1245     if (!kit->stopped()) {
1246       assert(vk->has_null_free_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
1247       kit->set_all_memory(input_memory_state);
1248       Node* cast = kit->cast_to_flat_array_exact(base, vk, true, true);
1249       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1250       vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);
1251 
1252       region->init_req(2, kit->control());
1253       mem->set_req(2, kit->reset_memory());
1254       io->set_req(2, kit->i_o());
1255     }
1256 
1257     // Non-Atomic
1258     kit->set_control(kit->IfFalse(iff_atomic));
1259     if (!kit->stopped()) {
1260       assert(vk->has_null_free_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
1261       kit->set_all_memory(input_memory_state);
1262       Node* cast = kit->cast_to_flat_array_exact(base, vk, true, false);
1263       Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
1264       vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);
1265 
1266       region->init_req(3, kit->control());
1267       mem->set_req(3, kit->reset_memory());
1268       io->set_req(3, kit->i_o());
1269     }
1270   }
1271 
1272   InlineTypeNode* vt = nullptr;
1273   if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
1274     // All paths are dead
1275     vt = make_null(gvn, vk);
1276   } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
1277     vt = vt_non_atomic;
1278   } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
1279     vt = vt_null_free;
1280   } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
1281     vt = vt_nullable;
1282   }
1283   if (vt != nullptr) {
1284     kit->set_control(kit->gvn().transform(region));
1285     kit->set_all_memory(kit->gvn().transform(mem));
1286     kit->set_i_o(kit->gvn().transform(io));
1287     return vt;
1288   }
1289 
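  // At least two paths are live: merge the inline types from the live paths with phis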
1290   InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
1291   vt = zero->clone_with_phis(&gvn, region);
1292   if (vt_nullable != nullptr) {
1293     vt = vt->merge_with(&gvn, vt_nullable, 1, false);
1294   }
1295   if (vt_null_free != nullptr) {
1296     vt = vt->merge_with(&gvn, vt_null_free, 2, false);
1297   }
1298   if (vt_non_atomic != nullptr) {
1299     vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
1300   }
1301 
1302   kit->set_control(kit->gvn().transform(region));
1303   kit->set_all_memory(kit->gvn().transform(mem));
1304   kit->set_i_o(kit->gvn().transform(io));
1305   return gvn.transform(vt)->as_InlineType();
1306 }
1307 
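// Create an InlineTypeNode from the incoming arguments or return values of 'multi' (method entry or call)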
1308 InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
1309   InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
1310   if (!in) {
1311     // Keep track of the oop. The returned inline type might already be buffered.
1312     Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
1313     vt->set_oop(kit->gvn(), oop);
1314   }
1315   GrowableArray<ciType*> visited;
1316   visited.push(vk);
1317   vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
1318   return kit->gvn().transform(vt)->as_InlineType();
1319 }
1320 
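// Check if all field values of this inline type are loads from a common base object at offsets matching the
// layout of 'vk'. Returns the base oop if so, nullptr otherwise.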
1321 Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
1322   if (vk == nullptr) {
1323     vk = inline_klass();
1324   }
1325   for (uint i = 0; i < field_count(); ++i) {
1326     ciField* field = this->field(i);
1327     int offset = holder_offset + field->offset_in_bytes();
1328     Node* value = field_value(i);
1329     if (value->is_InlineType()) {
1330       assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1331       InlineTypeNode* vt = value->as_InlineType();
1332       if (vt->type()->inline_klass()->is_empty()) {
1333         continue;
1334       } else if (field->is_flat() && vt->is_InlineType()) {
1335         // Check inline type field load recursively
1336         base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
1337         if (base == nullptr) {
1338           return nullptr;
1339         }
1340         continue;
1341       } else {
1342         value = vt->get_oop();
1343         if (value->Opcode() == Op_CastPP) {
1344           // Skip CastPP
1345           value = value->in(1);
1346         }
1347       }
1348     }
1349     if (value->isa_DecodeN()) {
1350       // Skip DecodeN
1351       value = value->in(1);
1352     }
1353     if (value->isa_Load()) {
1354       // Check if base and offset of field load matches inline type layout
1355       intptr_t loffset = 0;
1356       Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
1357       if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
1358         return nullptr;
1359       } else if (base == nullptr) {
1360         // Set base and check if pointer type matches
1361         base = lbase;
1362         const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
1363         if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
1364           return nullptr;
1365         }
1366       }
1367     } else {
1368       return nullptr;
1369     }
1370   }
1371   return base;
1372 }
1373 
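// Return the klass pointer of 'vk' as a long constant with the lowest bit set (tagged klass pointer)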
1374 Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
1375   const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
1376   intptr_t bits = tk->get_con();
1377   set_nth_bit(bits, 0);
1378   return gvn.longcon((jlong)bits);
1379 }
1380 
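// Pass the scalarized field values of this inline type to node 'n' (call or return), starting at 'base_input'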
1381 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1382   if (!null_free && in) {
1383     n->init_req(base_input++, get_null_marker());
1384   }
1385   for (uint i = 0; i < field_count(); i++) {
1386     Node* arg = field_value(i);
1387     ciField* field = this->field(i);
1388     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1389     if (field->is_flat()) {
1390       // Flat inline type field
1391       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1392       if (!field->is_null_free()) {
1393         assert(field->null_marker_offset() != -1, "inconsistency");
1394         n->init_req(base_input++, arg->as_InlineType()->get_null_marker());
1395       }
1396     } else {
1397       if (arg->is_InlineType()) {
1398         // Non-flat inline type field
1399         InlineTypeNode* vt = arg->as_InlineType();
1400         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1401         arg = vt->buffer(kit);
1402       }
1403       // Initialize call/return arguments
1404       n->init_req(base_input++, arg);
1405       if (field->type()->size() == 2) {
1406         n->init_req(base_input++, kit->top());
1407       }
1408     }
1409   }
  // The last argument is used to pass the null marker to compiled code and is not required here.
1411   if (!null_free && !in) {
1412     n->init_req(base_input++, kit->top());
1413   }
1414 }
1415 
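// Initialize the field values of this inline type from the incoming arguments or return values of 'multi'
// (a Start or Call node), starting at 'base_input'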
1416 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool no_null_marker, Node* null_check_region, GrowableArray<ciType*>& visited) {
1417   PhaseGVN& gvn = kit->gvn();
1418   Node* null_marker = nullptr;
1419   if (!no_null_marker) {
1420     // Nullable inline type
1421     if (in) {
1422       // Set null marker
1423       if (multi->is_Start()) {
1424         null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1425       } else {
1426         null_marker = multi->as_Call()->in(base_input);
1427       }
1428       set_req(NullMarker, null_marker);
1429       base_input++;
1430     }
    // Add a null check to make subsequent field loads dependent on it
1432     assert(null_check_region == nullptr, "already set");
1433     if (null_marker == nullptr) {
1434       // Will only be initialized below, use dummy node for now
1435       null_marker = new Node(1);
1436       null_marker->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1437       gvn.set_type_bottom(null_marker);
1438     }
1439     Node* null_ctrl = kit->top();
1440     kit->null_check_common(null_marker, T_INT, false, &null_ctrl);
1441     Node* non_null_ctrl = kit->control();
1442     null_check_region = new RegionNode(3);
1443     null_check_region->init_req(1, non_null_ctrl);
1444     null_check_region->init_req(2, null_ctrl);
1445     null_check_region = gvn.transform(null_check_region);
1446     kit->set_control(null_check_region);
1447   }
1448 
1449   for (uint i = 0; i < field_count(); ++i) {
1450     ciField* field = this->field(i);
1451     ciType* type = field->type();
1452     Node* parm = nullptr;
1453     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1454     if (field->is_flat()) {
1455       // Flat inline type field
1456       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field->is_null_free());
1457       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1458       if (!field->is_null_free()) {
1459         assert(field->null_marker_offset() != -1, "inconsistency");
1460         Node* null_marker = nullptr;
1461         if (multi->is_Start()) {
1462           null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1463         } else if (in) {
1464           null_marker = multi->as_Call()->in(base_input);
1465         } else {
1466           null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1467         }
1468         vt->set_req(NullMarker, null_marker);
1469         base_input++;
1470       }
1471       parm = gvn.transform(vt);
1472     } else {
1473       if (multi->is_Start()) {
1474         assert(in, "return from start?");
1475         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1476       } else if (in) {
1477         parm = multi->as_Call()->in(base_input);
1478       } else {
1479         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1480       }
1481       // Non-flat inline type field
1482       if (type->is_inlinetype()) {
1483         if (null_check_region != nullptr) {
1484           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1485           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1486           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1487           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1488             parm = parm->as_InlineType()->get_oop();
1489           }
          // Holder is nullable: set the field to nullptr if the holder is nullptr to avoid loading from uninitialized memory
1491           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1492           parm->set_req(2, kit->zerocon(T_OBJECT));
1493           parm = gvn.transform(parm);
1494         }
1495         if (visited.contains(type)) {
1496           kit->C->set_has_circular_inline_type(true);
1497         } else if (!parm->is_InlineType()) {
1498           int old_len = visited.length();
1499           visited.push(type);
1500           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1501           visited.trunc_to(old_len);
1502         }
1503       }
1504       base_input += type->size();
1505     }
1506     assert(parm != nullptr, "should never be null");
1507     assert(field_value(i) == nullptr, "already set");
1508     set_field_value(i, parm);
1509     gvn.record_for_igvn(parm);
1510   }
1511   // The last argument is used to pass the null marker to compiled code
1512   if (!no_null_marker && !in) {
1513     Node* cmp = null_marker->raw_out(0);
1514     null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1515     set_req(NullMarker, null_marker);
1516     gvn.hash_delete(cmp);
1517     cmp->set_req(1, null_marker);
1518     gvn.hash_find_insert(cmp);
1519     gvn.record_for_igvn(cmp);
1520     base_input++;
1521   }
1522 }
1523 
1524 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1525 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1526 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1527   PhaseIterGVN* igvn = &phase->igvn();
1528   // Search for allocations of this inline type. Ignore scalar replaceable ones, they
1529   // will be removed anyway and changing the memory chain will confuse other optimizations.
1530   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1531     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1532     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1533       Node* res = alloc->result_cast();
1534       if (res == nullptr || !res->is_CheckCastPP()) {
1535         break; // No unique CheckCastPP
1536       }
1537       // Search for a dominating allocation of the same inline type
1538       Node* res_dom = res;
1539       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1540         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1541         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1542           Node* res_other = alloc_other->result_cast();
1543           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1544               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1545             res_dom = res_other;
1546           }
1547         }
1548       }
1549       if (res_dom != res) {
1550         // Replace allocation by dominating one.
1551         replace_allocation(igvn, res, res_dom);
1552         // The result of the dominated allocation is now unused and will be removed
1553         // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
1554         igvn->_worklist.push(alloc);
1555       }
1556     }
1557   }
1558 }
1559 
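// Create an InlineTypeNode representing the null value of 'vk': null marker cleared and all fields zeroed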
1560 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1561   GrowableArray<ciType*> visited;
1562   visited.push(vk);
1563   return make_null_impl(gvn, vk, visited, transform);
1564 }
1565 
1566 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1567   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1568   vt->set_is_buffered(gvn);
1569   vt->set_null_marker(gvn, gvn.intcon(0));
1570   for (uint i = 0; i < vt->field_count(); i++) {
1571     ciField* field = vt->field(i);
1572     ciType* ft = field->type();
1573     Node* value = gvn.zerocon(ft->basic_type());
1574     assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
1575     if (!field->is_flat() && visited.contains(ft)) {
1576       gvn.C->set_has_circular_inline_type(true);
1577     } else if (ft->is_inlinetype()) {
1578       int old_len = visited.length();
1579       visited.push(ft);
1580       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1581       visited.trunc_to(old_len);
1582     }
1583     vt->set_field_value(i, value);
1584   }
1585   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1586 }
1587 
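// Return a clone of this node if it has uses other than 'map' (or if modifying it in place is not safe).
// Otherwise, remove it from the GVN hash table so it can be modified in place.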
1588 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1589   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1590     return clone()->as_InlineType();
1591   }
1592   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1593     if (fast_out(i) != map) {
1594       return clone()->as_InlineType();
1595     }
1596   }
1597   gvn->hash_delete(this);
1598   return this;
1599 }
1600 
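// Compute the type of this node, refining the declared type with the types of the oop and null marker inputs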
1601 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1602   Node* oop = get_oop();
1603   const Type* toop = phase->type(oop);
1604 #ifdef ASSERT
1605   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1606     // We are not allocated (anymore) and should therefore not have an instance id
1607     dump(1);
1608     assert(false, "Unbuffered inline type should not have known instance id");
1609   }
1610 #endif
1611   if (toop == Type::TOP) {
1612     return Type::TOP;
1613   }
1614   const Type* t = toop->filter_speculative(_type);
1615   if (t->singleton()) {
1616     // Don't replace InlineType by a constant
1617     t = _type;
1618   }
1619   const Type* tinit = phase->type(in(NullMarker));
1620   if (tinit == Type::TOP) {
1621     return Type::TOP;
1622   }
1623   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1624     t = t->join_speculative(TypePtr::NOTNULL);
1625   }
1626   return t;
1627 }
1628 
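// Create a LoadFlatNode that loads the flat field or array element at 'ptr' atomically, attach safepoint state,
// and return an InlineTypeNode whose field values are projections of the load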
1629 InlineTypeNode* LoadFlatNode::load(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool null_free, bool trust_null_free_oop, DecoratorSet decorators) {
1630   int output_type_size = vk->nof_nonstatic_fields() + (null_free ? 0 : 1);
1631   const Type** output_types = TypeTuple::fields(output_type_size);
1632   collect_field_types(vk, output_types + TypeFunc::Parms, 0, output_type_size, null_free, trust_null_free_oop);
1633   const TypeTuple* type = TypeTuple::make(output_type_size + TypeFunc::Parms, output_types);
1634 
1635   LoadFlatNode* load = new LoadFlatNode(vk, type, null_free, decorators);
1636   load->init_req(TypeFunc::Control, kit->control());
1637   load->init_req(TypeFunc::I_O, kit->top());
1638   load->init_req(TypeFunc::Memory, kit->reset_memory());
1639   load->init_req(TypeFunc::FramePtr, kit->frameptr());
1640   load->init_req(TypeFunc::ReturnAdr, kit->top());
1641 
1642   load->init_req(TypeFunc::Parms, base);
1643   load->init_req(TypeFunc::Parms + 1, ptr);
1644   kit->kill_dead_locals();
1645   kit->add_safepoint_edges(load);
1646   load = kit->gvn().transform(load)->as_LoadFlat();
1647   kit->record_for_igvn(load);
1648 
1649   kit->set_control(kit->gvn().transform(new ProjNode(load, TypeFunc::Control)));
1650   kit->set_all_memory(kit->gvn().transform(new ProjNode(load, TypeFunc::Memory)));
1651   return load->collect_projs(kit, vk, TypeFunc::Parms, null_free);
1652 }
1653 
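// Expand this LoadFlatNode into individual field loads. Returns false if the access is mismatched and cannot be
// expanded this way.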
1654 bool LoadFlatNode::expand_non_atomic(PhaseIterGVN& igvn) {
1655   assert(igvn.delay_transform(), "transformation must be delayed");
1656   if ((_decorators & C2_MISMATCHED) != 0) {
1657     return false;
1658   }
1659 
1660   GraphKit kit(jvms(), &igvn);
1661   kit.set_all_memory(kit.reset_memory());
1662 
1663   Node* base = this->base();
1664   Node* ptr = this->ptr();
1665 
1666   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1667     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1668     if (proj_out == nullptr) {
1669       continue;
1670     }
1671 
1672     ciField* field = _vk->nonstatic_field_at(i);
1673     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - _vk->payload_offset());
1674     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1675     igvn.set_type(field_ptr, field_ptr_type);
1676 
1677     Node* field_value = kit.access_load_at(base, field_ptr, field_ptr_type, igvn.type(proj_out), field->type()->basic_type(), _decorators);
1678     igvn.replace_node(proj_out, field_value);
1679   }
1680 
1681   if (!_null_free) {
1682     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1683     if (proj_out != nullptr) {
1684       Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, _vk->null_marker_offset_in_payload());
1685       const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1686       igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1687       Node* null_marker_value = kit.access_load_at(base, null_marker_ptr, null_marker_ptr_type, TypeInt::BOOL, T_BOOLEAN, _decorators);
1688       igvn.replace_node(proj_out, null_marker_value);
1689     }
1690   }
1691 
1692   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1693   if (old_ctrl != nullptr) {
1694     igvn.replace_node(old_ctrl, kit.control());
1695   }
1696   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1697   Node* new_mem = kit.reset_memory();
1698   if (old_mem != nullptr) {
1699     igvn.replace_node(old_mem, new_mem);
1700   }
1701   return true;
1702 }
1703 
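// Expand this LoadFlatNode into a single atomic load of the payload and extract the field values from it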
1704 void LoadFlatNode::expand_atomic(PhaseIterGVN& igvn) {
1705   assert(igvn.delay_transform(), "transformation must be delayed");
1706   GraphKit kit(jvms(), &igvn);
1707   kit.set_all_memory(kit.reset_memory());
1708 
1709   Node* base = this->base();
1710   Node* ptr = this->ptr();
1711 
1712   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1713   kit.insert_mem_bar(Op_MemBarCPUOrder);
1714   Node* payload = kit.access_load_at(base, ptr, TypeRawPtr::BOTTOM, Type::get_const_basic_type(payload_bt), payload_bt,
1715                                      _decorators | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD, kit.control());
1716   kit.insert_mem_bar(Op_MemBarCPUOrder);
1717 
1718   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1719   if (old_ctrl != nullptr) {
1720     igvn.replace_node(old_ctrl, kit.control());
1721   }
1722   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1723   Node* new_mem = kit.reset_memory();
1724   if (old_mem != nullptr) {
1725     igvn.replace_node(old_mem, new_mem);
1726   }
1727 
1728   expand_projs_atomic(igvn, kit.control(), payload);
1729 }
1730 
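// Collect the types of the flattened (non-static) fields of 'vk' into 'field_types', followed by the type of the
// null marker if the layout is not null-free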
1731 void LoadFlatNode::collect_field_types(ciInlineKlass* vk, const Type** field_types, int idx, int limit, bool null_free, bool trust_null_free_oop) {
1732   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1733   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1734     ciField* field = vk->declared_nonstatic_field_at(i);
1735     if (field->is_flat()) {
1736       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1737       collect_field_types(field_klass, field_types, idx, limit, field->is_null_free(), trust_null_free_oop && field->is_null_free());
1738       idx += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1739       continue;
1740     }
1741 
1742     const Type* field_type = Type::get_const_type(field->type());
1743     if (trust_null_free_oop && field->is_null_free()) {
1744       field_type = field_type->filter(TypePtr::NOTNULL);
1745     }
1746 
1747     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1748     field_types[idx] = field_type;
1749     idx++;
1750   }
1751 
1752   if (!null_free) {
1753     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1754     field_types[idx] = TypeInt::BOOL;
1755   }
1756 }
1757 
// Create an InlineTypeNode whose field values are extracted as projections of this LoadFlatNode
1760 InlineTypeNode* LoadFlatNode::collect_projs(GraphKit* kit, ciInlineKlass* vk, int proj_con, bool null_free) {
1761   PhaseGVN& gvn = kit->gvn();
1762   InlineTypeNode* res = InlineTypeNode::make_uninitialized(gvn, vk, null_free);
1763   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1764     ciField* field = vk->declared_nonstatic_field_at(i);
1765     Node* field_value;
1766     if (field->is_flat()) {
1767       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1768       field_value = collect_projs(kit, field_klass, proj_con, field->is_null_free());
1769       proj_con += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1770     } else {
1771       field_value = gvn.transform(new ProjNode(this, proj_con));
1772       if (field->type()->is_inlinetype()) {
1773         field_value = InlineTypeNode::make_from_oop(kit, field_value, field->type()->as_inline_klass());
1774       }
1775       proj_con++;
1776     }
1777     res->set_field_value(i, field_value);
1778   }
1779 
1780   if (null_free) {
1781     res->set_null_marker(gvn);
1782   } else {
1783     res->set_null_marker(gvn, gvn.transform(new ProjNode(this, proj_con)));
1784   }
1785   return gvn.transform(res)->as_InlineType();
1786 }
1787 
1788 // Extract the values of the flattened fields from the loaded payload
1789 void LoadFlatNode::expand_projs_atomic(PhaseIterGVN& igvn, Node* ctrl, Node* payload) {
1790   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1791   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1792     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1793     if (proj_out == nullptr) {
1794       continue;
1795     }
1796 
1797     ciField* field = _vk->nonstatic_field_at(i);
1798     int field_offset = field->offset_in_bytes() - _vk->payload_offset();
1799     const Type* field_type = igvn.type(proj_out);
1800     Node* field_value = get_payload_value(igvn, ctrl, payload_bt, payload, field_type, field->type()->basic_type(), field_offset);
1801     igvn.replace_node(proj_out, field_value);
1802   }
1803 
1804   if (!_null_free) {
1805     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1806     if (proj_out == nullptr) {
1807       return;
1808     }
1809 
1810     int null_marker_offset = _vk->null_marker_offset_in_payload();
1811     Node* null_marker_value = get_payload_value(igvn, ctrl, payload_bt, payload, TypeInt::BOOL, T_BOOLEAN, null_marker_offset);
1812     igvn.replace_node(proj_out, null_marker_value);
1813   }
1814 }
1815 
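// Extract the value of type 'value_bt' at 'offset' from the loaded payload by shifting and narrowing it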
1816 Node* LoadFlatNode::get_payload_value(PhaseIterGVN& igvn, Node* ctrl, BasicType payload_bt, Node* payload, const Type* value_type, BasicType value_bt, int offset) {
1817   assert((offset + type2aelembytes(value_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
1818   Node* value = nullptr;
1819   // Shift to the right position in the long value
1820   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
1821   if (payload_bt == T_LONG) {
1822     value = igvn.transform(new URShiftLNode(payload, shift_val));
1823     value = igvn.transform(new ConvL2INode(value));
1824   } else {
1825     value = igvn.transform(new URShiftINode(payload, shift_val));
1826   }
1827 
1828   if (value_bt == T_INT) {
1829     return value;
1830   } else if (!is_java_primitive(value_bt)) {
1831     assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1832     value = igvn.transform(new CastI2NNode(ctrl, value, value_type->make_narrowoop()));
1833     value = igvn.transform(new DecodeNNode(value, value_type));
1834 
1835     // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
1836     // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
1837     // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
1838     // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.
1839     return value;
1840   } else {
1841     // Make sure to zero unused bits in the 32-bit value
1842     return Compile::narrow_value(value_bt, value, nullptr, &igvn, true);
1843   }
1844 }
1845 
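// Create a StoreFlatNode that stores 'value' to the flat field or array element at 'ptr' atomically and attach
// safepoint state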
1846 void StoreFlatNode::store(GraphKit* kit, Node* base, Node* ptr, InlineTypeNode* value, bool null_free, DecoratorSet decorators) {
1847   value = value->allocate_fields(kit);
1848   StoreFlatNode* store = new StoreFlatNode(null_free, decorators);
1849   store->init_req(TypeFunc::Control, kit->control());
1850   store->init_req(TypeFunc::I_O, kit->top());
1851   store->init_req(TypeFunc::Memory, kit->reset_memory());
1852   store->init_req(TypeFunc::FramePtr, kit->frameptr());
1853   store->init_req(TypeFunc::ReturnAdr, kit->top());
1854 
1855   store->init_req(TypeFunc::Parms, base);
1856   store->init_req(TypeFunc::Parms + 1, ptr);
1857   store->init_req(TypeFunc::Parms + 2, value);
1858   kit->kill_dead_locals();
1859   kit->add_safepoint_edges(store);
1860   store = kit->gvn().transform(store)->as_StoreFlat();
1861   kit->record_for_igvn(store);
1862 
1863   kit->set_control(kit->gvn().transform(new ProjNode(store, TypeFunc::Control)));
1864   kit->set_all_memory(kit->gvn().transform(new ProjNode(store, TypeFunc::Memory)));
1865 }
1866 
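// Expand this StoreFlatNode into individual field stores. Returns false if the access is mismatched and cannot be
// expanded this way.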
1867 bool StoreFlatNode::expand_non_atomic(PhaseIterGVN& igvn) {
1868   assert(igvn.delay_transform(), "transformation must be delayed");
1869   if ((_decorators & C2_MISMATCHED) != 0) {
1870     return false;
1871   }
1872 
1873   GraphKit kit(jvms(), &igvn);
1874   kit.set_all_memory(kit.reset_memory());
1875 
1876   Node* base = this->base();
1877   Node* ptr = this->ptr();
1878   InlineTypeNode* value = this->value();
1879 
1880   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1881   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1882     ciField* field = vk->nonstatic_field_at(i);
1883     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - vk->payload_offset());
1884     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1885     igvn.set_type(field_ptr, field_ptr_type);
1886     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1887     Node* store = kit.access_store_at(base, field_ptr, field_ptr_type, field_value, igvn.type(field_value), field->type()->basic_type(), _decorators);
1888   }
1889 
1890   if (!_null_free) {
1891     Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, vk->null_marker_offset_in_payload());
1892     const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1893     igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1894     Node* null_marker_value = value->get_null_marker();
1895     Node* store = kit.access_store_at(base, null_marker_ptr, null_marker_ptr_type, null_marker_value, TypeInt::BOOL, T_BOOLEAN, _decorators);
1896   }
1897 
1898   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1899   if (old_ctrl != nullptr) {
1900     igvn.replace_node(old_ctrl, kit.control());
1901   }
1902   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1903   Node* new_mem = kit.reset_memory();
1904   if (old_mem != nullptr) {
1905     igvn.replace_node(old_mem, new_mem);
1906   }
1907   return true;
1908 }
1909 
1910 void StoreFlatNode::expand_atomic(PhaseIterGVN& igvn) {
1911   // Convert to a payload value <= 64-bit and write atomically.
1912   // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
1913   // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
1914   // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
1915   // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
1916   assert(igvn.delay_transform(), "transformation must be delayed");
1917   GraphKit kit(jvms(), &igvn);
1918   kit.set_all_memory(kit.reset_memory());
1919 
1920   Node* base = this->base();
1921   Node* ptr = this->ptr();
1922   InlineTypeNode* value = this->value();
1923 
1924   int oop_off_1 = -1;
1925   int oop_off_2 = -1;
1926   Node* payload = convert_to_payload(igvn, kit.control(), value, _null_free, oop_off_1, oop_off_2);
1927 
1928   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1929   assert(oop_off_1 == -1 || oop_off_1 == 0 || oop_off_1 == 4, "invalid layout for %s, first oop at offset %d", vk->name()->as_utf8(), oop_off_1);
1930   assert(oop_off_2 == -1 || oop_off_2 == 4, "invalid layout for %s, second oop at offset %d", vk->name()->as_utf8(), oop_off_2);
1931   BasicType payload_bt = vk->atomic_size_to_basic_type(_null_free);
1932   kit.insert_mem_bar(Op_MemBarCPUOrder);
1933   if (!UseG1GC || oop_off_1 == -1) {
1934     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
1935     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
    // ZGC does not support compressed oops, so at most one oop would fit in the payload; such a field is written by a "normal" oop store instead, leaving no oops embedded in the payload.
1937     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
1938     kit.access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, Type::get_const_basic_type(payload_bt), payload_bt, _decorators | C2_MISMATCHED, true, value);
1939   } else {
1940     // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
1941     assert(UseG1GC, "Unexpected GC");
1942     assert(payload_bt == T_LONG, "Unexpected payload type");
1943     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
1944     Node* oop_offset = (oop_off_2 == -1) ? igvn.intcon(oop_off_1) : nullptr;
1945     Node* mem = kit.reset_memory();
1946     kit.set_all_memory(mem);
1947     Node* store = igvn.transform(new StoreLSpecialNode(kit.control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
1948     kit.set_memory(store, TypeRawPtr::BOTTOM);
1949   }
1950   kit.insert_mem_bar(Op_MemBarCPUOrder);
1951 
1952   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1953   if (old_ctrl != nullptr) {
1954     igvn.replace_node(old_ctrl, kit.control());
1955   }
1956   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1957   Node* new_mem = kit.reset_memory();
1958   if (old_mem != nullptr) {
1959     igvn.replace_node(old_mem, new_mem);
1960   }
1961 }
1962 
// Convert the field values of 'value' to a payload value of the atomic basic type. The payload offsets of up to
// two narrow oop fields are returned in 'oop_off_1' and 'oop_off_2'.
1964 Node* StoreFlatNode::convert_to_payload(PhaseIterGVN& igvn, Node* ctrl, InlineTypeNode* value, bool null_free, int& oop_off_1, int& oop_off_2) {
1965   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1966   BasicType payload_bt = vk->atomic_size_to_basic_type(null_free);
1967   Node* payload = igvn.zerocon(payload_bt);
1968   if (!null_free) {
1969     // Set the null marker
1970     payload = set_payload_value(igvn, payload_bt, payload, T_BOOLEAN, value->get_null_marker(), vk->null_marker_offset_in_payload());
1971   }
1972 
1973   // Iterate over the fields and add their values to the payload
1974   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1975     ciField* field = vk->nonstatic_field_at(i);
1976     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1977     ciType* field_klass = field->type();
1978     BasicType field_bt = field_klass->basic_type();
1979     int field_offset_in_payload = field->offset_in_bytes() - vk->payload_offset();
1980     if (!field_klass->is_primitive_type()) {
1981       // Narrow oop field
1982       assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1983       if (oop_off_1 == -1) {
1984         oop_off_1 = field_offset_in_payload;
1985       } else {
1986         assert(oop_off_2 == -1, "already set");
1987         oop_off_2 = field_offset_in_payload;
1988       }
1989 
1990       const Type* val_type = Type::get_const_type(field_klass)->make_narrowoop();
1991       if (field_value->is_InlineType()) {
1992         assert(field_value->as_InlineType()->is_allocated(&igvn), "must be allocated");
1993       }
1994 
1995       field_value = igvn.transform(new EncodePNode(field_value, val_type));
1996       field_value = igvn.transform(new CastP2XNode(ctrl, field_value));
1997       field_value = igvn.transform(new ConvL2INode(field_value));
1998       field_bt = T_INT;
1999     }
2000     payload = set_payload_value(igvn, payload_bt, payload, field_bt, field_value, field_offset_in_payload);
2001   }
2002 
2003   return payload;
2004 }
2005 
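// Insert 'value' of type 'val_bt' into the payload at 'offset' by masking, shifting, and or-ing it in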
2006 Node* StoreFlatNode::set_payload_value(PhaseIterGVN& igvn, BasicType payload_bt, Node* payload, BasicType val_bt, Node* value, int offset) {
2007   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
2008 
2009   // Make sure to zero unused bits in the 32-bit value
2010   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
2011     value = igvn.transform(new AndINode(value, igvn.intcon(0xFF)));
2012   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
2013     value = igvn.transform(new AndINode(value, igvn.intcon(0xFFFF)));
2014   } else if (val_bt == T_FLOAT) {
2015     value = igvn.transform(new MoveF2INode(value));
2016   } else {
2017     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
2018   }
2019 
2020   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
2021   if (payload_bt == T_LONG) {
    // Convert to long and clear the sign-extension bits (the backend will fold this and emit a zero-extending i2l)
2023     value = igvn.transform(new ConvI2LNode(value));
2024     value = igvn.transform(new AndLNode(value, igvn.longcon(0xFFFFFFFF)));
2025 
2026     Node* shift_value = igvn.transform(new LShiftLNode(value, shift_val));
2027     payload = new OrLNode(shift_value, payload);
2028   } else {
2029     Node* shift_value = igvn.transform(new LShiftINode(value, shift_val));
2030     payload = new OrINode(shift_value, payload);
2031   }
2032   return igvn.transform(payload);
2033 }
2034 
2035 const Type* LoadFlatNode::Value(PhaseGVN* phase) const {
2036   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
2037       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP) {
2038     return Type::TOP;
2039   }
2040   return bottom_type();
2041 }
2042 
2043 const Type* StoreFlatNode::Value(PhaseGVN* phase) const {
2044   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
2045       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP || phase->type(value()) == Type::TOP) {
2046     return Type::TOP;
2047   }
2048   return bottom_type();
2049 }