/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/gc_globals.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/multnode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/globalDefinitions.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
// init_with_top: inputs of the phis above the returned InlineTypeNode are initialized to top.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_non_null, bool init_with_top) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  Node* const top = gvn->C->top();

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, init_with_top ? top : vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, init_with_top ? top : vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the null_marker values
  Node* null_marker_node;
  if (is_non_null) {
    null_marker_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    null_marker_node = PhiNode::make(region, init_with_top ? top : vt->get_null_marker(), t);
    gvn->set_type(null_marker_node, t);
    gvn->record_for_igvn(null_marker_node);
  }
  vt->set_req(NullMarker, null_marker_node);

  // Create a PhiNode for merging each of the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node*  value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, init_with_top ? top : value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = 0; i < field_count(); ++i) {
      Node* n = field_value(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int phi_index, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(phi_index, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(phi_index, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge null_marker inputs
  Node* null_marker = get_null_marker();
  if (null_marker->is_Phi()) {
    phi = null_marker->as_Phi();
    phi->set_req(phi_index, other->get_null_marker());
    if (transform) {
      set_req(NullMarker, gvn->transform(phi));
    }
  } else {
    assert(null_marker->find_int_con(0) == 1, "only with a non-null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 =        field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      if (val2->is_top()) {
        // The path where 'other' is used is dying. Therefore, we do not need to process the merge with 'other' further.
        // The phi inputs of 'this' at 'phi_index' will eventually be removed.
        break;
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), phi_index, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(phi_index, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_null_marker()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive) const {
  // Find the declared field which contains the field we are looking for
  int index = inline_klass()->field_index_by_offset(offset);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");

  if (!recursive || !field_is_flat(index) || value->is_top()) {
    assert(offset == field_offset(index), "offset mismatch");
    return value;
  }

  // Flat inline type field
  InlineTypeNode* vt = value->as_InlineType();
  if (offset == field_null_marker_offset(index)) {
    return vt->get_null_marker();
  } else {
    int sub_offset = offset - field_offset(index); // Offset of the flattened field inside the declared field
    sub_offset += vt->inline_klass()->payload_offset(); // Add header size
    return vt->field_value_by_offset(sub_offset, recursive);
  }
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

bool InlineTypeNode::field_is_volatile(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_volatile();
}

int InlineTypeNode::field_null_marker_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(field->is_flat(), "must be a flat field");
  return field->null_marker_offset();
}

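// Adds the field values of this inline type to the safepoint, recursively expanding flat fields
// (including their null markers). Returns the number of inputs added to the safepoint.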
uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt) {
  uint cnt = 0;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      cnt += vt->add_fields_to_safepoint(worklist, sfpt);
      if (!field_is_null_free(i)) {
        // The null marker of a flat field is added right after we scalarize that field
        sfpt->add_req(vt->get_null_marker());
        cnt++;
      }
      continue;
    }
    if (value->is_InlineType()) {
      // Add inline type to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
    cnt++;
  }
  return cnt;
}

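// Scalarizes this inline type in the debug info of the given safepoint by adding its field
// values to the safepoint and replacing its use with a SafePointScalarObjectNode.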
void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  JVMState* jvms = sfpt->jvms();
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());

  // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have a null marker field that
  // needs to be checked before using the field values.
  sfpt->add_req(get_null_marker());
  uint nfields = add_fields_to_safepoint(worklist, sfpt);
  jvms->set_endoff(sfpt->req());
  // Replace safepoint edge by SafePointScalarObjectNode
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

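// Scalarizes this inline type in the debug info of all safepoint uses. If the buffer oop is
// cheaply available (e.g. a constant, parameter or loaded oop), it is used instead to avoid
// keeping the field values alive just for the debug info.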
void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); ++i) {
    int field_off = field_offset(i) - vk->payload_offset();
    Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool field_null_free = field_is_null_free(i);
    if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      ciInlineKlass* fvk = ft->as_inline_klass();
      // Atomic if nullable or not LooselyConsistentValue
      bool atomic = !field_null_free || fvk->must_be_atomic();

      int old_len = visited.length();
      visited.push(ft);
      value = make_from_flat_impl(kit, fvk, base, field_ptr, atomic, immutable_memory,
                                  field_null_free, trust_null_free_oop && field_null_free, decorators, visited);
      visited.trunc_to(old_len);
    } else {
      // Load field value from memory
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || field_ptr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      if (trust_null_free_oop && field_null_free) {
        val_type = val_type->join_speculative(TypePtr::NOTNULL);
      }
      const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      value = kit->access_load_at(base, field_ptr, field_ptr_type, val_type, bt, decorators);
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

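// Stores this inline type in flat layout to memory at 'ptr' (within holder 'base'). Uses an
// atomic payload store (StoreFlatNode) if required by the layout; otherwise the null marker
// (if any) and the field values are stored individually.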
void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
  ciInlineKlass* vk = inline_klass();
  bool do_atomic = atomic;
  // With immutable memory, a non-atomic store and an atomic store are the same
  if (immutable_memory) {
    do_atomic = false;
  }
  // If there is only one flattened field, a non-atomic store and an atomic store are the same
  if (vk->is_naturally_atomic(null_free)) {
    do_atomic = false;
  }

  if (!do_atomic) {
    if (!null_free) {
      int nm_offset = vk->null_marker_offset_in_payload();
      Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
      const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      kit->access_store_at(base, nm_ptr, nm_ptr_type, get_null_marker(), TypeInt::BOOL, T_BOOLEAN, decorators);
    }
    store(kit, base, ptr, immutable_memory, decorators);
    return;
  }

  StoreFlatNode::store(kit, base, ptr, this, null_free, decorators);
}

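// Stores this inline type to a flat array element, dispatching at runtime on the array layout
// (nullable atomic, null-free atomic, or null-free non-atomic).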
void InlineTypeNode::store_flat_array(GraphKit* kit, Node* base, Node* idx) {
  PhaseGVN& gvn = kit->gvn();
  DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED;
  kit->C->set_flat_accesses();
  ciInlineKlass* vk = inline_klass();
  assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());

  RegionNode* region = new RegionNode(4);
  gvn.set_type(region, Type::CONTROL);
  kit->record_for_igvn(region);

  Node* input_memory_state = kit->reset_memory();
  kit->set_all_memory(input_memory_state);

  PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
  gvn.set_type(mem, Type::MEMORY);
  kit->record_for_igvn(mem);

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  gvn.set_type(io, Type::ABIO);
  kit->record_for_igvn(io);

  Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
  IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);

  // Nullable
  kit->set_control(kit->IfFalse(iff_null_free));
  if (!kit->stopped()) {
    assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
    kit->set_all_memory(input_memory_state);
    Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
    Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
    store_flat(kit, cast, ptr, true, false, false, decorators);

    region->init_req(1, kit->control());
    mem->set_req(1, kit->reset_memory());
    io->set_req(1, kit->i_o());
  }

  // Null-free
  kit->set_control(kit->IfTrue(iff_null_free));
  if (!kit->stopped()) {
    kit->set_all_memory(input_memory_state);

    Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
    IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);

    // Atomic
    kit->set_control(kit->IfTrue(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      store_flat(kit, cast, ptr, true, false, true, decorators);

      region->init_req(2, kit->control());
      mem->set_req(2, kit->reset_memory());
      io->set_req(2, kit->i_o());
    }

    // Non-atomic
    kit->set_control(kit->IfFalse(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      store_flat(kit, cast, ptr, false, false, true, decorators);

      region->init_req(3, kit->control());
      mem->set_req(3, kit->reset_memory());
      io->set_req(3, kit->i_o());
    }
  }

  kit->set_control(gvn.transform(region));
  kit->set_all_memory(gvn.transform(mem));
  kit->set_i_o(gvn.transform(io));
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, bool immutable_memory, DecoratorSet decorators) const {
  // Write field values to memory
  ciInlineKlass* vk = inline_klass();
  for (uint i = 0; i < field_count(); ++i) {
    int field_off = field_offset(i) - vk->payload_offset();
    Node* field_val = field_value(i);
    bool field_null_free = field_is_null_free(i);
    ciType* ft = field_type(i);
    Node* field_ptr = kit->basic_plus_adr(base, ptr, field_off);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      ciInlineKlass* fvk = ft->as_inline_klass();
      // Atomic if nullable or not LooselyConsistentValue
      bool atomic = !field_null_free || fvk->must_be_atomic();

      field_val->as_InlineType()->store_flat(kit, base, field_ptr, atomic, immutable_memory, field_null_free, decorators);
    } else {
      // Store field value to memory
      BasicType bt = type2field[ft->basic_type()];
      const TypePtr* field_ptr_type = (decorators & C2_MISMATCHED) == 0 ? kit->gvn().type(field_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      const Type* val_type = Type::get_const_type(ft);
      kit->access_store_at(base, field_ptr, field_ptr_type, field_val, val_type, bt, decorators);
    }
  }
}

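// Makes sure this inline type is buffered (allocated on the heap) and returns an InlineTypeNode
// with the buffer oop set, allocating and initializing a new buffer if necessary.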
InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_null_marker(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io  = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    // Allocate and initialize buffer, re-execute on deoptimization.
    kit->jvms()->set_bci(kit->bci());
    kit->jvms()->set_should_reexecute(true);
    kit->kill_dead_locals();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop  = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);
    Node* payload_alloc_oop = kit->basic_plus_adr(alloc_oop, vk->payload_offset());
    store(kit, alloc_oop, payload_alloc_oop, true, IN_HEAP | MO_UNORDERED | C2_TIGHTLY_COUPLED_ALLOC);

    // Do not let stores that initialize this buffer be reordered with a subsequent
    // store that would make this buffer accessible by other threads.
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    assert(alloc != nullptr, "must have an allocation node");
    kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    oop->init_req(3, alloc_oop);
    region->init_req(3, kit->control());
    io    ->init_req(3, kit->i_o());
    mem   ->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

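// Returns true if this inline type is known to be buffered, i.e. its oop is known to be non-null.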
bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

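// Replaces the result projection of 'call' at 'proj_idx' (if any) with 'value' and advances
// the projection index by the size of the given basic type.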
static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
  ProjNode* pn = call->proj_out_or_null(proj_idx);
  if (pn != nullptr) {
    C->gvn_replace_by(pn, value);
    C->initial_gvn()->hash_delete(pn);
    pn->set_req(0, C->top());
  }
  proj_idx += type2size[bt];
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call with an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  uint proj_idx = TypeFunc::Parms;
  // Replace oop projection
  replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
  // Replace field projections
  replace_field_projs(C, call, proj_idx);
  // Replace null_marker projection
  replace_proj(C, call, proj_idx, get_null_marker(), T_BOOLEAN);
  assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
}

void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      // Replace field projections for flat field
      vt->replace_field_projs(C, call, proj_idx);
      if (!field_is_null_free(i)) {
        // Replace null_marker projection for nullable field
        replace_proj(C, call, proj_idx, vt->get_null_marker(), T_BOOLEAN);
      }
      continue;
    }
    // Replace projection for field value
    replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
  }
}

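// Makes sure that all non-flat inline type field values (including those of flat fields) are
// buffered and returns the updated InlineTypeNode.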
InlineTypeNode* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
     Node* value = field_value(i);
     if (field_is_flat(i)) {
       // Flat inline type field
       vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
     } else if (value->is_InlineType()) {
       // Non-flat inline type field
       vt->set_field_value(i, value->as_InlineType()->buffer(kit));
     }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

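// Idealize this inline type node: fold the oop input if it is itself an InlineTypeNode, use a
// loaded base oop for the fields if possible, and remove redundant re-allocations of this value.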
Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  Node* is_buffered = get_is_buffered();

  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_null_marker(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use the base oop if the fields are loaded from memory, but don't do so if the base is the
  // CheckCastPP of an allocation, because the only case where we load from a naked CheckCastPP is
  // when we exit a constructor of an inline type and want to relinquish the larval oop there.
  // This has a couple of benefits:
  // - The allocation is likely to be elided earlier if it is not an input of an InlineTypeNode.
  // - An InlineTypeNode without an allocation input is more likely to be GVN-ed. This may emerge
  //   when we try to clone a value object.
  // - The buffering, if needed, is delayed until it is required. Since this new allocation is
  //   created from an InlineTypeNode, it is recognized as not having a unique identity and, in
  //   the future, we can move it around more freely, such as hoisting it out of loops. This is
  //   not true for the old allocation since larval value objects do have unique identities.
  Node* base = is_loaded(phase);
  if (base != nullptr && !base->is_InlineType() && !phase->type(base)->maybe_null() && AllocateNode::Ideal_allocation(base) == nullptr) {
    if (oop != base || phase->type(is_buffered) != TypeInt::ONE) {
      set_oop(*phase, base);
      set_is_buffered(*phase);
      return this;
    }
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones;
      // they will be removed anyway, and changing the memory chain would confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
  vt->set_is_buffered(gvn, false);
  vt->set_null_marker(gvn);
  return vt;
}

InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_all_zero_impl(gvn, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  // Create a new InlineTypeNode initialized with all zero
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
  vt->set_is_buffered(gvn, false);
  vt->set_null_marker(gvn);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_all_zero_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
  return vt;
}

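// Returns true if this inline type is known to be the all-zero instance, i.e. it is non-null
// and all field values are zero or null (see 'flat' for the handling of null-free, non-flat fields).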
bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
  const TypeInt* tinit = gvn->type(get_null_marker())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the all-zero value. If 'flat' is set,
      // reject non-flat fields because they need to be initialized with an oop to a buffer.
      if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_null_marker())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    } else if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

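// Creates a scalarized inline type from a (possibly null) heap-allocated oop by loading the
// field values from memory and keeping track of the oop for buffering purposes.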
InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, visited);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();

  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    return oop->as_InlineType();
  }

  if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      vt = make_null_impl(gvn, vk, visited);
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
    vt->set_is_buffered(gvn);
    vt->set_null_marker(gvn);
    Node* payload_ptr = kit->basic_plus_adr(not_null_oop, vk->payload_offset());
    vt->load(kit, not_null_oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      vt->set_oop(gvn, oop);
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_null_marker(gvn);
    Node* payload_ptr = kit->basic_plus_adr(oop, vk->payload_offset());
    vt->load(kit, oop, payload_ptr, true, true, IN_HEAP | MO_UNORDERED, visited);
// TODO 8284443
//    assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
//           AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

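// Loads an inline type in flat layout from memory at 'ptr' (within holder 'base') and returns
// a scalarized InlineTypeNode.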
InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr,
                                               bool atomic, bool immutable_memory, bool null_free, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, base, ptr, atomic, immutable_memory, null_free, null_free, decorators, visited);
}

// Implementation of 'make_from_flat' that additionally tracks visited types to handle circular inline type fields
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool atomic, bool immutable_memory,
                                                    bool null_free, bool trust_null_free_oop, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
  PhaseGVN& gvn = kit->gvn();
  bool do_atomic = atomic;
  // With immutable memory, a non-atomic load and an atomic load are the same
  if (immutable_memory) {
    do_atomic = false;
  }
  // If there is only one flattened field, a non-atomic load and an atomic load are the same
  if (vk->is_naturally_atomic(null_free)) {
    do_atomic = false;
  }

  if (!do_atomic) {
    InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
    if (!null_free) {
      int nm_offset = vk->null_marker_offset_in_payload();
      Node* nm_ptr = kit->basic_plus_adr(base, ptr, nm_offset);
      const TypePtr* nm_ptr_type = (decorators & C2_MISMATCHED) == 0 ? gvn.type(nm_ptr)->is_ptr() : TypeRawPtr::BOTTOM;
      Node* nm_value = kit->access_load_at(base, nm_ptr, nm_ptr_type, TypeInt::BOOL, T_BOOLEAN, decorators);
      vt->set_req(NullMarker, nm_value);
    }

    vt->load(kit, base, ptr, immutable_memory, trust_null_free_oop, decorators, visited);
    return gvn.transform(vt)->as_InlineType();
  }

  assert(!immutable_memory, "immutable memory does not need explicit atomic access");
  return LoadFlatNode::load(kit, vk, base, ptr, null_free, trust_null_free_oop, decorators);
}

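// Loads an inline type from a flat array element, dispatching at runtime on the array layout
// (nullable atomic, null-free atomic, or null-free non-atomic) and merging the results with phis.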
InlineTypeNode* InlineTypeNode::make_from_flat_array(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* idx) {
  assert(vk->maybe_flat_in_array(), "element type %s cannot be flat in array", vk->name()->as_utf8());
  PhaseGVN& gvn = kit->gvn();
  DecoratorSet decorators = IN_HEAP | IS_ARRAY | MO_UNORDERED | C2_CONTROL_DEPENDENT_LOAD;
  kit->C->set_flat_accesses();
  InlineTypeNode* vt_nullable = nullptr;
  InlineTypeNode* vt_null_free = nullptr;
  InlineTypeNode* vt_non_atomic = nullptr;

  RegionNode* region = new RegionNode(4);
  gvn.set_type(region, Type::CONTROL);
  kit->record_for_igvn(region);

  Node* input_memory_state = kit->reset_memory();
  kit->set_all_memory(input_memory_state);

  PhiNode* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
  gvn.set_type(mem, Type::MEMORY);
  kit->record_for_igvn(mem);

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  gvn.set_type(io, Type::ABIO);
  kit->record_for_igvn(io);

  Node* bol_null_free = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
  IfNode* iff_null_free = kit->create_and_map_if(kit->control(), bol_null_free, PROB_FAIR, COUNT_UNKNOWN);

  // Nullable
  kit->set_control(kit->IfFalse(iff_null_free));
  if (!kit->stopped()) {
    assert(vk->has_nullable_atomic_layout(), "element type %s does not have a nullable flat layout", vk->name()->as_utf8());
    kit->set_all_memory(input_memory_state);
    Node* cast = kit->cast_to_flat_array(base, vk, false, true, true);
    Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
    vt_nullable = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, false, decorators);

    region->init_req(1, kit->control());
    mem->set_req(1, kit->reset_memory());
    io->set_req(1, kit->i_o());
  }

  // Null-free
  kit->set_control(kit->IfTrue(iff_null_free));
  if (!kit->stopped()) {
    kit->set_all_memory(input_memory_state);

    Node* bol_atomic = kit->null_free_atomic_array_test(base, vk);
    IfNode* iff_atomic = kit->create_and_map_if(kit->control(), bol_atomic, PROB_FAIR, COUNT_UNKNOWN);

    // Atomic
    kit->set_control(kit->IfTrue(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_atomic_layout(), "element type %s does not have a null-free atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array(base, vk, true, false, true);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      vt_null_free = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, true, false, true, decorators);

      region->init_req(2, kit->control());
      mem->set_req(2, kit->reset_memory());
      io->set_req(2, kit->i_o());
    }

    // Non-atomic
    kit->set_control(kit->IfFalse(iff_atomic));
    if (!kit->stopped()) {
      assert(vk->has_non_atomic_layout(), "element type %s does not have a null-free non-atomic flat layout", vk->name()->as_utf8());
      kit->set_all_memory(input_memory_state);
      Node* cast = kit->cast_to_flat_array(base, vk, true, false, false);
      Node* ptr = kit->array_element_address(cast, idx, T_FLAT_ELEMENT);
      vt_non_atomic = InlineTypeNode::make_from_flat(kit, vk, cast, ptr, false, false, true, decorators);

      region->init_req(3, kit->control());
      mem->set_req(3, kit->reset_memory());
      io->set_req(3, kit->i_o());
    }
  }

  InlineTypeNode* vt = nullptr;
  if (vt_nullable == nullptr && vt_null_free == nullptr && vt_non_atomic == nullptr) {
    // All paths are dead
    vt = make_null(gvn, vk);
  } else if (vt_nullable == nullptr && vt_null_free == nullptr) {
    vt = vt_non_atomic;
  } else if (vt_nullable == nullptr && vt_non_atomic == nullptr) {
    vt = vt_null_free;
  } else if (vt_null_free == nullptr && vt_non_atomic == nullptr) {
    vt = vt_nullable;
  }
  if (vt != nullptr) {
    kit->set_control(kit->gvn().transform(region));
    kit->set_all_memory(kit->gvn().transform(mem));
    kit->set_i_o(kit->gvn().transform(io));
    return vt;
  }

  InlineTypeNode* zero = InlineTypeNode::make_null(gvn, vk);
  vt = zero->clone_with_phis(&gvn, region);
  if (vt_nullable != nullptr) {
    vt = vt->merge_with(&gvn, vt_nullable, 1, false);
  }
  if (vt_null_free != nullptr) {
    vt = vt->merge_with(&gvn, vt_null_free, 2, false);
  }
  if (vt_non_atomic != nullptr) {
    vt = vt->merge_with(&gvn, vt_non_atomic, 3, false);
  }

  kit->set_control(kit->gvn().transform(region));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->set_i_o(kit->gvn().transform(io));
  return gvn.transform(vt)->as_InlineType();
}

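// Creates an inline type from the scalarized field inputs (in == true) or result projections
// (in == false) of 'multi', starting at 'base_input'.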
InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(kit->gvn(), oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

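// Checks if the field values of this inline type are all loads from a single base oop at offsets
// matching the inline type layout. Returns that base oop, or nullptr otherwise.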
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (vk == nullptr) {
    vk = inline_klass();
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flat(i) && vt->is_InlineType()) {
        // Check inline type field load recursively
        base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
        if (base == nullptr) {
          return nullptr;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of field load matches inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
        return nullptr;
      } else if (base == nullptr) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
          return nullptr;
        }
      }
    } else {
      return nullptr;
    }
  }
  return base;
}

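// Returns the klass pointer of 'vk' as a long constant with its lowest bit set (tagged).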
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

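// Passes the scalarized field values of this inline type (including null markers) to node 'n'
// (a call or return), starting at input index 'base_input'.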
1237 void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
1238   if (!null_free && in) {
1239     n->init_req(base_input++, get_null_marker());
1240   }
1241   for (uint i = 0; i < field_count(); i++) {
1242     Node* arg = field_value(i);
1243     if (field_is_flat(i)) {
1244       // Flat inline type field
1245       arg->as_InlineType()->pass_fields(kit, n, base_input, in);
1246       if (!field_is_null_free(i)) {
1247         assert(field_null_marker_offset(i) != -1, "inconsistency");
1248         n->init_req(base_input++, arg->as_InlineType()->get_null_marker());
1249       }
1250     } else {
1251       if (arg->is_InlineType()) {
1252         // Non-flat inline type field
1253         InlineTypeNode* vt = arg->as_InlineType();
1254         assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
1255         arg = vt->buffer(kit);
1256       }
1257       // Initialize call/return arguments
1258       n->init_req(base_input++, arg);
1259       if (field_type(i)->size() == 2) {
1260         n->init_req(base_input++, kit->top());
1261       }
1262     }
1263   }
1264   // The last argument is used to pass the null marker to compiled code; it is not needed here, so pass top.
1265   if (!null_free && !in) {
1266     n->init_req(base_input++, kit->top());
1267   }
1268 }
1269 
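// Initialize the fields of this inline type from node 'multi', starting at 'base_input':
// either from the parameters/inputs of a method entry or call (in == true) or from the
// projections of a call (in == false). For nullable values, a null check region is set up
// so that oop fields are nulled out when the holder is null.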
1270 void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
1271   PhaseGVN& gvn = kit->gvn();
1272   Node* null_marker = nullptr;
1273   if (!null_free) {
1274     // Nullable inline type
1275     if (in) {
1276       // Set null marker
1277       if (multi->is_Start()) {
1278         null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1279       } else {
1280         null_marker = multi->as_Call()->in(base_input);
1281       }
1282       set_req(NullMarker, null_marker);
1283       base_input++;
1284     }
1285     // Add a null check on the null marker to make subsequent field loads dependent on it
1286     assert(null_check_region == nullptr, "already set");
1287     if (null_marker == nullptr) {
1288       // Will only be initialized below, use dummy node for now
1289       null_marker = new Node(1);
1290       null_marker->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
1291       gvn.set_type_bottom(null_marker);
1292     }
1293     Node* null_ctrl = kit->top();
1294     kit->null_check_common(null_marker, T_INT, false, &null_ctrl);
1295     Node* non_null_ctrl = kit->control();
1296     null_check_region = new RegionNode(3);
1297     null_check_region->init_req(1, non_null_ctrl);
1298     null_check_region->init_req(2, null_ctrl);
1299     null_check_region = gvn.transform(null_check_region);
1300     kit->set_control(null_check_region);
1301   }
1302 
1303   for (uint i = 0; i < field_count(); ++i) {
1304     ciType* type = field_type(i);
1305     Node* parm = nullptr;
1306     if (field_is_flat(i)) {
1307       // Flat inline type field
1308       InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
1309       vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
1310       if (!field_is_null_free(i)) {
1311         assert(field_null_marker_offset(i) != -1, "inconsistency");
1312         Node* null_marker = nullptr;
1313         if (multi->is_Start()) {
1314           null_marker = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1315         } else if (in) {
1316           null_marker = multi->as_Call()->in(base_input);
1317         } else {
1318           null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1319         }
1320         vt->set_req(NullMarker, null_marker);
1321         base_input++;
1322       }
1323       parm = gvn.transform(vt);
1324     } else {
1325       if (multi->is_Start()) {
1326         assert(in, "return from start?");
1327         parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
1328       } else if (in) {
1329         parm = multi->as_Call()->in(base_input);
1330       } else {
1331         parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1332       }
1333       bool null_free = field_is_null_free(i);
1334       // Non-flat inline type field
1335       if (type->is_inlinetype()) {
1336         if (null_check_region != nullptr) {
1337           // We limit scalarization for inline types with circular fields and can therefore observe nodes
1338           // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
1339           // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
1340           if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
1341             parm = parm->as_InlineType()->get_oop();
1342           }
1343           // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
1344           parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
1345           parm->set_req(2, kit->zerocon(T_OBJECT));
1346           parm = gvn.transform(parm);
1347           null_free = false;
1348         }
1349         if (visited.contains(type)) {
1350           kit->C->set_has_circular_inline_type(true);
1351         } else if (!parm->is_InlineType()) {
1352           int old_len = visited.length();
1353           visited.push(type);
1354           if (null_free) {
1355             parm = kit->cast_not_null(parm);
1356           }
1357           parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
1358           visited.trunc_to(old_len);
1359         }
1360       }
1361       base_input += type->size();
1362     }
1363     assert(parm != nullptr, "should never be null");
1364     assert(field_value(i) == nullptr, "already set");
1365     set_field_value(i, parm);
1366     gvn.record_for_igvn(parm);
1367   }
1368   // The last argument is used to pass the null marker to compiled code
1369   if (!null_free && !in) {
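    // The null check above was created with a dummy null marker input. Now that the actual
    // projection is available, rewire the comparison to use it.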
1370     Node* cmp = null_marker->raw_out(0);
1371     null_marker = gvn.transform(new ProjNode(multi->as_Call(), base_input));
1372     set_req(NullMarker, null_marker);
1373     gvn.hash_delete(cmp);
1374     cmp->set_req(1, null_marker);
1375     gvn.hash_find_insert(cmp);
1376     gvn.record_for_igvn(cmp);
1377     base_input++;
1378   }
1379 }
1380 
1381 // Search for multiple allocations of this inline type and try to replace them by dominating allocations.
1382 // Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
1383 void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
1384   PhaseIterGVN* igvn = &phase->igvn();
1385   // Search for allocations of this inline type. Ignore scalar replaceable ones, they
1386   // will be removed anyway and changing the memory chain will confuse other optimizations.
1387   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1388     AllocateNode* alloc = fast_out(i)->isa_Allocate();
1389     if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
1390       Node* res = alloc->result_cast();
1391       if (res == nullptr || !res->is_CheckCastPP()) {
1392         break; // No unique CheckCastPP
1393       }
1394       // Search for a dominating allocation of the same inline type
1395       Node* res_dom = res;
1396       for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
1397         AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
1398         if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
1399           Node* res_other = alloc_other->result_cast();
1400           if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
1401               phase->is_dominator(res_other->in(0), res_dom->in(0))) {
1402             res_dom = res_other;
1403           }
1404         }
1405       }
1406       if (res_dom != res) {
1407         // Replace allocation by dominating one.
1408         replace_allocation(igvn, res, res_dom);
1409         // The result of the dominated allocation is now unused and will be removed
1410         // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
1411         igvn->_worklist.push(alloc);
1412       }
1413     }
1414   }
1415 }
1416 
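// Create an inline type node representing the null value of 'vk': the oop is nullptr,
// the null marker is 0, and all field values are zero.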
1417 InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
1418   GrowableArray<ciType*> visited;
1419   visited.push(vk);
1420   return make_null_impl(gvn, vk, visited, transform);
1421 }
1422 
1423 InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
1424   InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
1425   vt->set_is_buffered(gvn);
1426   vt->set_null_marker(gvn, gvn.intcon(0));
1427   for (uint i = 0; i < vt->field_count(); i++) {
1428     ciType* ft = vt->field_type(i);
1429     Node* value = gvn.zerocon(ft->basic_type());
1430     if (!vt->field_is_flat(i) && visited.contains(ft)) {
1431       gvn.C->set_has_circular_inline_type(true);
1432     } else if (ft->is_inlinetype()) {
1433       int old_len = visited.length();
1434       visited.push(ft);
1435       value = make_null_impl(gvn, ft->as_inline_klass(), visited);
1436       visited.trunc_to(old_len);
1437     }
1438     vt->set_field_value(i, value);
1439   }
1440   return transform ? gvn.transform(vt)->as_InlineType() : vt;
1441 }
1442 
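// Return a clone of this inline type node if it is (or may be) used by nodes other than
// the given 'map'; otherwise remove it from the GVN hash table so it can be modified in place.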
1443 InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
1444   if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
1445     return clone()->as_InlineType();
1446   }
1447   for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
1448     if (fast_out(i) != map) {
1449       return clone()->as_InlineType();
1450     }
1451   }
1452   gvn->hash_delete(this);
1453   return this;
1454 }
1455 
1456 const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
1457   Node* oop = get_oop();
1458   const Type* toop = phase->type(oop);
1459 #ifdef ASSERT
1460   if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
1461     // We are not allocated (anymore) and should therefore not have an instance id
1462     dump(1);
1463     assert(false, "Unbuffered inline type should not have known instance id");
1464   }
1465 #endif
1466   if (toop == Type::TOP) {
1467     return Type::TOP;
1468   }
1469   const Type* t = toop->filter_speculative(_type);
1470   if (t->singleton()) {
1471     // Don't replace InlineType by a constant
1472     t = _type;
1473   }
1474   const Type* tinit = phase->type(in(NullMarker));
1475   if (tinit == Type::TOP) {
1476     return Type::TOP;
1477   }
1478   if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
1479     t = t->join_speculative(TypePtr::NOTNULL);
1480   }
1481   return t;
1482 }
1483 
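// Create a LoadFlatNode that loads the flat field or array element of type 'vk' at 'ptr'
// inside 'base', wire it into the current control and memory state, and return an
// InlineTypeNode assembled from its projections.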
1484 InlineTypeNode* LoadFlatNode::load(GraphKit* kit, ciInlineKlass* vk, Node* base, Node* ptr, bool null_free, bool trust_null_free_oop, DecoratorSet decorators) {
1485   int output_type_size = vk->nof_nonstatic_fields() + (null_free ? 0 : 1);
1486   const Type** output_types = TypeTuple::fields(output_type_size);
1487   collect_field_types(vk, output_types + TypeFunc::Parms, 0, output_type_size, null_free, trust_null_free_oop);
1488   const TypeTuple* type = TypeTuple::make(output_type_size + TypeFunc::Parms, output_types);
1489 
1490   LoadFlatNode* load = new LoadFlatNode(vk, type, null_free, decorators);
1491   load->init_req(TypeFunc::Control, kit->control());
1492   load->init_req(TypeFunc::I_O, kit->top());
1493   load->init_req(TypeFunc::Memory, kit->reset_memory());
1494   load->init_req(TypeFunc::FramePtr, kit->frameptr());
1495   load->init_req(TypeFunc::ReturnAdr, kit->top());
1496 
1497   load->init_req(TypeFunc::Parms, base);
1498   load->init_req(TypeFunc::Parms + 1, ptr);
1499   kit->kill_dead_locals();
1500   kit->add_safepoint_edges(load);
1501   load = kit->gvn().transform(load)->as_LoadFlat();
1502   kit->record_for_igvn(load);
1503 
1504   kit->set_control(kit->gvn().transform(new ProjNode(load, TypeFunc::Control)));
1505   kit->set_all_memory(kit->gvn().transform(new ProjNode(load, TypeFunc::Memory)));
1506   return load->collect_projs(kit, vk, TypeFunc::Parms, null_free);
1507 }
1508 
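// Expand this LoadFlatNode into individual loads of its fields (and null marker, if nullable).
// Returns false if the access is mismatched and cannot be expanded field by field.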
1509 bool LoadFlatNode::expand_non_atomic(PhaseIterGVN& igvn) {
1510   assert(igvn.delay_transform(), "transformation must be delayed");
1511   if ((_decorators & C2_MISMATCHED) != 0) {
1512     return false;
1513   }
1514 
1515   GraphKit kit(jvms(), &igvn);
1516   kit.set_all_memory(kit.reset_memory());
1517 
1518   Node* base = this->base();
1519   Node* ptr = this->ptr();
1520 
1521   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1522     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1523     if (proj_out == nullptr) {
1524       continue;
1525     }
1526 
1527     ciField* field = _vk->nonstatic_field_at(i);
1528     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - _vk->payload_offset());
1529     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1530     igvn.set_type(field_ptr, field_ptr_type);
1531 
1532     Node* field_value = kit.access_load_at(base, field_ptr, field_ptr_type, igvn.type(proj_out), field->type()->basic_type(), _decorators);
1533     igvn.replace_node(proj_out, field_value);
1534   }
1535 
1536   if (!_null_free) {
1537     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1538     if (proj_out != nullptr) {
1539       Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, _vk->null_marker_offset_in_payload());
1540       const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1541       igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1542       Node* null_marker_value = kit.access_load_at(base, null_marker_ptr, null_marker_ptr_type, TypeInt::BOOL, T_BOOLEAN, _decorators);
1543       igvn.replace_node(proj_out, null_marker_value);
1544     }
1545   }
1546 
1547   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1548   if (old_ctrl != nullptr) {
1549     igvn.replace_node(old_ctrl, kit.control());
1550   }
1551   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1552   Node* new_mem = kit.reset_memory();
1553   if (old_mem != nullptr) {
1554     igvn.replace_node(old_mem, new_mem);
1555   }
1556   return true;
1557 }
1558 
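// Expand this LoadFlatNode into a single atomic load of the payload (at most 64 bits).
// The individual field values are extracted from the payload afterwards.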
1559 void LoadFlatNode::expand_atomic(PhaseIterGVN& igvn) {
1560   assert(igvn.delay_transform(), "transformation must be delayed");
1561   GraphKit kit(jvms(), &igvn);
1562   kit.set_all_memory(kit.reset_memory());
1563 
1564   Node* base = this->base();
1565   Node* ptr = this->ptr();
1566 
1567   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1568   kit.insert_mem_bar(Op_MemBarCPUOrder);
1569   Node* payload = kit.access_load_at(base, ptr, TypeRawPtr::BOTTOM, Type::get_const_basic_type(payload_bt), payload_bt,
1570                                      _decorators | C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD | C2_UNKNOWN_CONTROL_LOAD, kit.control());
1571   kit.insert_mem_bar(Op_MemBarCPUOrder);
1572 
1573   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1574   if (old_ctrl != nullptr) {
1575     igvn.replace_node(old_ctrl, kit.control());
1576   }
1577   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1578   Node* new_mem = kit.reset_memory();
1579   if (old_mem != nullptr) {
1580     igvn.replace_node(old_mem, new_mem);
1581   }
1582 
1583   expand_projs_atomic(igvn, kit.control(), payload);
1584 }
1585 
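// Recursively collect the types of the flattened fields of 'vk' into 'field_types', starting
// at 'idx'. A TypeInt::BOOL entry for the null marker is appended if the layout is not
// null-free. Null-free oop fields are narrowed to not-null if 'trust_null_free_oop' is set.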
1586 void LoadFlatNode::collect_field_types(ciInlineKlass* vk, const Type** field_types, int idx, int limit, bool null_free, bool trust_null_free_oop) {
1587   assert(null_free || !trust_null_free_oop, "cannot trust null-free oop when the holder object is not null-free");
1588   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1589     ciField* field = vk->declared_nonstatic_field_at(i);
1590     if (field->is_flat()) {
1591       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1592       collect_field_types(field_klass, field_types, idx, limit, field->is_null_free(), trust_null_free_oop && field->is_null_free());
1593       idx += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1594       continue;
1595     }
1596 
1597     const Type* field_type = Type::get_const_type(field->type());
1598     if (trust_null_free_oop && field->is_null_free()) {
1599       field_type = field_type->filter(TypePtr::NOTNULL);
1600     }
1601 
1602     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1603     field_types[idx] = field_type;
1604     idx++;
1605   }
1606 
1607   if (!null_free) {
1608     assert(idx >= 0 && idx < limit, "field type out of bounds, %d - %d", idx, limit);
1609     field_types[idx] = TypeInt::BOOL;
1610   }
1611 }
1612 
1613 // Create an InlineTypeNode for 'vk' whose field values are extracted from the projections
1614 // of this LoadFlatNode, starting at 'proj_con'
1615 InlineTypeNode* LoadFlatNode::collect_projs(GraphKit* kit, ciInlineKlass* vk, int proj_con, bool null_free) {
1616   PhaseGVN& gvn = kit->gvn();
1617   InlineTypeNode* res = InlineTypeNode::make_uninitialized(gvn, vk, null_free);
1618   for (int i = 0; i < vk->nof_declared_nonstatic_fields(); i++) {
1619     ciField* field = vk->declared_nonstatic_field_at(i);
1620     Node* field_value;
1621     if (field->is_flat()) {
1622       ciInlineKlass* field_klass = field->type()->as_inline_klass();
1623       field_value = collect_projs(kit, field_klass, proj_con, field->is_null_free());
1624       proj_con += field_klass->nof_nonstatic_fields() + (field->is_null_free() ? 0 : 1);
1625     } else {
1626       field_value = gvn.transform(new ProjNode(this, proj_con));
1627       if (field->type()->is_inlinetype()) {
1628         field_value = InlineTypeNode::make_from_oop(kit, field_value, field->type()->as_inline_klass());
1629       }
1630       proj_con++;
1631     }
1632     res->set_field_value(i, field_value);
1633   }
1634 
1635   if (null_free) {
1636     res->set_null_marker(gvn);
1637   } else {
1638     res->set_null_marker(gvn, gvn.transform(new ProjNode(this, proj_con)));
1639   }
1640   return gvn.transform(res)->as_InlineType();
1641 }
1642 
1643 // Extract the values of the flattened fields from the loaded payload
1644 void LoadFlatNode::expand_projs_atomic(PhaseIterGVN& igvn, Node* ctrl, Node* payload) {
1645   BasicType payload_bt = _vk->atomic_size_to_basic_type(_null_free);
1646   for (int i = 0; i < _vk->nof_nonstatic_fields(); i++) {
1647     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + i);
1648     if (proj_out == nullptr) {
1649       continue;
1650     }
1651 
1652     ciField* field = _vk->nonstatic_field_at(i);
1653     int field_offset = field->offset_in_bytes() - _vk->payload_offset();
1654     const Type* field_type = igvn.type(proj_out);
1655     Node* field_value = get_payload_value(igvn, ctrl, payload_bt, payload, field_type, field->type()->basic_type(), field_offset);
1656     igvn.replace_node(proj_out, field_value);
1657   }
1658 
1659   if (!_null_free) {
1660     ProjNode* proj_out = proj_out_or_null(TypeFunc::Parms + _vk->nof_nonstatic_fields());
1661     if (proj_out == nullptr) {
1662       return;
1663     }
1664 
1665     int null_marker_offset = _vk->null_marker_offset_in_payload();
1666     Node* null_marker_value = get_payload_value(igvn, ctrl, payload_bt, payload, TypeInt::BOOL, T_BOOLEAN, null_marker_offset);
1667     igvn.replace_node(proj_out, null_marker_value);
1668   }
1669 }
1670 
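// Extract a value of basic type 'value_bt' at byte 'offset' from the loaded payload by
// shifting it into the low bits and then narrowing it (primitives) or decoding it (oops).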
1671 Node* LoadFlatNode::get_payload_value(PhaseIterGVN& igvn, Node* ctrl, BasicType payload_bt, Node* payload, const Type* value_type, BasicType value_bt, int offset) {
1672   assert((offset + type2aelembytes(value_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
1673   Node* value = nullptr;
1674   // Shift the value at 'offset' into the low bits of the payload
1675   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
1676   if (payload_bt == T_LONG) {
1677     value = igvn.transform(new URShiftLNode(payload, shift_val));
1678     value = igvn.transform(new ConvL2INode(value));
1679   } else {
1680     value = igvn.transform(new URShiftINode(payload, shift_val));
1681   }
1682 
1683   if (value_bt == T_INT) {
1684     return value;
1685   } else if (!is_java_primitive(value_bt)) {
1686     assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1687     value = igvn.transform(new CastI2NNode(ctrl, value, value_type->make_narrowoop()));
1688     value = igvn.transform(new DecodeNNode(value, value_type));
1689 
1690     // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
1691     // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
1692     // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
1693     // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.
1694     return value;
1695   } else {
1696     // Make sure to zero unused bits in the 32-bit value
1697     return Compile::narrow_value(value_bt, value, nullptr, &igvn, true);
1698   }
1699 }
1700 
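// Create a StoreFlatNode that stores 'value' to the flat field or array element at 'ptr'
// inside 'base' and wire it into the current control and memory state. Inline type field
// values are buffered (allocated) first.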
1701 void StoreFlatNode::store(GraphKit* kit, Node* base, Node* ptr, InlineTypeNode* value, bool null_free, DecoratorSet decorators) {
1702   value = value->allocate_fields(kit);
1703   StoreFlatNode* store = new StoreFlatNode(null_free, decorators);
1704   store->init_req(TypeFunc::Control, kit->control());
1705   store->init_req(TypeFunc::I_O, kit->top());
1706   store->init_req(TypeFunc::Memory, kit->reset_memory());
1707   store->init_req(TypeFunc::FramePtr, kit->frameptr());
1708   store->init_req(TypeFunc::ReturnAdr, kit->top());
1709 
1710   store->init_req(TypeFunc::Parms, base);
1711   store->init_req(TypeFunc::Parms + 1, ptr);
1712   store->init_req(TypeFunc::Parms + 2, value);
1713   kit->kill_dead_locals();
1714   kit->add_safepoint_edges(store);
1715   store = kit->gvn().transform(store)->as_StoreFlat();
1716   kit->record_for_igvn(store);
1717 
1718   kit->set_control(kit->gvn().transform(new ProjNode(store, TypeFunc::Control)));
1719   kit->set_all_memory(kit->gvn().transform(new ProjNode(store, TypeFunc::Memory)));
1720 }
1721 
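// Expand this StoreFlatNode into individual stores of its field values (and null marker, if
// nullable). Returns false if the access is mismatched and cannot be expanded field by field.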
1722 bool StoreFlatNode::expand_non_atomic(PhaseIterGVN& igvn) {
1723   assert(igvn.delay_transform(), "transformation must be delayed");
1724   if ((_decorators & C2_MISMATCHED) != 0) {
1725     return false;
1726   }
1727 
1728   GraphKit kit(jvms(), &igvn);
1729   kit.set_all_memory(kit.reset_memory());
1730 
1731   Node* base = this->base();
1732   Node* ptr = this->ptr();
1733   InlineTypeNode* value = this->value();
1734 
1735   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1736   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1737     ciField* field = vk->nonstatic_field_at(i);
1738     Node* field_ptr = kit.basic_plus_adr(base, ptr, field->offset_in_bytes() - vk->payload_offset());
1739     const TypePtr* field_ptr_type = field_ptr->Value(&igvn)->is_ptr();
1740     igvn.set_type(field_ptr, field_ptr_type);
1741     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1742     kit.access_store_at(base, field_ptr, field_ptr_type, field_value, igvn.type(field_value), field->type()->basic_type(), _decorators);
1743   }
1744 
1745   if (!_null_free) {
1746     Node* null_marker_ptr = kit.basic_plus_adr(base, ptr, vk->null_marker_offset_in_payload());
1747     const TypePtr* null_marker_ptr_type = null_marker_ptr->Value(&igvn)->is_ptr();
1748     igvn.set_type(null_marker_ptr, null_marker_ptr_type);
1749     Node* null_marker_value = value->get_null_marker();
1750     kit.access_store_at(base, null_marker_ptr, null_marker_ptr_type, null_marker_value, TypeInt::BOOL, T_BOOLEAN, _decorators);
1751   }
1752 
1753   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1754   if (old_ctrl != nullptr) {
1755     igvn.replace_node(old_ctrl, kit.control());
1756   }
1757   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1758   Node* new_mem = kit.reset_memory();
1759   if (old_mem != nullptr) {
1760     igvn.replace_node(old_mem, new_mem);
1761   }
1762   return true;
1763 }
1764 
1765 void StoreFlatNode::expand_atomic(PhaseIterGVN& igvn) {
1766   // Convert to a payload value <= 64-bit and write atomically.
1767   // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
1768   // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
1769   // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
1770   // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
1771   assert(igvn.delay_transform(), "transformation must be delayed");
1772   GraphKit kit(jvms(), &igvn);
1773   kit.set_all_memory(kit.reset_memory());
1774 
1775   Node* base = this->base();
1776   Node* ptr = this->ptr();
1777   InlineTypeNode* value = this->value();
1778 
1779   int oop_off_1 = -1;
1780   int oop_off_2 = -1;
1781   Node* payload = convert_to_payload(igvn, kit.control(), value, _null_free, oop_off_1, oop_off_2);
1782 
1783   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1784   BasicType payload_bt = vk->atomic_size_to_basic_type(_null_free);
1785   kit.insert_mem_bar(Op_MemBarCPUOrder);
1786   if (!UseG1GC || oop_off_1 == -1) {
1787     // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
1788     assert(oop_off_2 == -1 || !UseG1GC, "sanity");
1789     // With ZGC, oops are not compressed and would be written by a "normal" oop store instead, so the payload cannot contain embedded oops here.
1790     assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
1791     kit.access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, Type::get_const_basic_type(payload_bt), payload_bt, _decorators | C2_MISMATCHED, true, value);
1792   } else {
1793     // Contains oops and requires late barrier expansion. Emit a special store node that allows GC barriers to be emitted in the backend.
1794     assert(UseG1GC, "Unexpected GC");
1795     assert(payload_bt == T_LONG, "Unexpected payload type");
1796     // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
1797     Node* oop_offset = (oop_off_2 == -1) ? igvn.intcon(oop_off_1) : nullptr;
1798     Node* mem = kit.reset_memory();
1799     kit.set_all_memory(mem);
1800     Node* store = igvn.transform(new StoreLSpecialNode(kit.control(), mem, ptr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
1801     kit.set_memory(store, TypeRawPtr::BOTTOM);
1802   }
1803   kit.insert_mem_bar(Op_MemBarCPUOrder);
1804 
1805   Node* old_ctrl = proj_out_or_null(TypeFunc::Control);
1806   if (old_ctrl != nullptr) {
1807     igvn.replace_node(old_ctrl, kit.control());
1808   }
1809   Node* old_mem = proj_out_or_null(TypeFunc::Memory);
1810   Node* new_mem = kit.reset_memory();
1811   if (old_mem != nullptr) {
1812     igvn.replace_node(old_mem, new_mem);
1813   }
1814 }
1815 
1816 // Convert the field values of 'value' into a single payload value whose basic type is determined by the atomic size of the layout
1817 Node* StoreFlatNode::convert_to_payload(PhaseIterGVN& igvn, Node* ctrl, InlineTypeNode* value, bool null_free, int& oop_off_1, int& oop_off_2) {
1818   ciInlineKlass* vk = igvn.type(value)->inline_klass();
1819   BasicType payload_bt = vk->atomic_size_to_basic_type(null_free);
1820   Node* payload = igvn.zerocon(payload_bt);
1821   if (!null_free) {
1822     // Set the null marker
1823     payload = set_payload_value(igvn, payload_bt, payload, T_BOOLEAN, value->get_null_marker(), vk->null_marker_offset_in_payload());
1824   }
1825 
1826   // Iterate over the fields and add their values to the payload
1827   for (int i = 0; i < vk->nof_nonstatic_fields(); i++) {
1828     ciField* field = vk->nonstatic_field_at(i);
1829     Node* field_value = value->field_value_by_offset(field->offset_in_bytes(), true);
1830     ciType* field_klass = field->type();
1831     BasicType field_bt = field_klass->basic_type();
1832     int field_offset_in_payload = field->offset_in_bytes() - vk->payload_offset();
1833     if (!field_klass->is_primitive_type()) {
1834       // Narrow oop field
1835       assert(UseCompressedOops && payload_bt == T_LONG, "Naturally atomic");
1836       if (oop_off_1 == -1) {
1837         oop_off_1 = field_offset_in_payload;
1838       } else {
1839         assert(oop_off_2 == -1, "already set");
1840         oop_off_2 = field_offset_in_payload;
1841       }
1842 
1843       const Type* val_type = Type::get_const_type(field_klass)->make_narrowoop();
1844       if (field_value->is_InlineType()) {
1845         assert(field_value->as_InlineType()->is_allocated(&igvn), "must be allocated");
1846       }
1847 
1848       field_value = igvn.transform(new EncodePNode(field_value, val_type));
1849       field_value = igvn.transform(new CastP2XNode(ctrl, field_value));
1850       field_value = igvn.transform(new ConvL2INode(field_value));
1851       field_bt = T_INT;
1852     }
1853     payload = set_payload_value(igvn, payload_bt, payload, field_bt, field_value, field_offset_in_payload);
1854   }
1855 
1856   return payload;
1857 }
1858 
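// Insert 'value' of basic type 'val_bt' into 'payload' at byte 'offset': zero the unused bits
// of the value, shift it into position, and OR it into the payload.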
1859 Node* StoreFlatNode::set_payload_value(PhaseIterGVN& igvn, BasicType payload_bt, Node* payload, BasicType val_bt, Node* value, int offset) {
1860   assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(payload_bt), "Value does not fit into payload");
1861 
1862   // Make sure to zero unused bits in the 32-bit value
1863   if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
1864     value = igvn.transform(new AndINode(value, igvn.intcon(0xFF)));
1865   } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
1866     value = igvn.transform(new AndINode(value, igvn.intcon(0xFFFF)));
1867   } else if (val_bt == T_FLOAT) {
1868     value = igvn.transform(new MoveF2INode(value));
1869   } else {
1870     assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
1871   }
1872 
1873   Node* shift_val = igvn.intcon(offset << LogBitsPerByte);
1874   if (payload_bt == T_LONG) {
1875     // Convert to long and clear the sign-extended upper 32 bits (the backend will fold this and emit a zero-extending i2l)
1876     value = igvn.transform(new ConvI2LNode(value));
1877     value = igvn.transform(new AndLNode(value, igvn.longcon(0xFFFFFFFF)));
1878 
1879     Node* shift_value = igvn.transform(new LShiftLNode(value, shift_val));
1880     payload = new OrLNode(shift_value, payload);
1881   } else {
1882     Node* shift_value = igvn.transform(new LShiftINode(value, shift_val));
1883     payload = new OrINode(shift_value, payload);
1884   }
1885   return igvn.transform(payload);
1886 }
1887 
1888 const Type* LoadFlatNode::Value(PhaseGVN* phase) const {
1889   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
1890       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP) {
1891     return Type::TOP;
1892   }
1893   return bottom_type();
1894 }
1895 
1896 const Type* StoreFlatNode::Value(PhaseGVN* phase) const {
1897   if (phase->type(in(TypeFunc::Control)) == Type::TOP || phase->type(in(TypeFunc::Memory)) == Type::TOP ||
1898       phase->type(base()) == Type::TOP || phase->type(ptr()) == Type::TOP || phase->type(value()) == Type::TOP) {
1899     return Type::TOP;
1900   }
1901   return bottom_type();
1902 }